/*
 * [dragonfly.git] / sys / dev / raid / asr / asr.c
 * Commit note (from the web export of this revision): "The cam_sim
 * structure was being deallocated unconditionally by device ..."
 */
984263bc 1/* $FreeBSD: src/sys/dev/asr/asr.c,v 1.3.2.2 2001/08/23 05:21:29 scottl Exp $ */
521cf4d2 2/* $DragonFly: src/sys/dev/raid/asr/asr.c,v 1.13 2004/03/15 03:05:08 dillon Exp $ */
984263bc
MD
3/*
4 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
5 * Copyright (c) 2000-2001 Adaptec Corporation
6 * All rights reserved.
7 *
8 * TERMS AND CONDITIONS OF USE
9 *
10 * Redistribution and use in source form, with or without modification, are
11 * permitted provided that redistributions of source code must retain the
12 * above copyright notice, this list of conditions and the following disclaimer.
13 *
14 * This software is provided `as is' by Adaptec and any express or implied
15 * warranties, including, but not limited to, the implied warranties of
16 * merchantability and fitness for a particular purpose, are disclaimed. In no
17 * event shall Adaptec be liable for any direct, indirect, incidental, special,
18 * exemplary or consequential damages (including, but not limited to,
19 * procurement of substitute goods or services; loss of use, data, or profits;
20 * or business interruptions) however caused and on any theory of liability,
21 * whether in contract, strict liability, or tort (including negligence or
22 * otherwise) arising in any way out of the use of this driver software, even
23 * if advised of the possibility of such damage.
24 *
25 * SCSI I2O host adapter driver
26 *
27 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
28 * - The 2000S and 2005S do not initialize on some machines,
29 * increased timeout to 255ms from 50ms for the StatusGet
30 * command.
31 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
32 * - I knew this one was too good to be true. The error return
33 * on ioctl commands needs to be compared to CAM_REQ_CMP, not
34 * to the bit masked status.
35 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
36 * - The 2005S that was supported is affectionately called the
37 * Conjoined BAR Firmware. In order to support RAID-5 in a
38 * 16MB low-cost configuration, Firmware was forced to go
39 * to a Split BAR Firmware. This requires a separate IOP and
40 * Messaging base address.
41 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
42 * - Handle support for 2005S Zero Channel RAID solution.
43 * - System locked up if the Adapter locked up. Do not try
44 * to send other commands if the resetIOP command fails. The
45 * fail outstanding command discovery loop was flawed as the
46 * removal of the command from the list prevented discovering
47 * all the commands.
48 * - Comment changes to clarify driver.
49 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
50 * - We do not use the AC_FOUND_DEV event because of I2O.
51 * Removed asr_async.
52 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
53 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
54 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0
55 * mode as this is confused with competitor adapters in run
56 * mode.
57 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove
58 * to prevent operating system panic.
59 * - moved default major number to 154 from 97.
60 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
61 * - The controller is not actually an ASR (Adaptec SCSI RAID)
62 * series that is visible, it's more of an internal code name.
63 * remove any visible references within reason for now.
64 * - bus_ptr->LUN was not correctly zeroed when initially
65 * allocated causing a possible panic of the operating system
66 * during boot.
67 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
68 * - Code always fails for ASR_getTid affecting performance.
69 * - initiated a set of changes that resulted from a formal
70 * code inspection by Mark_Salyzyn@adaptec.com,
71 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
72 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
73 * Their findings were focussed on the LCT & TID handler, and
74 * all resulting changes were to improve code readability,
75 * consistency or have a positive effect on performance.
76 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
77 * - Passthrough returned an incorrect error.
78 * - Passthrough did not migrate the intrinsic scsi layer wakeup
79 * on command completion.
80 * - generate control device nodes using make_dev and delete_dev.
81 * - Performance affected by TID caching reallocing.
82 * - Made suggested changes by Justin_Gibbs@adaptec.com
83 * - use splcam instead of splbio.
84 * - use cam_imask instead of bio_imask.
85 * - use u_int8_t instead of u_char.
86 * - use u_int16_t instead of u_short.
87 * - use u_int32_t instead of u_long where appropriate.
88 * - use 64 bit context handler instead of 32 bit.
89 * - create_ccb should only allocate the worst case
90 * requirements for the driver since CAM may evolve
91 * making union ccb much larger than needed here.
92 * renamed create_ccb to asr_alloc_ccb.
93 * - go nutz justifying all debug prints as macros
94 * defined at the top and remove unsightly ifdefs.
95 * - INLINE STATIC viewed as confusing. Historically
96 * utilized to affect code performance and debug
97 * issues in OS, Compiler or OEM specific situations.
98 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
99 * - Ported from FreeBSD 2.2.X DPT I2O driver.
100 * changed struct scsi_xfer to union ccb/struct ccb_hdr
101 * changed variable name xs to ccb
102 * changed struct scsi_link to struct cam_path
103 * changed struct scsibus_data to struct cam_sim
104 * stopped using fordriver for holding on to the TID
105 * use proprietary packet creation instead of scsi_inquire
106 * CAM layer sends synchronize commands.
107 */
108
109#define ASR_VERSION 1
110#define ASR_REVISION '0'
111#define ASR_SUBREVISION '8'
112#define ASR_MONTH 8
113#define ASR_DAY 21
114#define ASR_YEAR 2001 - 1980
115
116/*
117 * Debug macros to reduce the unsightly ifdefs
118 */
119#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
120# define debug_asr_message(message) \
121 { \
122 u_int32_t * pointer = (u_int32_t *)message; \
123 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);\
124 u_int32_t counter = 0; \
125 \
126 while (length--) { \
127 printf ("%08lx%c", (u_long)*(pointer++), \
128 (((++counter & 7) == 0) || (length == 0)) \
129 ? '\n' \
130 : ' '); \
131 } \
132 }
133#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
134
135#if (defined(DEBUG_ASR))
136 /* Breaks on none STDC based compilers :-( */
137# define debug_asr_printf(fmt,args...) printf(fmt, ##args)
138# define debug_asr_dump_message(message) debug_asr_message(message)
139# define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path);
140 /* None fatal version of the ASSERT macro */
141# if (defined(__STDC__))
142# define ASSERT(phrase) if(!(phrase))printf(#phrase " at line %d file %s\n",__LINE__,__FILE__)
143# else
144# define ASSERT(phrase) if(!(phrase))printf("phrase" " at line %d file %s\n",__LINE__,__FILE__)
145# endif
146#else /* DEBUG_ASR */
147# define debug_asr_printf(fmt,args...)
148# define debug_asr_dump_message(message)
149# define debug_asr_print_path(ccb)
150# define ASSERT(x)
151#endif /* DEBUG_ASR */
152
153/*
154 * If DEBUG_ASR_CMD is defined:
155 * 0 - Display incoming SCSI commands
156 * 1 - add in a quick character before queueing.
157 * 2 - add in outgoing message frames.
158 */
159#if (defined(DEBUG_ASR_CMD))
160# define debug_asr_cmd_printf(fmt,args...) printf(fmt,##args)
161# define debug_asr_dump_ccb(ccb) \
162 { \
163 u_int8_t * cp = (unsigned char *)&(ccb->csio.cdb_io); \
164 int len = ccb->csio.cdb_len; \
165 \
166 while (len) { \
167 debug_asr_cmd_printf (" %02x", *(cp++)); \
168 --len; \
169 } \
170 }
171# if (DEBUG_ASR_CMD > 0)
172# define debug_asr_cmd1_printf debug_asr_cmd_printf
173# else
174# define debug_asr_cmd1_printf(fmt,args...)
175# endif
176# if (DEBUG_ASR_CMD > 1)
177# define debug_asr_cmd2_printf debug_asr_cmd_printf
178# define debug_asr_cmd2_dump_message(message) debug_asr_message(message)
179# else
180# define debug_asr_cmd2_printf(fmt,args...)
181# define debug_asr_cmd2_dump_message(message)
182# endif
183#else /* DEBUG_ASR_CMD */
184# define debug_asr_cmd_printf(fmt,args...)
185# define debug_asr_cmd_dump_ccb(ccb)
186# define debug_asr_cmd1_printf(fmt,args...)
187# define debug_asr_cmd2_printf(fmt,args...)
188# define debug_asr_cmd2_dump_message(message)
189#endif /* DEBUG_ASR_CMD */
190
191#if (defined(DEBUG_ASR_USR_CMD))
192# define debug_usr_cmd_printf(fmt,args...) printf(fmt,##args)
193# define debug_usr_cmd_dump_message(message) debug_usr_message(message)
194#else /* DEBUG_ASR_USR_CMD */
195# define debug_usr_cmd_printf(fmt,args...)
196# define debug_usr_cmd_dump_message(message)
197#endif /* DEBUG_ASR_USR_CMD */
198
199#define dsDescription_size 46 /* Snug as a bug in a rug */
1f2de5d4 200#include "dptsig.h"
984263bc
MD
201
/*
 * DPT driver signature block used by DPT/Adaptec management tools to
 * identify this driver: magic bytes 'dPtSiG', supported processor and
 * OS flags, capability bits, the driver version/date stamp, and a
 * human-readable description (patched at attach time, see below).
 */
static dpt_sig_S ASR_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
	OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL,
	ADF_ALL_SC5,
	0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
	ASR_MONTH, ASR_DAY, ASR_YEAR,
/* 01234567890123456789012345678901234567890123456789 < 50 chars */
	"Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
	/* ^^^^^ asr_attach alters these to match OS */
};
213
214#include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */
215#include <sys/kernel.h>
216#include <sys/systm.h>
217#include <sys/malloc.h>
218#include <sys/proc.h>
219#include <sys/conf.h>
220#include <sys/disklabel.h>
221#include <sys/bus.h>
222#include <machine/resource.h>
223#include <machine/bus.h>
224#include <sys/rman.h>
225#include <sys/stat.h>
f15db79e 226#include <sys/device.h>
984263bc 227
1f2de5d4
MD
228#include <bus/cam/cam.h>
229#include <bus/cam/cam_ccb.h>
230#include <bus/cam/cam_sim.h>
231#include <bus/cam/cam_xpt_sim.h>
232#include <bus/cam/cam_xpt_periph.h>
984263bc 233
1f2de5d4
MD
234#include <bus/cam/scsi/scsi_all.h>
235#include <bus/cam/scsi/scsi_message.h>
984263bc
MD
236
237#include <vm/vm.h>
238#include <vm/pmap.h>
239#include <machine/cputypes.h>
240#include <machine/clock.h>
241#include <i386/include/vmparam.h>
242
1f2de5d4
MD
243#include <bus/pci/pcivar.h>
244#include <bus/pci/pcireg.h>
984263bc
MD
245
246#define STATIC static
247#define INLINE
248
249#if (defined(DEBUG_ASR) && (DEBUG_ASR > 0))
250# undef STATIC
251# define STATIC
252# undef INLINE
253# define INLINE
254#endif
255#define IN
256#define OUT
257#define INOUT
258
259#define osdSwap4(x) ((u_long)ntohl((u_long)(x)))
260#define KVTOPHYS(x) vtophys(x)
1f2de5d4
MD
261#include "dptalign.h"
262#include "i2oexec.h"
263#include "i2obscsi.h"
264#include "i2odpt.h"
265#include "i2oadptr.h"
984263bc
MD
266#include "opt_asr.h"
267
1f2de5d4 268#include "sys_info.h"
984263bc
MD
269
270/* Configuration Definitions */
271
272#define SG_SIZE 58 /* Scatter Gather list Size */
273#define MAX_TARGET_ID 126 /* Maximum Target ID supported */
274#define MAX_LUN 255 /* Maximum LUN Supported */
275#define MAX_CHANNEL 7 /* Maximum Channel # Supported by driver */
276#define MAX_INBOUND 2000 /* Max CCBs, Also Max Queue Size */
277#define MAX_OUTBOUND 256 /* Maximum outbound frames/adapter */
278#define MAX_INBOUND_SIZE 512 /* Maximum inbound frame size */
279#define MAX_MAP 4194304L /* Maximum mapping size of IOP */
280 /* Also serves as the minimum map for */
281 /* the 2005S zero channel RAID product */
282
283/**************************************************************************
284** ASR Host Adapter structure - One Structure For Each Host Adapter That **
285** Is Configured Into The System. The Structure Supplies Configuration **
286** Information, Status Info, Queue Info And An Active CCB List Pointer. **
287***************************************************************************/
288
/* I2O register set (memory-mapped adapter registers) */
typedef struct {
	U8 Address[0x30];		/* leading register space, unused here */
	volatile U32 Status;		/* NOTE(review): presumably IOP status; not read in this chunk */
	volatile U32 Mask;		/* interrupt mask; see Mask_InterruptsDisabled uses below */
#	define Mask_InterruptsDisabled 0x08
	U32 x[2];			/* padding up to the FIFO registers */
	volatile U32 ToFIFO; /* In Bound FIFO */
	volatile U32 FromFIFO; /* Out Bound FIFO */
} i2oRegs_t;
299
/*
 * A MIX of performance and space considerations for TID lookups
 */
typedef u_int16_t tid_t;

/*
 * LUN -> TID map for one target.  TID[1] is an old-style variable
 * length trailing array: presumably over-allocated to hold `size'
 * entries (up to MAX_LUN) -- TODO confirm at the allocation sites.
 */
typedef struct {
	u_int32_t size; /* up to MAX_LUN */
	tid_t TID[1];
} lun2tid_t;

/* Target -> per-LUN map for one channel; same trailing-array scheme. */
typedef struct {
	u_int32_t size; /* up to MAX_TARGET */
	lun2tid_t * LUN[1];
} target2lun_t;
314
/*
 * To ensure that we only allocate and use the worst case ccb here, lets
 * make our own local ccb union. If asr_alloc_ccb is utilized for another
 * ccb type, ensure that you add the additional structures into our local
 * ccb union. To ensure strict type checking, we will utilize the local
 * ccb definition wherever possible.
 */
union asr_ccb {
	struct ccb_hdr ccb_h; /* For convenience */
	struct ccb_scsiio csio; /* SCSI I/O requests */
	struct ccb_setasync csa; /* async event registration */
};
327
/*
 * Per-adapter driver state, one instance for each controller found.
 * Holds the register/frame mappings, the list of ccbs in flight,
 * per-channel CAM state, cached device (LCT/TID) tables and the
 * configuration limits negotiated with the adapter.
 */
typedef struct Asr_softc {
	u_int16_t ha_irq;
	void * ha_Base; /* base port for each board */
	u_int8_t * volatile ha_blinkLED;
	i2oRegs_t * ha_Virt; /* Base address of IOP */
	U8 * ha_Fvirt; /* Base address of Frames */
	I2O_IOP_ENTRY ha_SystemTable;
	LIST_HEAD(,ccb_hdr) ha_ccb; /* ccbs in use */
	struct cam_path * ha_path[MAX_CHANNEL+1];
	struct cam_sim * ha_sim[MAX_CHANNEL+1];
#if defined(__DragonFly__) || __FreeBSD_version >= 400000
	struct resource * ha_mem_res;
	struct resource * ha_mes_res;
	struct resource * ha_irq_res;
	void * ha_intr;
#endif
	PI2O_LCT ha_LCT; /* Complete list of devices */
	/* Accessor shorthands into an LCT entry's IdentityTag bytes */
#	define le_type IdentityTag[0]
#	define I2O_BSA 0x20
#	define I2O_FCA 0x40
#	define I2O_SCSI 0x00
#	define I2O_PORT 0x80
#	define I2O_UNKNOWN 0x7F
#	define le_bus IdentityTag[1]
#	define le_target IdentityTag[2]
#	define le_lun IdentityTag[3]
	target2lun_t * ha_targets[MAX_CHANNEL+1];
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
	u_long ha_Msgs_Phys;

	u_int8_t ha_in_reset;	/* adapter recovery state machine, below */
#	define HA_OPERATIONAL 0
#	define HA_IN_RESET 1
#	define HA_OFF_LINE 2
#	define HA_OFF_LINE_RECOVERY 3
	/* Configuration information */
	/* The target id maximums we take */
	u_int8_t ha_MaxBus; /* Maximum bus */
	u_int8_t ha_MaxId; /* Maximum target ID */
	u_int8_t ha_MaxLun; /* Maximum target LUN */
	u_int8_t ha_SgSize; /* Max SG elements */
	u_int8_t ha_pciBusNum;
	u_int8_t ha_pciDeviceNum;
	u_int8_t ha_adapter_target[MAX_CHANNEL+1];
	u_int16_t ha_QueueSize; /* Max outstanding commands */
	u_int16_t ha_Msgs_Count;

	/* Links into other parents and HBAs */
	struct Asr_softc * ha_next; /* HBA list */

#ifdef ASR_MEASURE_PERFORMANCE
#define MAX_TIMEQ_SIZE 256 // assumes MAX 256 scsi commands sent
	asr_perf_t ha_performance;
	u_int32_t ha_submitted_ccbs_count;

	// Queueing macros for a circular queue
#define TIMEQ_FREE_LIST_EMPTY(head, tail) (-1 == (head) && -1 == (tail))
#define TIMEQ_FREE_LIST_FULL(head, tail) ((((tail) + 1) % MAX_TIMEQ_SIZE) == (head))
#define ENQ_TIMEQ_FREE_LIST(item, Q, head, tail) \
	if (!TIMEQ_FREE_LIST_FULL((head), (tail))) { \
		if TIMEQ_FREE_LIST_EMPTY((head),(tail)) { \
			(head) = (tail) = 0; \
		} \
		else (tail) = ((tail) + 1) % MAX_TIMEQ_SIZE; \
		Q[(tail)] = (item); \
	} \
	else { \
		debug_asr_printf("asr: Enqueueing when TimeQ Free List is full... This should not happen!\n"); \
	}
#define DEQ_TIMEQ_FREE_LIST(item, Q, head, tail) \
	if (!TIMEQ_FREE_LIST_EMPTY((head), (tail))) { \
		item = Q[(head)]; \
		if ((head) == (tail)) { (head) = (tail) = -1; } \
		else (head) = ((head) + 1) % MAX_TIMEQ_SIZE; \
	} \
	else { \
		(item) = -1; \
		debug_asr_printf("asr: Dequeueing when TimeQ Free List is empty... This should not happen!\n"); \
	}

	// Circular queue of time stamps
	struct timeval ha_timeQ[MAX_TIMEQ_SIZE];
	u_int32_t ha_timeQFreeList[MAX_TIMEQ_SIZE];
	int ha_timeQFreeHead;
	int ha_timeQFreeTail;
#endif
} Asr_softc_t;

/* Head of the linked list (via ha_next) of all attached adapters. */
STATIC Asr_softc_t * Asr_softc;
417
418/*
419 * Prototypes of the routines we have in this object.
420 */
421
422/* Externally callable routines */
42cdd4ab 423#if defined(__DragonFly__) || __FreeBSD_version >= 400000
984263bc
MD
424#define PROBE_ARGS IN device_t tag
425#define PROBE_RET int
426#define PROBE_SET() u_long id = (pci_get_device(tag)<<16)|pci_get_vendor(tag)
427#define PROBE_RETURN(retval) if(retval){device_set_desc(tag,retval);return(0);}else{return(ENXIO);}
428#define ATTACH_ARGS IN device_t tag
429#define ATTACH_RET int
430#define ATTACH_SET() int unit = device_get_unit(tag)
431#define ATTACH_RETURN(retval) return(retval)
432#else
433#define PROBE_ARGS IN pcici_t tag, IN pcidi_t id
434#define PROBE_RET const char *
435#define PROBE_SET()
436#define PROBE_RETURN(retval) return(retval)
437#define ATTACH_ARGS IN pcici_t tag, IN int unit
438#define ATTACH_RET void
439#define ATTACH_SET()
440#define ATTACH_RETURN(retval) return
441#endif
442/* I2O HDM interface */
5ca58d54
RG
443STATIC PROBE_RET asr_probe (PROBE_ARGS);
444STATIC ATTACH_RET asr_attach (ATTACH_ARGS);
984263bc 445/* DOMINO placeholder */
5ca58d54
RG
446STATIC PROBE_RET domino_probe (PROBE_ARGS);
447STATIC ATTACH_RET domino_attach (ATTACH_ARGS);
984263bc 448/* MODE0 adapter placeholder */
5ca58d54
RG
449STATIC PROBE_RET mode0_probe (PROBE_ARGS);
450STATIC ATTACH_RET mode0_attach (ATTACH_ARGS);
984263bc 451
5ca58d54
RG
452STATIC Asr_softc_t * ASR_get_sc (
453 IN dev_t dev);
454STATIC int asr_ioctl (
984263bc
MD
455 IN dev_t dev,
456 IN u_long cmd,
457 INOUT caddr_t data,
458 int flag,
5ca58d54
RG
459 d_thread_t *td);
460STATIC int asr_open (
984263bc
MD
461 IN dev_t dev,
462 int32_t flags,
463 int32_t ifmt,
5ca58d54
RG
464 IN d_thread_t *td);
465STATIC int asr_close (
984263bc
MD
466 dev_t dev,
467 int flags,
468 int ifmt,
5ca58d54
RG
469 d_thread_t *td);
470STATIC int asr_intr (
471 IN Asr_softc_t * sc);
472STATIC void asr_timeout (
473 INOUT void * arg);
474STATIC int ASR_init (
475 IN Asr_softc_t * sc);
476STATIC INLINE int ASR_acquireLct (
477 INOUT Asr_softc_t * sc);
478STATIC INLINE int ASR_acquireHrt (
479 INOUT Asr_softc_t * sc);
480STATIC void asr_action (
984263bc 481 IN struct cam_sim * sim,
5ca58d54
RG
482 IN union ccb * ccb);
483STATIC void asr_poll (
484 IN struct cam_sim * sim);
984263bc
MD
485
486/*
487 * Here is the auto-probe structure used to nest our tests appropriately
488 * during the startup phase of the operating system.
489 */
42cdd4ab 490#if defined(__DragonFly__) || __FreeBSD_version >= 400000
984263bc
MD
491STATIC device_method_t asr_methods[] = {
492 DEVMETHOD(device_probe, asr_probe),
493 DEVMETHOD(device_attach, asr_attach),
494 { 0, 0 }
495};
496
497STATIC driver_t asr_driver = {
498 "asr",
499 asr_methods,
500 sizeof(Asr_softc_t)
501};
502
503STATIC devclass_t asr_devclass;
504
32832096 505DECLARE_DUMMY_MODULE(asr);
984263bc
MD
506DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);
507
508STATIC device_method_t domino_methods[] = {
509 DEVMETHOD(device_probe, domino_probe),
510 DEVMETHOD(device_attach, domino_attach),
511 { 0, 0 }
512};
513
514STATIC driver_t domino_driver = {
515 "domino",
516 domino_methods,
517 0
518};
519
520STATIC devclass_t domino_devclass;
521
522DRIVER_MODULE(domino, pci, domino_driver, domino_devclass, 0, 0);
523
524STATIC device_method_t mode0_methods[] = {
525 DEVMETHOD(device_probe, mode0_probe),
526 DEVMETHOD(device_attach, mode0_attach),
527 { 0, 0 }
528};
529
530STATIC driver_t mode0_driver = {
531 "mode0",
532 mode0_methods,
533 0
534};
535
536STATIC devclass_t mode0_devclass;
537
538DRIVER_MODULE(mode0, pci, mode0_driver, mode0_devclass, 0, 0);
539#else
540STATIC u_long asr_pcicount = 0;
541STATIC struct pci_device asr_pcidev = {
542 "asr",
543 asr_probe,
544 asr_attach,
545 &asr_pcicount,
546 NULL
547};
548DATA_SET (asr_pciset, asr_pcidev);
549
550STATIC u_long domino_pcicount = 0;
551STATIC struct pci_device domino_pcidev = {
552 "domino",
553 domino_probe,
554 domino_attach,
555 &domino_pcicount,
556 NULL
557};
558DATA_SET (domino_pciset, domino_pcidev);
559
560STATIC u_long mode0_pcicount = 0;
561STATIC struct pci_device mode0_pcidev = {
562 "mode0",
563 mode0_probe,
564 mode0_attach,
565 &mode0_pcicount,
566 NULL
567};
568DATA_SET (mode0_pciset, mode0_pcidev);
569#endif
570
571/*
572 * devsw for asr hba driver
573 *
574 * only ioctl is used. the sd driver provides all other access.
575 */
576#define CDEV_MAJOR 154 /* prefered default character major */
/*
 * Positional initializer: the field order must match the cdevsw
 * declaration in <sys/conf.h>.  Only open/close/ioctl do real work;
 * everything else is a no-op stub.
 */
STATIC struct cdevsw asr_cdevsw = {
	"asr", /* name */
	CDEV_MAJOR, /* maj */
	0, /* flags */
	NULL, /* port */
	0, /* auto */

	asr_open, /* open */
	asr_close, /* close */
	noread, /* read */
	nowrite, /* write */
	asr_ioctl, /* ioctl */
	nopoll, /* poll */
	nommap, /* mmap */
	nostrategy, /* strategy */
	nodump, /* dump */
	nopsize /* psize */
};
595
596#ifdef ASR_MEASURE_PERFORMANCE
5ca58d54
RG
597STATIC u_int32_t asr_time_delta (IN struct timeval start,
598 IN struct timeval end);
984263bc
MD
599#endif
600
601/*
602 * Initialize the dynamic cdevsw hooks.
603 */
STATIC void
asr_drvinit (
	void * unused)
{
	/* One-shot guard: SYSINIT must only install the devsw once. */
	static int asr_devsw_installed = 0;

	if (asr_devsw_installed) {
		return;
	}
	asr_devsw_installed++;
	/*
	 * Find a free spot (the report during driver load used by
	 * osd layer in engine to generate the controlling nodes).
	 * Scan upward from the preferred CDEV_MAJOR for an unclaimed
	 * major number.
	 */
	while ((asr_cdevsw.d_maj < NUMCDEVSW)
	 && (dev_dport(makedev(asr_cdevsw.d_maj,0)) != NULL)) {
		++asr_cdevsw.d_maj;
	}
	/* If the upper range was exhausted, retry from 0 up to CDEV_MAJOR. */
	if (asr_cdevsw.d_maj >= NUMCDEVSW) for (
	 asr_cdevsw.d_maj = 0;
	 (asr_cdevsw.d_maj < CDEV_MAJOR)
	 && (dev_dport(makedev(asr_cdevsw.d_maj,0)) != NULL);
	 ++asr_cdevsw.d_maj);
	/*
	 * Come to papa: register our character device switch.
	 */
	cdevsw_add(&asr_cdevsw);
	/*
	 * delete any nodes that would attach to the primary adapter,
	 * let the adapter scans add them.
	 */
	destroy_dev(makedev(asr_cdevsw.d_maj,0));
} /* asr_drvinit */
637
638/* Must initialize before CAM layer picks up our HBA driver */
639SYSINIT(asrdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,asr_drvinit,NULL)
640
641/* I2O support routines */
642#define defAlignLong(STRUCT,NAME) char NAME[sizeof(STRUCT)]
643#define getAlignLong(STRUCT,NAME) ((STRUCT *)(NAME))
644
645/*
646 * Fill message with default.
647 */
648STATIC PI2O_MESSAGE_FRAME
649ASR_fillMessage (
650 IN char * Message,
651 IN u_int16_t size)
652{
653 OUT PI2O_MESSAGE_FRAME Message_Ptr;
654
655 Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message);
656 bzero ((void *)Message_Ptr, size);
657 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
658 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
659 (size + sizeof(U32) - 1) >> 2);
660 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
661 return (Message_Ptr);
662} /* ASR_fillMessage */
663
664#define EMPTY_QUEUE ((U32)-1L)
665
/*
 * Pop the next free message-frame offset from the adapter's inbound
 * FIFO.  Returns EMPTY_QUEUE if none is available.
 */
STATIC INLINE U32
ASR_getMessage(
	IN i2oRegs_t * virt)
{
	OUT U32 MessageOffset;

	/*
	 * If the first read reports empty, retry the read once.
	 * NOTE(review): presumably the hardware can momentarily report
	 * empty while replenishing the FIFO -- confirm against the I2O
	 * IOP documentation before changing this.
	 */
	if ((MessageOffset = virt->ToFIFO) == EMPTY_QUEUE) {
		MessageOffset = virt->ToFIFO;
	}
	return (MessageOffset);
} /* ASR_getMessage */
677
678/* Issue a polled command */
/*
 * Post a message frame to the adapter for polled (synchronous) use.
 * On success, interrupts are left DISABLED and the previous interrupt
 * mask is returned so the caller can restore it; on failure (no frame
 * became available) returns (U32)-1L.
 */
STATIC U32
ASR_initiateCp (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt,
	IN PI2O_MESSAGE_FRAME Message)
{
	OUT U32 Mask = -1L;		/* failure sentinel until we post */
	U32 MessageOffset;
	u_int Delay = 1500;		/* 1500 * 10ms = up to ~15s of polling */

	/*
	 * ASR_initiateCp is only used for synchronous commands and will
	 * be made more resiliant to adapter delays since commands like
	 * resetIOP can cause the adapter to be deaf for a little time.
	 */
	while (((MessageOffset = ASR_getMessage(virt)) == EMPTY_QUEUE)
	 && (--Delay != 0)) {
		DELAY (10000);
	}
	if (MessageOffset != EMPTY_QUEUE) {
		/* Copy the frame into the adapter's frame window ... */
		bcopy (Message, fvirt + MessageOffset,
		 I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
		/*
		 * Disable the Interrupts (saving the old mask), then hand
		 * the frame to the adapter by writing its offset back.
		 */
		virt->Mask = (Mask = virt->Mask) | Mask_InterruptsDisabled;
		virt->ToFIFO = MessageOffset;
	}
	return (Mask);
} /* ASR_initiateCp */
709
710/*
711 * Reset the adapter.
712 */
/*
 * Issue an ExecIopReset and poll for completion.  Returns the non-zero
 * status word written back by the adapter, or 0 if the message could
 * not be posted at all.
 */
STATIC U32
ASR_resetIOP (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt)
{
	/* Reset message with one extra U32 appended for the status word. */
	struct resetMessage {
		I2O_EXEC_IOP_RESET_MESSAGE M;
		U32 R;
	};
	defAlignLong(struct resetMessage,Message);
	PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
	OUT U32 * volatile Reply_Ptr;	/* volatile: written by the adapter via DMA */
	U32 Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(Message,
	 sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
	/*
	 * Reset the Reply Status (zero == no reply yet); the adapter is
	 * given its physical address to write the completion status into.
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	 + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0;
	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
	 KVTOPHYS((void *)Reply_Ptr));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 2s.
		 */
		u_int8_t Delay = 200;	/* 200 * 10ms = 2s */

		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
			DELAY (10000);
		}
		/*
		 * Re-enable the interrupts.
		 */
		virt->Mask = Old;
		ASSERT (*Reply_Ptr);
		return (*Reply_Ptr);
	}
	ASSERT (Old != (U32)-1L);
	return (0);
} /* ASR_resetIOP */
763
764/*
765 * Get the curent state of the adapter
766 */
/*
 * Issue an ExecStatusGet and poll (up to 255ms, see V1.08 changelog)
 * for the adapter to fill in `buffer'.  Returns `buffer' on success or
 * NULL on timeout / failure to post the message.
 */
STATIC INLINE PI2O_EXEC_STATUS_GET_REPLY
ASR_getStatus (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt,
	OUT PI2O_EXEC_STATUS_GET_REPLY buffer)
{
	defAlignLong(I2O_EXEC_STATUS_GET_MESSAGE,Message);
	PI2O_EXEC_STATUS_GET_MESSAGE Message_Ptr;
	U32 Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(Message,
	 sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
	 I2O_EXEC_STATUS_GET);
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
	 KVTOPHYS((void *)buffer));
	/* This one is a Byte Count */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
	 sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Reset the Reply Status (the adapter DMAs the reply into buffer;
	 * a non-zero SyncByte signals completion).
	 */
	bzero ((void *)buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 50ms.
		 */
		u_int8_t Delay = 255;	/* 255 * 1ms */

		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
			if (--Delay == 0) {
				buffer = (PI2O_EXEC_STATUS_GET_REPLY)NULL;
				break;
			}
			DELAY (1000);
		}
		/*
		 * Re-enable the interrupts.
		 */
		virt->Mask = Old;
		return (buffer);
	}
	return ((PI2O_EXEC_STATUS_GET_REPLY)NULL);
} /* ASR_getStatus */
818
819/*
820 * Check if the device is a SCSI I2O HBA, and add it to the list.
821 */
822
823/*
824 * Probe for ASR controller. If we find it, we will use it.
825 * virtual adapters.
826 */
827STATIC PROBE_RET
828asr_probe(PROBE_ARGS)
829{
830 PROBE_SET();
831 if ((id == 0xA5011044) || (id == 0xA5111044)) {
832 PROBE_RETURN ("Adaptec Caching SCSI RAID");
833 }
834 PROBE_RETURN (NULL);
835} /* asr_probe */
836
837/*
838 * Probe/Attach for DOMINO chipset.
839 */
840STATIC PROBE_RET
841domino_probe(PROBE_ARGS)
842{
843 PROBE_SET();
844 if (id == 0x10121044) {
845 PROBE_RETURN ("Adaptec Caching Memory Controller");
846 }
847 PROBE_RETURN (NULL);
848} /* domino_probe */
849
/* Placeholder attach: succeed without performing any device setup. */
STATIC ATTACH_RET
domino_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* domino_attach */
855
856/*
857 * Probe/Attach for MODE0 adapters.
858 */
859STATIC PROBE_RET
860mode0_probe(PROBE_ARGS)
861{
862 PROBE_SET();
863
864 /*
865 * If/When we can get a business case to commit to a
866 * Mode0 driver here, we can make all these tests more
867 * specific and robust. Mode0 adapters have their processors
868 * turned off, this the chips are in a raw state.
869 */
870
871 /* This is a PLX9054 */
872 if (id == 0x905410B5) {
873 PROBE_RETURN ("Adaptec Mode0 PM3757");
874 }
875 /* This is a PLX9080 */
876 if (id == 0x908010B5) {
877 PROBE_RETURN ("Adaptec Mode0 PM3754/PM3755");
878 }
879 /* This is a ZION 80303 */
880 if (id == 0x53098086) {
881 PROBE_RETURN ("Adaptec Mode0 3010S");
882 }
883 /* This is an i960RS */
884 if (id == 0x39628086) {
885 PROBE_RETURN ("Adaptec Mode0 2100S");
886 }
887 /* This is an i960RN */
888 if (id == 0x19648086) {
889 PROBE_RETURN ("Adaptec Mode0 PM2865/2400A/3200S/3400S");
890 }
891#if 0 /* this would match any generic i960 -- mjs */
892 /* This is an i960RP (typically also on Motherboards) */
893 if (id == 0x19608086) {
894 PROBE_RETURN ("Adaptec Mode0 PM2554/PM1554/PM2654");
895 }
896#endif
897 PROBE_RETURN (NULL);
898} /* mode0_probe */
899
/* Placeholder attach: succeed without performing any device setup. */
STATIC ATTACH_RET
mode0_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* mode0_attach */
905
906STATIC INLINE union asr_ccb *
907asr_alloc_ccb (
908 IN Asr_softc_t * sc)
909{
910 OUT union asr_ccb * new_ccb;
911
912 if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb),
913 M_DEVBUF, M_WAITOK)) != (union asr_ccb *)NULL) {
914 bzero (new_ccb, sizeof(*new_ccb));
915 new_ccb->ccb_h.pinfo.priority = 1;
916 new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
917 new_ccb->ccb_h.spriv_ptr0 = sc;
918 }
919 return (new_ccb);
920} /* asr_alloc_ccb */
921
/* Release a ccb obtained from asr_alloc_ccb. */
STATIC INLINE void
asr_free_ccb (
	IN union asr_ccb * free_ccb)
{
	free(free_ccb, M_DEVBUF);
} /* asr_free_ccb */
928
929/*
930 * Print inquiry data `carefully'
931 */
932STATIC void
933ASR_prstring (
934 u_int8_t * s,
935 int len)
936{
937 while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
938 printf ("%c", *(s++));
939 }
940} /* ASR_prstring */
941
942/*
943 * Prototypes
944 */
5ca58d54 945STATIC INLINE int ASR_queue (
984263bc 946 IN Asr_softc_t * sc,
5ca58d54 947 IN PI2O_MESSAGE_FRAME Message);
984263bc
MD
948/*
949 * Send a message synchronously and without Interrupt to a ccb.
950 */
/*
 * Send a message tied to `ccb' and busy-wait (interrupts masked,
 * polling asr_intr by hand) until the board completes it.  Returns the
 * final ccb status.
 */
STATIC int
ASR_queue_s (
	INOUT union asr_ccb * ccb,
	IN PI2O_MESSAGE_FRAME Message)
{
	int s;
	U32 Mask;
	Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	/* Prevent interrupt service (spl AND the adapter's own mask) */
	s = splcam ();
	sc->ha_Virt->Mask = (Mask = sc->ha_Virt->Mask)
	 | Mask_InterruptsDisabled;

	/* A full inbound queue means the command never started. */
	if (ASR_queue (sc, Message) == EMPTY_QUEUE) {
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	}

	/*
	 * Wait for this board to report a finished instruction.
	 */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		(void)asr_intr (sc);
	}

	/* Re-enable Interrupts */
	sc->ha_Virt->Mask = Mask;
	splx(s);

	return (ccb->ccb_h.status);
} /* ASR_queue_s */
989
990/*
991 * Send a message synchronously to a Asr_softc_t
992 */
993STATIC int
994ASR_queue_c (
995 IN Asr_softc_t * sc,
996 IN PI2O_MESSAGE_FRAME Message)
997{
998 union asr_ccb * ccb;
999 OUT int status;
1000
1001 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
1002 return (CAM_REQUEUE_REQ);
1003 }
1004
1005 status = ASR_queue_s (ccb, Message);
1006
1007 asr_free_ccb(ccb);
1008
1009 return (status);
1010} /* ASR_queue_c */
1011
1012/*
1013 * Add the specified ccb to the active queue
1014 */
/*
 * Insert `ccb' on the softc's active list and arm its timeout.
 * splcam protection is required here (see the V1.04 changelog entry
 * about locking in ASR_ccbAdd/ASR_ccbRemove).
 */
STATIC INLINE void
ASR_ccbAdd (
	IN Asr_softc_t * sc,
	INOUT union asr_ccb * ccb)
{
	int s;

	s = splcam();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flashes switching from write back to write thru.
			 */
			ccb->ccb_h.timeout = 6 * 60 * 1000;	/* 6 minutes */
		}
		/* timeout is in ms; timeout(9) wants ticks. */
		ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
		 (ccb->ccb_h.timeout * hz) / 1000);
	}
	splx(s);
} /* ASR_ccbAdd */
1038
/*
 * Remove the specified ccb from the active queue.
 *
 * The timeout is disarmed before the ccb is unlinked so a late-firing
 * timer cannot reference a dequeued ccb.
 */
STATIC INLINE void
ASR_ccbRemove (
	IN Asr_softc_t	    * sc,
	INOUT union asr_ccb * ccb)
{
	int s;

	s = splcam();
	untimeout(asr_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
	splx(s);
} /* ASR_ccbRemove */
1054
/*
 * Fail all the active commands, so they get re-issued by the operating
 * system.
 *
 * Every ccb still on the active list is dequeued, marked
 * CAM_REQUEUE_REQ with a full residual, and either completed back to
 * CAM (normal requests carry a path) or woken up (internal synchronous
 * requests sleep on the ccb address).
 */
STATIC INLINE void
ASR_failActiveCommands (
	IN Asr_softc_t	* sc)
{
	struct ccb_hdr	* ccb;
	int		  s;

#if 0 /* Currently handled by callers, unnecessary paranoia currently */
      /* Left in for historical perspective. */
	defAlignLong(I2O_EXEC_LCT_NOTIFY_MESSAGE,Message);
	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;

	/* Send a blind LCT command to wait for the enableSys to complete */
	Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	  I2O_CLASS_MATCH_ANYCLASS);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
#endif

	s = splcam();
	/*
	 * We do not need to inform the CAM layer that we had a bus
	 * reset since we manage it on our own, this also prevents the
	 * SCSI_DELAY settling that would be required on other systems.
	 * The `SCSI_DELAY' has already been handled by the card via the
	 * acquisition of the LCT table while we are at CAM priority level.
	 *   for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
	 *   }
	 */
	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != (struct ccb_hdr *)NULL) {
		/* Disarm its timeout and unlink it from the active list. */
		ASR_ccbRemove (sc, (union asr_ccb *)ccb);

		ccb->status &= ~CAM_STATUS_MASK;
		ccb->status |= CAM_REQUEUE_REQ;
		/* Nothing Transfered */
		((struct ccb_scsiio *)ccb)->resid
		  = ((struct ccb_scsiio *)ccb)->dxfer_len;

		if (ccb->path) {
			/* Normal CAM request: complete it back to the XPT. */
			xpt_done ((union ccb *)ccb);
		} else {
			/* Internal synchronous request: wake the sleeper. */
			wakeup ((caddr_t)ccb);
		}
	}
	splx(s);
} /* ASR_failActiveCommands */
1109
/*
 * The following command causes the HBA to reset the specific bus
 *
 * Locates the I2O_PORT entry for `bus' in the LCT and fires an
 * asynchronous I2O_HBA_BUS_RESET at its TID; no completion is awaited.
 */
STATIC INLINE void
ASR_resetBus(
	IN Asr_softc_t	* sc,
	IN int		  bus)
{
	defAlignLong(I2O_HBA_BUS_RESET_MESSAGE,Message);
	I2O_HBA_BUS_RESET_MESSAGE * Message_Ptr;
	PI2O_LCT_ENTRY		    Device;

	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
	  I2O_HBA_BUS_RESET);
	/* Walk the LCT looking for the port object that owns this bus. */
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Device) {
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus == bus)) {
			I2O_MESSAGE_FRAME_setTargetAddress(
			  &Message_Ptr->StdMessageFrame,
			  I2O_LCT_ENTRY_getLocalTID(Device));
			/* Asynchronous command, with no expectations */
			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
			break;
		}
	}
} /* ASR_resetBus */
1140
1141STATIC INLINE int
1142ASR_getBlinkLedCode (
1143 IN Asr_softc_t * sc)
1144{
1145 if ((sc != (Asr_softc_t *)NULL)
1146 && (sc->ha_blinkLED != (u_int8_t *)NULL)
1147 && (sc->ha_blinkLED[1] == 0xBC)) {
1148 return (sc->ha_blinkLED[0]);
1149 }
1150 return (0);
1151} /* ASR_getBlinkCode */
1152
/*
 * Determine the address of an TID lookup. Must be done at high priority
 * since the address can be changed by other threads of execution.
 *
 * Returns NULL pointer if not indexible (but will attempt to generate
 * an index if `new_entry' flag is set to TRUE).
 *
 * All addressible entries are to be guaranteed zero if never initialized.
 *
 * The lookup is a two-level table: sc->ha_targets[bus] points to a
 * target2lun_t holding per-target lun2tid_t pointers, each of which
 * holds the per-lun TIDs.  Both levels grow on demand in power-of-two
 * chunks when new_entry is TRUE.
 */
STATIC INLINE tid_t *
ASR_getTidAddress(
	INOUT Asr_softc_t * sc,
	IN int		    bus,
	IN int		    target,
	IN int		    lun,
	IN int		    new_entry)
{
	target2lun_t	* bus_ptr;
	lun2tid_t	* target_ptr;
	unsigned	  new_size;

	/*
	 * Validity checking of incoming parameters. More of a bound
	 * expansion limit than an issue with the code dealing with the
	 * values.
	 *
	 * sc must be valid before it gets here, so that check could be
	 * dropped if speed a critical issue.
	 */
	if ((sc == (Asr_softc_t *)NULL)
	 || (bus > MAX_CHANNEL)
	 || (target > sc->ha_MaxId)
	 || (lun > sc->ha_MaxLun)) {
		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
		  (u_long)sc, bus, target, lun);
		return ((tid_t *)NULL);
	}
	/*
	 * See if there is an associated bus list.
	 *
	 * for performance, allocate in size of BUS_CHUNK chunks.
	 * BUS_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
#	define BUS_CHUNK 8
	new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
	if ((bus_ptr = sc->ha_targets[bus]) == (target2lun_t *)NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		/*
		 * NOTE(review): malloc with M_WAITOK should not return
		 * NULL, so the NULL arm below looks like defensive dead
		 * code -- confirm before relying on it.
		 */
		if ((new_entry == FALSE)
		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK))
		   == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to allocate bus list\n");
			return ((tid_t *)NULL);
		}
		bzero (bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * new_size));
		bus_ptr->size = new_size + 1;
	} else if (bus_ptr->size <= new_size) {
		target2lun_t * new_bus_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK))
		   == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to reallocate bus list\n");
			return ((tid_t *)NULL);
		}
		/*
		 * Zero and copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bzero (new_bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * new_size));
		bcopy (bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
		sc->ha_targets[bus] = new_bus_ptr;
		free (bus_ptr, M_TEMP);
		bus_ptr = new_bus_ptr;
		bus_ptr->size = new_size + 1;
	}
	/*
	 * We now have the bus list, lets get to the target list.
	 * Since most systems have only *one* lun, we do not allocate
	 * in chunks as above, here we allow one, then in chunk sizes.
	 * TARGET_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
#	define TARGET_CHUNK 8
	if ((new_size = lun) != 0) {
		new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
	}
	if ((target_ptr = bus_ptr->LUN[target]) == (lun2tid_t *)NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK))
		   == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to allocate target list\n");
			return ((tid_t *)NULL);
		}
		bzero (target_ptr, sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * new_size));
		target_ptr->size = new_size + 1;
	} else if (target_ptr->size <= new_size) {
		lun2tid_t * new_target_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK))
		   == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to reallocate target list\n");
			return ((tid_t *)NULL);
		}
		/*
		 * Zero and copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bzero (new_target_ptr, sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * new_size));
		bcopy (target_ptr, new_target_ptr,
		  sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
		bus_ptr->LUN[target] = new_target_ptr;
		free (target_ptr, M_TEMP);
		target_ptr = new_target_ptr;
		target_ptr->size = new_size + 1;
	}
	/*
	 * Now, acquire the TID address from the LUN indexed list.
	 */
	return (&(target_ptr->TID[lun]));
} /* ASR_getTidAddress */
1308
1309/*
1310 * Get a pre-existing TID relationship.
1311 *
1312 * If the TID was never set, return (tid_t)-1.
1313 *
1314 * should use mutex rather than spl.
1315 */
1316STATIC INLINE tid_t
1317ASR_getTid (
1318 IN Asr_softc_t * sc,
1319 IN int bus,
1320 IN int target,
1321 IN int lun)
1322{
1323 tid_t * tid_ptr;
1324 int s;
1325 OUT tid_t retval;
1326
1327 s = splcam();
1328 if (((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, FALSE))
1329 == (tid_t *)NULL)
1330 /* (tid_t)0 or (tid_t)-1 indicate no TID */
1331 || (*tid_ptr == (tid_t)0)) {
1332 splx(s);
1333 return ((tid_t)-1);
1334 }
1335 retval = *tid_ptr;
1336 splx(s);
1337 return (retval);
1338} /* ASR_getTid */
1339
1340/*
1341 * Set a TID relationship.
1342 *
1343 * If the TID was not set, return (tid_t)-1.
1344 *
1345 * should use mutex rather than spl.
1346 */
1347STATIC INLINE tid_t
1348ASR_setTid (
1349 INOUT Asr_softc_t * sc,
1350 IN int bus,
1351 IN int target,
1352 IN int lun,
1353 INOUT tid_t TID)
1354{
1355 tid_t * tid_ptr;
1356 int s;
1357
1358 if (TID != (tid_t)-1) {
1359 if (TID == 0) {
1360 return ((tid_t)-1);
1361 }
1362 s = splcam();
1363 if ((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, TRUE))
1364 == (tid_t *)NULL) {
1365 splx(s);
1366 return ((tid_t)-1);
1367 }
1368 *tid_ptr = TID;
1369 splx(s);
1370 }
1371 return (TID);
1372} /* ASR_setTid */
1373
1374/*-------------------------------------------------------------------------*/
1375/* Function ASR_rescan */
1376/*-------------------------------------------------------------------------*/
1377/* The Parameters Passed To This Function Are : */
1378/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1379/* */
1380/* This Function Will rescan the adapter and resynchronize any data */
1381/* */
1382/* Return : 0 For OK, Error Code Otherwise */
1383/*-------------------------------------------------------------------------*/
1384
1385STATIC INLINE int
1386ASR_rescan(
1387 IN Asr_softc_t * sc)
1388{
1389 int bus;
1390 OUT int error;
1391
1392 /*
1393 * Re-acquire the LCT table and synchronize us to the adapter.
1394 */
1395 if ((error = ASR_acquireLct(sc)) == 0) {
1396 error = ASR_acquireHrt(sc);
1397 }
1398
1399 if (error != 0) {
1400 return error;
1401 }
1402
1403 bus = sc->ha_MaxBus;
1404 /* Reset all existing cached TID lookups */
1405 do {
1406 int target, event = 0;
1407
1408 /*
1409 * Scan for all targets on this bus to see if they
1410 * got affected by the rescan.
1411 */
1412 for (target = 0; target <= sc->ha_MaxId; ++target) {
1413 int lun;
1414
1415 /* Stay away from the controller ID */
1416 if (target == sc->ha_adapter_target[bus]) {
1417 continue;
1418 }
1419 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
1420 PI2O_LCT_ENTRY Device;
1421 tid_t TID = (tid_t)-1;
1422 tid_t LastTID;
1423
1424 /*
1425 * See if the cached TID changed. Search for
1426 * the device in our new LCT.
1427 */
1428 for (Device = sc->ha_LCT->LCTEntry;
1429 Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
1430 + I2O_LCT_getTableSize(sc->ha_LCT));
1431 ++Device) {
1432 if ((Device->le_type != I2O_UNKNOWN)
1433 && (Device->le_bus == bus)
1434 && (Device->le_target == target)
1435 && (Device->le_lun == lun)
1436 && (I2O_LCT_ENTRY_getUserTID(Device)
1437 == 0xFFF)) {
1438 TID = I2O_LCT_ENTRY_getLocalTID(
1439 Device);
1440 break;
1441 }
1442 }
1443 /*
1444 * Indicate to the OS that the label needs
1445 * to be recalculated, or that the specific
1446 * open device is no longer valid (Merde)
1447 * because the cached TID changed.
1448 */
1449 LastTID = ASR_getTid (sc, bus, target, lun);
1450 if (LastTID != TID) {
1451 struct cam_path * path;
1452
1453 if (xpt_create_path(&path,
1454 /*periph*/NULL,
1455 cam_sim_path(sc->ha_sim[bus]),
1456 target, lun) != CAM_REQ_CMP) {
1457 if (TID == (tid_t)-1) {
1458 event |= AC_LOST_DEVICE;
1459 } else {
1460 event |= AC_INQ_CHANGED
1461 | AC_GETDEV_CHANGED;
1462 }
1463 } else {
1464 if (TID == (tid_t)-1) {
1465 xpt_async(
1466 AC_LOST_DEVICE,
1467 path, NULL);
1468 } else if (LastTID == (tid_t)-1) {
1469 struct ccb_getdev ccb;
1470
1471 xpt_setup_ccb(
1472 &(ccb.ccb_h),
1473 path, /*priority*/5);
1474 xpt_async(
1475 AC_FOUND_DEVICE,
1476 path,
1477 &ccb);
1478 } else {
1479 xpt_async(
1480 AC_INQ_CHANGED,
1481 path, NULL);
1482 xpt_async(
1483 AC_GETDEV_CHANGED,
1484 path, NULL);
1485 }
1486 }
1487 }
1488 /*
1489 * We have the option of clearing the
1490 * cached TID for it to be rescanned, or to
1491 * set it now even if the device never got
1492 * accessed. We chose the later since we
1493 * currently do not use the condition that
1494 * the TID ever got cached.
1495 */
1496 ASR_setTid (sc, bus, target, lun, TID);
1497 }
1498 }
1499 /*
1500 * The xpt layer can not handle multiple events at the
1501 * same call.
1502 */
1503 if (event & AC_LOST_DEVICE) {
1504 xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
1505 }
1506 if (event & AC_INQ_CHANGED) {
1507 xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
1508 }
1509 if (event & AC_GETDEV_CHANGED) {
1510 xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
1511 }
1512 } while (--bus >= 0);
1513 return (error);
1514} /* ASR_rescan */
1515
1516/*-------------------------------------------------------------------------*/
1517/* Function ASR_reset */
1518/*-------------------------------------------------------------------------*/
1519/* The Parameters Passed To This Function Are : */
1520/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1521/* */
1522/* This Function Will reset the adapter and resynchronize any data */
1523/* */
1524/* Return : None */
1525/*-------------------------------------------------------------------------*/
1526
1527STATIC INLINE int
1528ASR_reset(
1529 IN Asr_softc_t * sc)
1530{
1531 int s, retVal;
1532
1533 s = splcam();
1534 if ((sc->ha_in_reset == HA_IN_RESET)
1535 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1536 splx (s);
1537 return (EBUSY);
1538 }
1539 /*
1540 * Promotes HA_OPERATIONAL to HA_IN_RESET,
1541 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1542 */
1543 ++(sc->ha_in_reset);
1544 if (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0) {
1545 debug_asr_printf ("ASR_resetIOP failed\n");
1546 /*
1547 * We really need to take this card off-line, easier said
1548 * than make sense. Better to keep retrying for now since if a
1549 * UART cable is connected the blinkLEDs the adapter is now in
1550 * a hard state requiring action from the monitor commands to
1551 * the HBA to continue. For debugging waiting forever is a
1552 * good thing. In a production system, however, one may wish
1553 * to instead take the card off-line ...
1554 */
1555# if 0 && (defined(HA_OFF_LINE))
1556 /*
1557 * Take adapter off-line.
1558 */
1559 printf ("asr%d: Taking adapter off-line\n",
1560 sc->ha_path[0]
1561 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1562 : 0);
1563 sc->ha_in_reset = HA_OFF_LINE;
1564 splx (s);
1565 return (ENXIO);
1566# else
1567 /* Wait Forever */
1568 while (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0);
1569# endif
1570 }
1571 retVal = ASR_init (sc);
1572 splx (s);
1573 if (retVal != 0) {
1574 debug_asr_printf ("ASR_init failed\n");
1575 sc->ha_in_reset = HA_OFF_LINE;
1576 return (ENXIO);
1577 }
1578 if (ASR_rescan (sc) != 0) {
1579 debug_asr_printf ("ASR_rescan failed\n");
1580 }
1581 ASR_failActiveCommands (sc);
1582 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1583 printf ("asr%d: Brining adapter back on-line\n",
1584 sc->ha_path[0]
1585 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1586 : 0);
1587 }
1588 sc->ha_in_reset = HA_OPERATIONAL;
1589 return (0);
1590} /* ASR_reset */
1591
/*
 * Device timeout handler.
 *
 * Escalation policy: if the adapter reports a Blink LED fault code the
 * whole adapter is reset; the first timeout of a command triggers a bus
 * reset (marking the ccb CAM_CMD_TIMEOUT); a second timeout of the same
 * command escalates to a full adapter reset.
 */
STATIC void
asr_timeout(
	INOUT void  * arg)
{
	union asr_ccb	* ccb = (union asr_ccb *)arg;
	Asr_softc_t	* sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	int		  s;

	debug_asr_print_path(ccb);
	debug_asr_printf("timed out");

	/*
	 * Check if the adapter has locked up?
	 */
	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
		/* Reset Adapter */
		printf ("asr%d: Blink LED 0x%x resetting adapter\n",
		  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
		if (ASR_reset (sc) == ENXIO) {
			/* Try again later */
			ccb->ccb_h.timeout_ch = timeout(asr_timeout,
			  (caddr_t)ccb,
			  (ccb->ccb_h.timeout * hz) / 1000);
		}
		return;
	}
	/*
	 * Abort does not function on the ASR card!!! Walking away from
	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
	 * our best bet, followed by a complete adapter reset if that fails.
	 */
	s = splcam();
	/* Check if we already timed out once to raise the issue */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
		if (ASR_reset (sc) == ENXIO) {
			/* Reset failed; rearm and retry later. */
			ccb->ccb_h.timeout_ch = timeout(asr_timeout,
			  (caddr_t)ccb,
			  (ccb->ccb_h.timeout * hz) / 1000);
		}
		splx(s);
		return;
	}
	debug_asr_printf ("\nresetting bus\n");
	/* If the BUS reset does not take, then an adapter reset is next! */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
	ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
	  (ccb->ccb_h.timeout * hz) / 1000);
	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
	xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
	splx(s);
} /* asr_timeout */
1648
/*
 * send a message asynchronously
 *
 * Grabs an inbound message frame from the adapter, copies `Message'
 * into it and posts it on the inbound FIFO.  Returns the frame offset,
 * or EMPTY_QUEUE if no frame was available (in which case an adapter
 * reset is attempted when a Blink LED fault is present).
 */
STATIC INLINE int
ASR_queue(
	IN Asr_softc_t	    * sc,
	IN PI2O_MESSAGE_FRAME Message)
{
	OUT U32		  MessageOffset;
	union asr_ccb	* ccb;

	debug_asr_printf ("Host Command Dump:\n");
	debug_asr_dump_message (Message);

	/* Recover the ccb (if any) stashed in the initiator context. */
	ccb = (union asr_ccb *)(long)
	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);

	if ((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE) {
#ifdef ASR_MEASURE_PERFORMANCE
		int	startTimeIndex;

		if (ccb) {
			++sc->ha_performance.command_count[
			  (int) ccb->csio.cdb_io.cdb_bytes[0]];
			DEQ_TIMEQ_FREE_LIST(startTimeIndex,
			  sc->ha_timeQFreeList,
			  sc->ha_timeQFreeHead,
			  sc->ha_timeQFreeTail);
			if (-1 != startTimeIndex) {
				microtime(&(sc->ha_timeQ[startTimeIndex]));
			}
			/* Time stamp the command before we send it out */
			((PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *) Message)->
			  PrivateMessageFrame.TransactionContext
			    = (I2O_TRANSACTION_CONTEXT) startTimeIndex;

			++sc->ha_submitted_ccbs_count;
			if (sc->ha_performance.max_submit_count
			  < sc->ha_submitted_ccbs_count) {
				sc->ha_performance.max_submit_count
				  = sc->ha_submitted_ccbs_count;
			}
		}
#endif
		/* Copy the frame into adapter memory (size is in U32s). */
		bcopy (Message, sc->ha_Fvirt + MessageOffset,
		  I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
		if (ccb) {
			/* Track the command so it can time out or be failed. */
			ASR_ccbAdd (sc, ccb);
		}
		/* Post the command */
		sc->ha_Virt->ToFIFO = MessageOffset;
	} else {
		if (ASR_getBlinkLedCode(sc)) {
			/*
			 * Unlikely we can do anything if we can't grab a
			 * message frame :-(, but lets give it a try.
			 */
			(void)ASR_reset (sc);
		}
	}
	return (MessageOffset);
} /* ASR_queue */
1711
1712
/* Simple Scatter Gather elements */
/*
 * SG(SGL, Index, Flags, Buffer, Size) fills in simple scatter/gather
 * element `Index' of list `SGL' with `Size' bytes at the physical
 * address of `Buffer' (NULL passes through as NULL), OR-ing `Flags'
 * into the standard simple-address element flags.
 * NOTE: `Buffer' is evaluated more than once -- do not pass an
 * expression with side effects.
 */
#define	SG(SGL,Index,Flags,Buffer,Size)				   \
	I2O_FLAGS_COUNT_setCount(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  Size);						   \
	I2O_FLAGS_COUNT_setFlags(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	   \
	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		   \
	  (Buffer == NULL) ? NULL : KVTOPHYS(Buffer))
1724
/*
 * Retrieve Parameter Group.
 *	Buffer must be allocated using defAlignLong macro.
 *
 * Issues an I2O_UTIL_PARAMS_GET for group `Group' of device `TID',
 * reading all fields via an ALL_TEMPLATE operation.  On success returns
 * a pointer to the result payload inside `Buffer'; on failure (or an
 * empty result list) returns NULL.
 */
STATIC void *
ASR_getParams(
	IN Asr_softc_t * sc,
	IN tid_t	 TID,
	IN int		 Group,
	OUT void	 * Buffer,
	IN unsigned	 BufferSize)
{
	struct paramGetMessage {
		I2O_UTIL_PARAMS_GET_MESSAGE M;
		char F[
		  sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
		struct Operations {
			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
		}			     O;
	};
	defAlignLong(struct paramGetMessage, Message);
	struct Operations		     * Operations_Ptr;
	I2O_UTIL_PARAMS_GET_MESSAGE	     * Message_Ptr;
	struct ParamBuffer {
		I2O_PARAM_RESULTS_LIST_HEADER	    Header;
		I2O_PARAM_READ_OPERATION_RESULT	    Read;
		char				    Info[1];
	}				     * Buffer_Ptr;

	/* The operations list lives in the tail of the message frame. */
	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	    + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
	  + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	bzero ((void *)Operations_Ptr, sizeof(struct Operations));
	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
	  &(Operations_Ptr->Header), 1);
	/* One FIELD_GET operation requesting every field of the group. */
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
	  &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
	  &(Operations_Ptr->Template[0]), 0xFFFF);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
	  &(Operations_Ptr->Template[0]), Group);
	bzero ((void *)(Buffer_Ptr = getAlignLong(struct ParamBuffer, Buffer)),
	  BufferSize);

	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  I2O_VERSION_11
	  + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
	  TID);
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_UTIL_PARAMS_GET);
	/*
	 * Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Operations_Ptr, sizeof(struct Operations));
	SG(&(Message_Ptr->SGL), 1,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Buffer_Ptr, BufferSize);

	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
	 && (Buffer_Ptr->Header.ResultCount)) {
		return ((void *)(Buffer_Ptr->Info));
	}
	return ((void *)NULL);
} /* ASR_getParams */
1797
/*
 * Acquire the LCT information.
 *
 * Two-pass acquisition: a first LCT_NOTIFY into a stack-resident I2O_LCT
 * header learns the real table size, then a second request reads the
 * full table into freshly allocated sc->ha_LCT, building a scatter/
 * gather list over whatever physically contiguous runs the allocation
 * happens to span.  Finally each entry is classified into the driver's
 * le_type/le_bus/le_target/le_lun fields.  Returns 0 on success or an
 * errno value.
 */
STATIC INLINE int
ASR_acquireLct (
	INOUT Asr_softc_t * sc)
{
	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT		sg;
	int				MessageSizeInBytes;
	caddr_t				v;
	int				len;
	I2O_LCT				Table;
	PI2O_LCT_ENTRY			Entry;

	/*
	 * sc value assumed valid
	 */
	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc (
	  MessageSizeInBytes, M_TEMP, M_WAITOK))
	  == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
		return (ENOMEM);
	}
	(void)ASR_fillMessage((char *)Message_Ptr, MessageSizeInBytes);
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	  I2O_CLASS_MATCH_ANYCLASS);
	/*
	 * Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
	  sizeof(I2O_LCT));
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(&Table,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	/*
	 * Determine the size of the LCT table.
	 */
	if (sc->ha_LCT) {
		/* Discard any previously cached table. */
		free (sc->ha_LCT, M_TEMP);
	}
	/*
	 * malloc only generates contiguous memory when less than a
	 * page is expected. We must break the request up into an SG list ...
	 */
	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
	 || (len > (128 * 1024))) {	/* Arbitrary */
		free (Message_Ptr, M_TEMP);
		return (EINVAL);
	}
	if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK))
	  == (PI2O_LCT)NULL) {
		free (Message_Ptr, M_TEMP);
		return (ENOMEM);
	}
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(sc->ha_LCT,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/*
	 * Convert the access to the LCT table into a SG list.
	 */
	sg = Message_Ptr->SGL.u.Simple;
	v = (caddr_t)(sc->ha_LCT);
	for (;;) {
		int next, base, span;

		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			int size;

			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		/* Construct the Flags */
		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		{
			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
			if (len <= 0) {
				/* Final element: mark end of the SG list. */
				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
				    | I2O_SGL_FLAGS_LAST_ELEMENT
				    | I2O_SGL_FLAGS_END_OF_BUFFER);
			}
			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
		}

		if (len <= 0) {
			break;
		}

		/*
		 * Incrementing requires resizing of the packet.
		 */
		++sg;
		MessageSizeInBytes += sizeof(*sg);
		I2O_MESSAGE_FRAME_setMessageSize(
		  &(Message_Ptr->StdMessageFrame),
		  I2O_MESSAGE_FRAME_getMessageSize(
		    &(Message_Ptr->StdMessageFrame))
		  + (sizeof(*sg) / sizeof(U32)));
		{
			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;

			/* Grow the message frame to hold one more element. */
			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
			    malloc (MessageSizeInBytes, M_TEMP, M_WAITOK))
			    == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
				free (sc->ha_LCT, M_TEMP);
				sc->ha_LCT = (PI2O_LCT)NULL;
				free (Message_Ptr, M_TEMP);
				return (ENOMEM);
			}
			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
			bcopy ((caddr_t)Message_Ptr,
			  (caddr_t)NewMessage_Ptr, span);
			free (Message_Ptr, M_TEMP);
			sg = (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)NewMessage_Ptr) + span);
			Message_Ptr = NewMessage_Ptr;
		}
	}
	{	int retval;

		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
		free (Message_Ptr, M_TEMP);
		if (retval != CAM_REQ_CMP) {
			return (ENODEV);
		}
	}
	/* If the LCT table grew, lets truncate accesses */
	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
	}
	/* Classify every entry for the driver's bus/target/lun model. */
	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Entry) {
		Entry->le_type = I2O_UNKNOWN;
		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {

		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
			Entry->le_type = I2O_BSA;
			break;

		case I2O_CLASS_SCSI_PERIPHERAL:
			Entry->le_type = I2O_SCSI;
			break;

		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
			Entry->le_type = I2O_FCA;
			break;

		case I2O_CLASS_BUS_ADAPTER_PORT:
			Entry->le_type = I2O_PORT | I2O_SCSI;
			/* FALLTHRU */
		case I2O_CLASS_FIBRE_CHANNEL_PORT:
			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
				Entry->le_type = I2O_PORT | I2O_FCA;
			}
		{	struct ControllerInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	    Header;
				I2O_PARAM_READ_OPERATION_RESULT	    Read;
				I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
			};
			defAlignLong(struct ControllerInfo, Buffer);
			PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			/* Port objects carry only the initiator ID. */
			if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
			    Buffer, sizeof(struct ControllerInfo)))
			  == (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)NULL) {
				continue;
			}
			Entry->le_target
			  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
			    Info);
			Entry->le_lun = 0;
		}	/* FALLTHRU */
		default:
			continue;
		}
		{	struct DeviceInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	Header;
				I2O_PARAM_READ_OPERATION_RESULT Read;
				I2O_DPT_DEVICE_INFO_SCALAR	Info;
			};
			defAlignLong (struct DeviceInfo, Buffer);
			PI2O_DPT_DEVICE_INFO_SCALAR	  Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			/* Query the DPT device-info group for addressing. */
			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_DPT_DEVICE_INFO_GROUP_NO,
			    Buffer, sizeof(struct DeviceInfo)))
			  == (PI2O_DPT_DEVICE_INFO_SCALAR)NULL) {
				continue;
			}
			Entry->le_type
			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
			Entry->le_bus
			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
			if ((Entry->le_bus > sc->ha_MaxBus)
			 && (Entry->le_bus <= MAX_CHANNEL)) {
				/* Track the highest bus number seen. */
				sc->ha_MaxBus = Entry->le_bus;
			}
			Entry->le_target
			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
			Entry->le_lun
			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
		}
	}
	/*
	 * A zero return value indicates success.
	 */
	return (0);
} /* ASR_acquireLct */
2053
/*
 * Initialize a message frame.
 * We assume that the CDB has already been set up, so all we do here is
 * generate the Scatter Gather list.
 *
 * Returns a pointer to the aligned message frame on success, or NULL
 * when no I2O TID can be resolved for the ccb's bus/target/lun.
 */
STATIC INLINE PI2O_MESSAGE_FRAME
ASR_init_message(
    IN union asr_ccb * ccb,
    OUT PI2O_MESSAGE_FRAME Message)
{
    int next, span, base, rw;
    OUT PI2O_MESSAGE_FRAME Message_Ptr;
    Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
    PI2O_SGE_SIMPLE_ELEMENT sg;
    caddr_t v;
    vm_size_t size, len;
    U32 MessageSize;

    /* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
    bzero (Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message),
        (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT)));

    {
        /*
         * Resolve the I2O TID for this bus/target/lun, falling back to
         * an LCT scan (and caching the result via ASR_setTid) if the
         * cached lookup misses.
         */
        int target = ccb->ccb_h.target_id;
        int lun = ccb->ccb_h.target_lun;
        int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
        tid_t TID;

        if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
            PI2O_LCT_ENTRY Device;

            TID = (tid_t)0;
            for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
                (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
                ++Device) {
                /* Only claim devices not owned by another user (0xFFF). */
                if ((Device->le_type != I2O_UNKNOWN)
                 && (Device->le_bus == bus)
                 && (Device->le_target == target)
                 && (Device->le_lun == lun)
                 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
                    TID = I2O_LCT_ENTRY_getLocalTID(Device);
                    ASR_setTid (sc, Device->le_bus,
                        Device->le_target, Device->le_lun,
                        TID);
                    break;
                }
            }
        }
        if (TID == (tid_t)0) {
            /* No addressable device behind this path. */
            return ((PI2O_MESSAGE_FRAME)NULL);
        }
        I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
        PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
            (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
    }
    /* Standard frame header: version/offset, size (in U32s), function. */
    I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
        (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
          / sizeof(U32)) << 4));
    I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
        (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
        - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
    I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
    I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
    I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
        (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
    PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
        (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
          I2O_SCB_FLAG_ENABLE_DISCONNECT
        | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
        | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
    /*
     * We do not need any (optional byteswapping) method access to
     * the Initiator & Transaction context field.
     */
    I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

    I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
        (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
    /*
     * copy the cdb over
     */
    PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
        (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
    bcopy (&(ccb->csio.cdb_io),
        ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB, ccb->csio.cdb_len);

    /*
     * Given a buffer describing a transfer, set up a scatter/gather map
     * in a ccb to map that SCSI transfer.
     */

    /* Direction bit for SG flags: 0 = device-to-host (CAM_DIR_IN). */
    rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;

    /* Re-set SCB flags now that the transfer direction is known. */
    PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
        (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
        (ccb->csio.dxfer_len)
          ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
                   | I2O_SCB_FLAG_ENABLE_DISCONNECT
                   | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
                   | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
                  : (I2O_SCB_FLAG_XFER_FROM_DEVICE
                   | I2O_SCB_FLAG_ENABLE_DISCONNECT
                   | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
                   | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
          : (I2O_SCB_FLAG_ENABLE_DISCONNECT
           | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
           | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

    /*
     * Given a transfer described by a `data', fill in the SG list.
     */
    sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];

    len = ccb->csio.dxfer_len;
    v = ccb->csio.data_ptr;
    ASSERT (ccb->csio.dxfer_len >= 0);
    MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
    PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
        (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
    /*
     * Walk the virtual buffer, emitting one simple SG element per
     * physically contiguous run, limited to SG_SIZE elements.
     */
    while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
        Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
        span = 0;
        next = base = KVTOPHYS(v);
        I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

        /* How far can we go contiguously */
        while ((len > 0) && (base == next)) {
            next = trunc_page(base) + PAGE_SIZE;
            size = next - base;
            if (size > len) {
                size = len;
            }
            span += size;
            v += size;
            len -= size;
            base = KVTOPHYS(v);
        }

        I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
        if (len == 0) {
            /* Mark the final data element. */
            rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
        }
        I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
            I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
        ++sg;
        MessageSize += sizeof(*sg) / sizeof(U32);
    }
    /* We always do the request sense ... */
    if ((span = ccb->csio.sense_len) == 0) {
        span = sizeof(ccb->csio.sense_data);
    }
    SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
        &(ccb->csio.sense_data), span);
    I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
        MessageSize + (sizeof(*sg) / sizeof(U32)));
    return (Message_Ptr);
} /* ASR_init_message */
2211
/*
 * Initialize the adapter's outbound (reply) FIFO.
 *
 * Sends an I2O_EXEC_OUTBOUND_INIT message and polls a local status
 * word that the adapter updates via the attached SG element.  On the
 * first call the reply-frame pool is contigmalloc'd, then every reply
 * frame's physical address is posted to the FromFIFO.
 *
 * Returns the final reply status (non-zero) on success, or 0 if the
 * message could not be handed to the IOP.
 */
STATIC INLINE U32
ASR_initOutBound (
    INOUT Asr_softc_t * sc)
{
    struct initOutBoundMessage {
        I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
        U32 R; /* trailing status word polled below */
    };
    defAlignLong(struct initOutBoundMessage,Message);
    PI2O_EXEC_OUTBOUND_INIT_MESSAGE Message_Ptr;
    OUT U32 * volatile Reply_Ptr;
    U32 Old;

    /*
     * Build up our copy of the Message.
     */
    Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(Message,
        sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
    I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
        I2O_EXEC_OUTBOUND_INIT);
    I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
    I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
        sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
    /*
     * Reset the Reply Status (the U32 following the message in the
     * local struct); the adapter writes its progress there.
     */
    *(Reply_Ptr = (U32 *)((char *)Message_Ptr
        + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
    SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
        sizeof(U32));
    /*
     * Send the Message out
     */
    if ((Old = ASR_initiateCp (sc->ha_Virt, sc->ha_Fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
        u_long size, addr;

        /*
         * Wait for a response (Poll).
         * NOTE(review): busy-waits with no timeout; a dead adapter
         * that never advances the status word would hang here.
         */
        while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
        /*
         * Re-enable the interrupts.
         */
        sc->ha_Virt->Mask = Old;
        /*
         * Populate the outbound table.
         */
        if (sc->ha_Msgs == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {

            /* Allocate the reply frames */
            size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
                * sc->ha_Msgs_Count;

            /*
             * contigmalloc only works reliably at
             * initialization time.
             */
            if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
                contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
                  0xFFFFFFFFul, (u_long)sizeof(U32), 0ul))
              != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
                (void)bzero ((char *)sc->ha_Msgs, size);
                sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
            }
        }

        /* Initialize the outbound FIFO */
        if (sc->ha_Msgs != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL)
        for (size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
            size; --size) {
            /* Hand each reply frame's physical address to the IOP. */
            sc->ha_Virt->FromFIFO = addr;
            addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
        }
        return (*Reply_Ptr);
    }
    return (0);
} /* ASR_initOutBound */
2292
/*
 * Set the system table
 *
 * Builds an I2O_EXEC_SYS_TAB_SET message whose SG list references a
 * header (entry count) followed by each registered adapter's system
 * table entry, then two empty descriptors terminating the list.
 * Returns the ASR_queue_c() status, or ENOMEM on allocation failure.
 */
STATIC INLINE int
ASR_setSysTab(
    IN Asr_softc_t * sc)
{
    PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
    PI2O_SET_SYSTAB_HEADER SystemTable;
    Asr_softc_t * ha;
    PI2O_SGE_SIMPLE_ELEMENT sg;
    int retVal;

    if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc (
        sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK))
      == (PI2O_SET_SYSTAB_HEADER)NULL) {
        return (ENOMEM);
    }
    bzero (SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
    /* One system-table entry per registered controller. */
    for (ha = Asr_softc; ha; ha = ha->ha_next) {
        ++SystemTable->NumberEntries;
    }
    /* Frame + one SG element per adapter + 3 fixed elements. */
    if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc (
        sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
        + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
        M_TEMP, M_WAITOK)) == (PI2O_EXEC_SYS_TAB_SET_MESSAGE)NULL) {
        free (SystemTable, M_TEMP);
        return (ENOMEM);
    }
    (void)ASR_fillMessage((char *)Message_Ptr,
        sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
        + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
    I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
        (I2O_VERSION_11 +
        (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
          / sizeof(U32)) << 4)));
    I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
        I2O_EXEC_SYS_TAB_SET);
    /*
     * Call the LCT table to determine the number of device entries
     * to reserve space for.
     * since this code is reused in several systems, code efficiency
     * is greater by using a shift operation rather than a divide by
     * sizeof(u_int32_t).
     */
    sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
        + ((I2O_MESSAGE_FRAME_getVersionOffset(
            &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
    /* First element: the header carrying the entry count. */
    SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
    ++sg;
    for (ha = Asr_softc; ha; ha = ha->ha_next) {
        /* END_OF_BUFFER goes on the last adapter's entry only. */
        SG(sg, 0,
            ((ha->ha_next)
              ? (I2O_SGL_FLAGS_DIR)
              : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
            &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
        ++sg;
    }
    /* Two empty descriptors terminate the SG list. */
    SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
    SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
        | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
    retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
    free (Message_Ptr, M_TEMP);
    free (SystemTable, M_TEMP);
    return (retVal);
} /* ASR_setSysTab */
2359
/*
 * Fetch the Hardware Resource Table (HRT) from the IOP and use the
 * adapter IDs found there to fill in the bus number of each matching
 * LCT entry, tracking the highest bus seen in sc->ha_MaxBus.
 *
 * Returns 0 on success, ENODEV if the HRT_GET message fails.
 */
STATIC INLINE int
ASR_acquireHrt (
    INOUT Asr_softc_t * sc)
{
    defAlignLong(I2O_EXEC_HRT_GET_MESSAGE,Message);
    I2O_EXEC_HRT_GET_MESSAGE * Message_Ptr;
    struct {
        I2O_HRT Header;
        I2O_HRT_ENTRY Entry[MAX_CHANNEL];
    } Hrt; /* stack buffer the adapter fills via the SG element */
    u_int8_t NumberOfEntries;
    PI2O_HRT_ENTRY Entry;

    bzero ((void *)&Hrt, sizeof (Hrt));
    Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(Message,
        sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
        + sizeof(I2O_SGE_SIMPLE_ELEMENT));
    I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
        (I2O_VERSION_11
        + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
            / sizeof(U32)) << 4)));
    I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
        I2O_EXEC_HRT_GET);

    /*
     * Set up the buffers as scatter gather elements.
     */
    SG(&(Message_Ptr->SGL), 0,
        I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
        &Hrt, sizeof(Hrt));
    if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
        return (ENODEV);
    }
    /* Clamp to what our stack buffer can actually hold. */
    if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
      > (MAX_CHANNEL + 1)) {
        NumberOfEntries = MAX_CHANNEL + 1;
    }
    for (Entry = Hrt.Header.HRTEntry;
        NumberOfEntries != 0;
        ++Entry, --NumberOfEntries) {
        PI2O_LCT_ENTRY Device;

        for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
            (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
            ++Device) {
            /* Match LCT entry to HRT entry by TID (low 12 bits). */
            if (I2O_LCT_ENTRY_getLocalTID(Device)
              == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
                /* Bus number lives in the AdapterID high word. */
                Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
                    Entry) >> 16;
                if ((Device->le_bus > sc->ha_MaxBus)
                 && (Device->le_bus <= MAX_CHANNEL)) {
                    sc->ha_MaxBus = Device->le_bus;
                }
            }
        }
    }
    return (0);
} /* ASR_acquireHrt */
2418
2419/*
2420 * Enable the adapter.
2421 */
2422STATIC INLINE int
2423ASR_enableSys (
2424 IN Asr_softc_t * sc)
2425{
2426 defAlignLong(I2O_EXEC_SYS_ENABLE_MESSAGE,Message);
2427 PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr;
2428
2429 Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(Message,
2430 sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2431 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2432 I2O_EXEC_SYS_ENABLE);
2433 return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2434} /* ASR_enableSys */
2435
2436/*
2437 * Perform the stages necessary to initialize the adapter
2438 */
2439STATIC int
2440ASR_init(
2441 IN Asr_softc_t * sc)
2442{
2443 return ((ASR_initOutBound(sc) == 0)
2444 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2445 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2446} /* ASR_init */
2447
/*
 * Send a Synchronize Cache command to the target device.
 *
 * Used from shutdown/reset paths to flush a device's write cache.
 * Nothing is sent unless the bus/target/lun resolves to a known,
 * non-zero TID.  The completion status is deliberately ignored.
 */
STATIC INLINE void
ASR_sync (
    IN Asr_softc_t * sc,
    IN int bus,
    IN int target,
    IN int lun)
{
    tid_t TID;

    /*
     * We will not synchronize the device when there are outstanding
     * commands issued by the OS (this is due to a locked up device,
     * as the OS normally would flush all outstanding commands before
     * issuing a shutdown or an adapter reset).
     *
     * NOTE(review): the guard below requires ha_ccb to be NON-empty,
     * which reads as the opposite of the comment above — confirm what
     * the ha_ccb list actually tracks before changing either.
     */
    if ((sc != (Asr_softc_t *)NULL)
     && (LIST_FIRST(&(sc->ha_ccb)) != (struct ccb_hdr *)NULL)
     && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
     && (TID != (tid_t)0)) {
        defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
        PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;

        /* Build the private SCSI-execute message on the stack. */
        bzero (Message_Ptr
            = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
            sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
            - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));

        I2O_MESSAGE_FRAME_setVersionOffset(
            (PI2O_MESSAGE_FRAME)Message_Ptr,
            I2O_VERSION_11
              | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
                 - sizeof(I2O_SG_ELEMENT))
                 / sizeof(U32)) << 4));
        I2O_MESSAGE_FRAME_setMessageSize(
            (PI2O_MESSAGE_FRAME)Message_Ptr,
            (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
            - sizeof(I2O_SG_ELEMENT))
            / sizeof(U32));
        I2O_MESSAGE_FRAME_setInitiatorAddress (
            (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
        I2O_MESSAGE_FRAME_setFunction(
            (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
        I2O_MESSAGE_FRAME_setTargetAddress(
            (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
        I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
            (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
            I2O_SCSI_SCB_EXEC);
        PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
        PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
            I2O_SCB_FLAG_ENABLE_DISCONNECT
            | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
            | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
        I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
            (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
            DPT_ORGANIZATION_ID);
        /* 6-byte SYNCHRONIZE CACHE CDB; LUN in CDB byte 1, bits 5-7. */
        PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
        Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
        Message_Ptr->CDB[1] = (lun << 5);

        /* Re-set the flags with the transfer direction included. */
        PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
            (I2O_SCB_FLAG_XFER_FROM_DEVICE
              | I2O_SCB_FLAG_ENABLE_DISCONNECT
              | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
              | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

        /* Fire and forget: completion status is not examined. */
        (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

    }
}
2520
2521STATIC INLINE void
2522ASR_synchronize (
2523 IN Asr_softc_t * sc)
2524{
2525 int bus, target, lun;
2526
2527 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2528 for (target = 0; target <= sc->ha_MaxId; ++target) {
2529 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2530 ASR_sync(sc,bus,target,lun);
2531 }
2532 }
2533 }
2534}
2535
/*
 * Reset the HBA, targets and BUS.
 * Currently this resets *all* the SCSI busses.
 *
 * Devices are flushed (SYNCHRONIZE CACHE) first so cached write data
 * is not lost by the reset; the ordering of these two calls is the
 * whole contract of this function.
 */
STATIC INLINE void
asr_hbareset(
    IN Asr_softc_t * sc)
{
    ASR_synchronize (sc);
    (void)ASR_reset (sc);
} /* asr_hbareset */
2547
2548/*
2549 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2550 * limit and a reduction in error checking (in the pre 4.0 case).
2551 */
2552STATIC int
2553asr_pci_map_mem (
42cdd4ab 2554#if defined(__DragonFly__) || __FreeBSD_version >= 400000
984263bc
MD
2555 IN device_t tag,
2556#else
2557 IN pcici_t tag,
2558#endif
2559 IN Asr_softc_t * sc)
2560{
2561 int rid;
2562 u_int32_t p, l, s;
2563
42cdd4ab 2564#if defined(__DragonFly__) || __FreeBSD_version >= 400000
984263bc
MD
2565 /*
2566 * I2O specification says we must find first *memory* mapped BAR
2567 */
2568 for (rid = PCIR_MAPS;
2569 rid < (PCIR_MAPS + 4 * sizeof(u_int32_t));
2570 rid += sizeof(u_int32_t)) {
2571 p = pci_read_config(tag, rid, sizeof(p));
2572 if ((p & 1) == 0) {
2573 break;
2574 }
2575 }
2576 /*
2577 * Give up?
2578 */
2579 if (rid >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2580 rid = PCIR_MAPS;
2581 }
2582 p = pci_read_config(tag, rid, sizeof(p));
2583 pci_write_config(tag, rid, -1, sizeof(p));
2584 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2585 pci_write_config(tag, rid, p, sizeof(p));
2586 if (l > MAX_MAP) {
2587 l = MAX_MAP;
2588 }
2589 /*
2590 * The 2005S Zero Channel RAID solution is not a perfect PCI
2591 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
2592 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
2593 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
2594 * accessible via BAR0, the messaging registers are accessible
2595 * via BAR1. If the subdevice code is 50 to 59 decimal.
2596 */
2597 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s));
2598 if (s != 0xA5111044) {
2599 s = pci_read_config(tag, PCIR_SUBVEND_0, sizeof(s));
2600 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2601 && (ADPTDOMINATOR_SUB_ID_START <= s)
2602 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2603 l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
2604 }
2605 }
2606 p &= ~15;
2607 sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2608 p, p + l, l, RF_ACTIVE);
2609 if (sc->ha_mem_res == (struct resource *)NULL) {
2610 return (0);
2611 }
2612 sc->ha_Base = (void *)rman_get_start(sc->ha_mem_res);
2613 if (sc->ha_Base == (void *)NULL) {
2614 return (0);
2615 }
2616 sc->ha_Virt = (i2oRegs_t *) rman_get_virtual(sc->ha_mem_res);
2617 if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
2618 if ((rid += sizeof(u_int32_t))
2619 >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2620 return (0);
2621 }
2622 p = pci_read_config(tag, rid, sizeof(p));
2623 pci_write_config(tag, rid, -1, sizeof(p));
2624 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2625 pci_write_config(tag, rid, p, sizeof(p));
2626 if (l > MAX_MAP) {
2627 l = MAX_MAP;
2628 }
2629 p &= ~15;
2630 sc->ha_mes_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2631 p, p + l, l, RF_ACTIVE);
2632 if (sc->ha_mes_res == (struct resource *)NULL) {
2633 return (0);
2634 }
2635 if ((void *)rman_get_start(sc->ha_mes_res) == (void *)NULL) {
2636 return (0);
2637 }
2638 sc->ha_Fvirt = (U8 *) rman_get_virtual(sc->ha_mes_res);
2639 } else {
2640 sc->ha_Fvirt = (U8 *)(sc->ha_Virt);
2641 }
2642#else
2643 vm_size_t psize, poffs;
2644
2645 /*
2646 * I2O specification says we must find first *memory* mapped BAR
2647 */
2648 for (rid = PCI_MAP_REG_START;
2649 rid < (PCI_MAP_REG_START + 4 * sizeof(u_int32_t));
2650 rid += sizeof(u_int32_t)) {
2651 p = pci_conf_read (tag, rid);
2652 if ((p & 1) == 0) {
2653 break;
2654 }
2655 }
2656 if (rid >= (PCI_MAP_REG_START + 4 * sizeof(u_int32_t))) {
2657 rid = PCI_MAP_REG_START;
2658 }
2659 /*
2660 ** save old mapping, get size and type of memory
2661 **
2662 ** type is in the lowest four bits.
2663 ** If device requires 2^n bytes, the next
2664 ** n-4 bits are read as 0.
2665 */
2666
2667 sc->ha_Base = (void *)((p = pci_conf_read (tag, rid))
2668 & PCI_MAP_MEMORY_ADDRESS_MASK);
2669 pci_conf_write (tag, rid, 0xfffffffful);
2670 l = pci_conf_read (tag, rid);
2671 pci_conf_write (tag, rid, p);
2672
2673 /*
2674 ** check the type
2675 */
2676
2677 if (!((l & PCI_MAP_MEMORY_TYPE_MASK) == PCI_MAP_MEMORY_TYPE_32BIT_1M
2678 && ((u_long)sc->ha_Base & ~0xfffff) == 0)
2679 && ((l & PCI_MAP_MEMORY_TYPE_MASK) != PCI_MAP_MEMORY_TYPE_32BIT)) {
2680 debug_asr_printf (
2681 "asr_pci_map_mem failed: bad memory type=0x%x\n",
2682 (unsigned) l);
2683 return (0);
2684 };
2685
2686 /*
2687 ** get the size.
2688 */
2689
2690 psize = -(l & PCI_MAP_MEMORY_ADDRESS_MASK);
2691 if (psize > MAX_MAP) {
2692 psize = MAX_MAP;
2693 }
2694 /*
2695 * The 2005S Zero Channel RAID solution is not a perfect PCI
2696 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
2697 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
2698 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
2699 * accessible via BAR0, the messaging registers are accessible
2700 * via BAR1. If the subdevice code is 50 to 59 decimal.
2701 */
2702 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s));
2703 if (s != 0xA5111044) {
2704 s = pci_conf_read (tag, PCIR_SUBVEND_0)
2705 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2706 && (ADPTDOMINATOR_SUB_ID_START <= s)
2707 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2708 psize = MAX_MAP;
2709 }
2710 }
2711
2712 if ((sc->ha_Base == (void *)NULL)
2713 || (sc->ha_Base == (void *)PCI_MAP_MEMORY_ADDRESS_MASK)) {
2714 debug_asr_printf ("asr_pci_map_mem: not configured by bios.\n");
2715 return (0);
2716 };
2717
2718 /*
2719 ** Truncate sc->ha_Base to page boundary.
2720 ** (Or does pmap_mapdev the job?)
2721 */
2722
2723 poffs = (u_long)sc->ha_Base - trunc_page ((u_long)sc->ha_Base);
2724 sc->ha_Virt = (i2oRegs_t *)pmap_mapdev ((u_long)sc->ha_Base - poffs,
2725 psize + poffs);
2726
2727 if (sc->ha_Virt == (i2oRegs_t *)NULL) {
2728 return (0);
2729 }
2730
2731 sc->ha_Virt = (i2oRegs_t *)((u_long)sc->ha_Virt + poffs);
2732 if (s == 0xA5111044) {
2733 if ((rid += sizeof(u_int32_t))
2734 >= (PCI_MAP_REG_START + 4 * sizeof(u_int32_t))) {
2735 return (0);
2736 }
2737
2738 /*
2739 ** save old mapping, get size and type of memory
2740 **
2741 ** type is in the lowest four bits.
2742 ** If device requires 2^n bytes, the next
2743 ** n-4 bits are read as 0.
2744 */
2745
2746 if ((((p = pci_conf_read (tag, rid))
2747 & PCI_MAP_MEMORY_ADDRESS_MASK) == 0L)
2748 || ((p & PCI_MAP_MEMORY_ADDRESS_MASK)
2749 == PCI_MAP_MEMORY_ADDRESS_MASK)) {
2750 debug_asr_printf ("asr_pci_map_mem: not configured by bios.\n");
2751 }
2752 pci_conf_write (tag, rid, 0xfffffffful);
2753 l = pci_conf_read (tag, rid);
2754 pci_conf_write (tag, rid, p);
2755 p &= PCI_MAP_MEMORY_TYPE_MASK;
2756
2757 /*
2758 ** check the type
2759 */
2760
2761 if (!((l & PCI_MAP_MEMORY_TYPE_MASK)
2762 == PCI_MAP_MEMORY_TYPE_32BIT_1M
2763 && (p & ~0xfffff) == 0)
2764 && ((l & PCI_MAP_MEMORY_TYPE_MASK)
2765 != PCI_MAP_MEMORY_TYPE_32BIT)) {
2766 debug_asr_printf (
2767 "asr_pci_map_mem failed: bad memory type=0x%x\n",
2768 (unsigned) l);
2769 return (0);
2770 };
2771
2772 /*
2773 ** get the size.
2774 */
2775
2776 psize = -(l & PCI_MAP_MEMORY_ADDRESS_MASK);
2777 if (psize > MAX_MAP) {
2778 psize = MAX_MAP;
2779 }
2780
2781 /*
2782 ** Truncate p to page boundary.
2783 ** (Or does pmap_mapdev the job?)
2784 */
2785
2786 poffs = p - trunc_page (p);
2787 sc->ha_Fvirt = (U8 *)pmap_mapdev (p - poffs, psize + poffs);
2788
2789 if (sc->ha_Fvirt == (U8 *)NULL) {
2790 return (0);
2791 }
2792
2793 sc->ha_Fvirt = (U8 *)((u_long)sc->ha_Fvirt + poffs);
2794 } else {
2795 sc->ha_Fvirt = (U8 *)(sc->ha_Virt);
2796 }
2797#endif
2798 return (1);
2799} /* asr_pci_map_mem */
2800
2801/*
2802 * A simplified copy of the real pci_map_int with additional
2803 * registration requirements.
2804 */
2805STATIC int
2806asr_pci_map_int (
42cdd4ab 2807#if defined(__DragonFly__) || __FreeBSD_version >= 400000
984263bc
MD
2808 IN device_t tag,
2809#else
2810 IN pcici_t tag,
2811#endif
2812 IN Asr_softc_t * sc)
2813{
42cdd4ab 2814#if defined(__DragonFly__) || __FreeBSD_version >= 400000
984263bc
MD
2815 int rid = 0;
2816
2817 sc->ha_irq_res = bus_alloc_resource(tag, SYS_RES_IRQ, &rid,
2818 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE);
2819 if (sc->ha_irq_res == (struct resource *)NULL) {
2820 return (0);
2821 }
2822 if (bus_setup_intr(tag, sc->ha_irq_res, INTR_TYPE_CAM,
2823 (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr))) {
2824 return (0);
2825 }
2826 sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char));
2827#else
2828 if (!pci_map_int(tag, (pci_inthand_t *)asr_intr,
2829 (void *)sc, &cam_imask)) {
2830 return (0);
2831 }
2832 sc->ha_irq = pci_conf_read(tag, PCIR_INTLINE);
2833#endif
2834 return (1);
2835} /* asr_pci_map_int */
2836
2837/*
2838 * Attach the devices, and virtual devices to the driver list.
2839 */
2840STATIC ATTACH_RET
2841asr_attach (ATTACH_ARGS)
2842{
2843 Asr_softc_t * sc;
2844 struct scsi_inquiry_data * iq;
2845 ATTACH_SET();
2846
2847 if ((sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT)) == (Asr_softc_t *)NULL) {
2848 ATTACH_RETURN(ENOMEM);
2849 }
2850 if (Asr_softc == (Asr_softc_t *)NULL) {
2851 /*
2852 * Fixup the OS revision as saved in the dptsig for the
2853 * engine (dptioctl.h) to pick up.
2854 */
2855 bcopy (osrelease, &ASR_sig.dsDescription[16], 5);
2856 printf ("asr%d: major=%d\n", unit, asr_cdevsw.d_maj);
2857 }
2858 /*
2859 * Initialize the software structure
2860 */
2861 bzero (sc, sizeof(*sc));
2862 LIST_INIT(&(sc->ha_ccb));
2863# ifdef ASR_MEASURE_PERFORMANCE
2864 {
2865 u_int32_t i;
2866
2867 // initialize free list for timeQ
2868 sc->ha_timeQFreeHead = 0;
2869 sc->ha_timeQFreeTail = MAX_TIMEQ_SIZE - 1;
2870 for (i = 0; i < MAX_TIMEQ_SIZE; i++) {
2871 sc->ha_timeQFreeList[i] = i;
2872 }
2873 }
2874# endif
2875 /* Link us into the HA list */
2876 {
2877 Asr_softc_t **ha;
2878
2879 for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next));
2880 *(ha) = sc;
2881 }
2882 {
2883 PI2O_EXEC_STATUS_GET_REPLY status;
2884 int size;
2885
2886 /*
2887 * This is the real McCoy!
2888 */
2889 if (!asr_pci_map_mem(tag, sc)) {
2890 printf ("asr%d: could not map memory\n", unit);
2891 ATTACH_RETURN(ENXIO);
2892 }
2893 /* Enable if not formerly enabled */
42cdd4ab 2894#if defined(__DragonFly__) || __FreeBSD_version >= 400000
984263bc
MD
2895 pci_write_config (tag, PCIR_COMMAND,
2896 pci_read_config (tag, PCIR_COMMAND, sizeof(char))
2897 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2898 /* Knowledge is power, responsibility is direct */
2899 {
2900 struct pci_devinfo {
2901 STAILQ_ENTRY(pci_devinfo) pci_links;
2902 struct resource_list resources;
2903 pcicfgregs cfg;
2904 } * dinfo = device_get_ivars(tag);
2905 sc->ha_pciBusNum = dinfo->cfg.bus;
2906 sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3)
2907 | dinfo->cfg.func;
2908 }
2909#else
2910 pci_conf_write (tag, PCIR_COMMAND,
2911 pci_conf_read (tag, PCIR_COMMAND)
2912 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
2913 /* Knowledge is power, responsibility is direct */
2914 switch (pci_mechanism) {
2915
2916 case 1:
2917 sc->ha_pciBusNum = tag.cfg1 >> 16;
2918 sc->ha_pciDeviceNum = tag.cfg1 >> 8;
2919
2920 case 2:
2921 sc->ha_pciBusNum = tag.cfg2.forward;
2922 sc->ha_pciDeviceNum = ((tag.cfg2.enable >> 1) & 7)
2923 | (tag.cfg2.port >> 5);
2924 }
2925#endif
2926 /* Check if the device is there? */
2927 if ((ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt) == 0)
2928 || ((status = (PI2O_EXEC_STATUS_GET_REPLY)malloc (
2929 sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK))
2930 == (PI2O_EXEC_STATUS_GET_REPLY)NULL)
2931 || (ASR_getStatus(sc->ha_Virt, sc->ha_Fvirt, status) == NULL)) {
2932 printf ("asr%d: could not initialize hardware\n", unit);
2933 ATTACH_RETURN(ENODEV); /* Get next, maybe better luck */
2934 }
2935 sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2936 sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2937 sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2938 sc->ha_SystemTable.IopState = status->IopState;
2939 sc->ha_SystemTable.MessengerType = status->MessengerType;
2940 sc->ha_SystemTable.InboundMessageFrameSize
2941 = status->InboundMFrameSize;
2942 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow
2943 = (U32)(sc->ha_Base) + (U32)(&(((i2oRegs_t *)NULL)->ToFIFO));
2944
2945 if (!asr_pci_map_int(tag, (void *)sc)) {
2946 printf ("asr%d: could not map interrupt\n", unit);
2947 ATTACH_RETURN(ENXIO);
2948 }
2949
2950 /* Adjust the maximim inbound count */
2951 if (((sc->ha_QueueSize
2952 = I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status))
2953 > MAX_INBOUND)
2954 || (sc->ha_QueueSize == 0)) {
2955 sc->ha_QueueSize = MAX_INBOUND;
2956 }
2957
2958 /* Adjust the maximum outbound count */
2959 if (((sc->ha_Msgs_Count
2960 = I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status))
2961 > MAX_OUTBOUND)
2962 || (sc->ha_Msgs_Count == 0)) {
2963 sc->ha_Msgs_Count = MAX_OUTBOUND;
2964 }
2965 if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2966 sc->ha_Msgs_Count = sc->ha_QueueSize;
2967 }
2968
2969 /* Adjust the maximum SG size to adapter */
2970 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(
2971 status) << 2)) > MAX_INBOUND_SIZE) {
2972 size = MAX_INBOUND_SIZE;
2973 }
2974 free (status, M_TEMP);
2975 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2976 + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2977 }
2978
2979 /*
2980 * Only do a bus/HBA reset on the first time through. On this
2981 * first time through, we do not send a flush to the devices.
2982 */
2983 if (ASR_init(sc) == 0) {
2984 struct BufferInfo {
2985 I2O_PARAM_RESULTS_LIST_HEADER Header;
2986 I2O_PARAM_READ_OPERATION_RESULT Read;
2987 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2988 };
2989 defAlignLong (struct BufferInfo, Buffer);
2990 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2991# define FW_DEBUG_BLED_OFFSET 8
2992
2993 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2994 ASR_getParams(sc, 0,
2995 I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2996 Buffer, sizeof(struct BufferInfo)))
2997 != (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)NULL) {
2998 sc->ha_blinkLED = sc->ha_Fvirt
2999 + I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info)
3000 + FW_DEBUG_BLED_OFFSET;
3001 }
3002 if (ASR_acquireLct(sc) == 0) {
3003 (void)ASR_acquireHrt(sc);
3004 }
3005 } else {
3006 printf ("asr%d: failed to initialize\n", unit);
3007 ATTACH_RETURN(ENXIO);
3008 }
3009 /*
3010 * Add in additional probe responses for more channels. We
3011 * are reusing the variable `target' for a channel loop counter.
3012 * Done here because of we need both the acquireLct and
3013 * acquireHrt data.
3014 */
3015 { PI2O_LCT_ENTRY Device;
3016
3017 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
3018 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
3019 ++Device) {
3020 if (Device->le_type == I2O_UNKNOWN) {
3021 continue;
3022 }
3023 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
3024 if (Device->le_target > sc->ha_MaxId) {
3025 sc->ha_MaxId = Device->le_target;
3026 }
3027 if (Device->le_lun > sc->ha_MaxLun) {
3028 sc->ha_MaxLun = Device->le_lun;
3029 }
3030 }
3031 if (((Device->le_type & I2O_PORT) != 0)
3032 && (Device->le_bus <= MAX_CHANNEL)) {
3033 /* Do not increase MaxId for efficiency */
3034 sc->ha_adapter_target[Device->le_bus]
3035 = Device->le_target;
3036 }
3037 }
3038 }
3039
3040
3041 /*
3042 * Print the HBA model number as inquired from the card.
3043 */
3044
3045 printf ("asr%d:", unit);
3046
3047 if ((iq = (struct scsi_inquiry_data *)malloc (
3048 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK))
3049 != (struct scsi_inquiry_data *)NULL) {
3050 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
3051 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
3052 int posted = 0;
3053
3054 bzero (iq, sizeof(struct scsi_inquiry_data));
3055 bzero (Message_Ptr
3056 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
3057 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
3058 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
3059
3060 I2O_MESSAGE_FRAME_setVersionOffset(
3061 (PI2O_MESSAGE_FRAME)Message_Ptr,
3062 I2O_VERSION_11
3063 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
3064 - sizeof(I2O_SG_ELEMENT))
3065 / sizeof(U32)) << 4));
3066 I2O_MESSAGE_FRAME_setMessageSize(
3067 (PI2O_MESSAGE_FRAME)Message_Ptr,
3068 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
3069 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT))
3070 / sizeof(U32));
3071 I2O_MESSAGE_FRAME_setInitiatorAddress (
3072 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
3073 I2O_MESSAGE_FRAME_setFunction(
3074 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
3075 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
3076 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
3077 I2O_SCSI_SCB_EXEC);
3078 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
3079 I2O_SCB_FLAG_ENABLE_DISCONNECT
3080 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
3081 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
3082 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
3083 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
3084 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
3085 DPT_ORGANIZATION_ID);
3086 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
3087 Message_Ptr->CDB[0] = INQUIRY;
3088 Message_Ptr->CDB[4] = (unsigned char)sizeof(struct scsi_inquiry_data);
3089 if (Message_Ptr->CDB[4] == 0) {
3090 Message_Ptr->CDB[4] = 255;
3091 }
3092
3093 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
3094 (I2O_SCB_FLAG_XFER_FROM_DEVICE
3095 | I2O_SCB_FLAG_ENABLE_DISCONNECT
3096 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
3097 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
3098
3099 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
3100 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
3101 sizeof(struct scsi_inquiry_data));
3102 SG(&(Message_Ptr->SGL), 0,
3103 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
3104 iq, sizeof(struct scsi_inquiry_data));
3105 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
3106
3107 if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
3108 printf (" ");
3109 ASR_prstring (iq->vendor, 8);
3110 ++posted;
3111 }
3112 if (iq->product[0] && (iq->product[0] != ' ')) {
3113 printf (" ");
3114 ASR_prstring (iq->product, 16);
3115 ++posted;
3116 }
3117 if (iq->revision[0] && (iq->revision[0] != ' ')) {
3118 printf (" FW Rev. ");
3119 ASR_prstring (iq->revision, 4);
3120 ++posted;
3121 }
3122 free ((caddr_t)iq, M_TEMP);
3123 if (posted) {
3124 printf (",");
3125 }
3126 }
3127 printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
3128 (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
3129
3130 /*
3131 * fill in the prototype cam_path.
3132 */
3133 {
3134 int bus;
3135 union asr_ccb * ccb;
3136
3137 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
3138 printf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit);
3139 ATTACH_RETURN(ENOMEM);
3140 }
3141 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
984263bc
MD
3142 int QueueSize = sc->ha_QueueSize;
3143
3144 if (QueueSize > MAX_INBOUND) {
3145 QueueSize = MAX_INBOUND;
3146 }
3147
984263bc
MD
3148 /*
3149 * Construct our first channel SIM entry
3150 */
3151 sc->ha_sim[bus] = cam_sim_alloc(
3152 asr_action, asr_poll, "asr", sc,
521cf4d2
MD
3153 unit, 1, QueueSize, NULL);
3154 if (sc->ha_sim[bus] == NULL)
984263bc 3155 continue;
984263bc
MD
3156
3157 if (xpt_bus_register(sc->ha_sim[bus], bus)
3158 != CAM_SUCCESS) {
521cf4d2 3159 cam_sim_free(sc->ha_sim[bus]);
984263bc
MD
3160 sc->ha_sim[bus] = NULL;
3161 continue;
3162 }
3163
3164 if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
3165 cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
3166 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
3167 xpt_bus_deregister(
3168 cam_sim_path(sc->ha_sim[bus]));
521cf4d2 3169 cam_sim_free(sc->ha_sim[bus]);
984263bc
MD
3170 sc->ha_sim[bus] = NULL;
3171 continue;
3172 }
3173 }
3174 asr_free_ccb (ccb);
3175 }
3176 /*
3177 * Generate the device node information
3178 */
3179 (void)make_dev(&asr_cdevsw, unit, 0, 0, S_IRWXU, "rasr%d", unit);
3180 destroy_dev(makedev(asr_cdevsw.d_maj,unit+1));
3181 ATTACH_RETURN(0);
3182} /* asr_attach */
3183
3184STATIC void
3185asr_poll(
3186 IN struct cam_sim *sim)
3187{
3188 asr_intr(cam_sim_softc(sim));
3189} /* asr_poll */
3190
/*
 * CAM action entry point for the asr SIM.  Dispatches on the CCB function
 * code: SCSI I/O is translated into an I2O private SCSI-SCB-execute message
 * and queued to the IOP; most other requests are completed inline.  Every
 * path through this routine must terminate the CCB with xpt_done() (or
 * hand it to the controller, which completes it later in asr_intr()).
 */
STATIC void
asr_action(
	IN struct cam_sim * sim,
	IN union ccb * ccb)
{
	struct Asr_softc * sc;

	debug_asr_printf ("asr_action(%lx,%lx{%x})\n",
	  (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code);

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));

	/* Stash the softc in the CCB so the completion path can find it. */
	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {

	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		/* Scratch buffer large enough for any inbound message frame. */
		struct Message {
			char M[MAX_INBOUND_SIZE];
		};
		defAlignLong(struct Message,Message);
		PI2O_MESSAGE_FRAME Message_Ptr;

		/* Reject incoming commands while we are resetting the card */
		if (sc->ha_in_reset != HA_OPERATIONAL) {
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			if (sc->ha_in_reset >= HA_OFF_LINE) {
				/* HBA is now off-line */
				ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
			} else {
				/* HBA currently resetting, try again later. */
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			}
			debug_asr_cmd_printf (" e\n");
			xpt_done(ccb);
			debug_asr_cmd_printf (" q\n");
			break;
		}
		/* Diagnostic only: a CCB arriving here should be in-progress. */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			printf(
			  "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ccb->csio.cdb_io.cdb_bytes[0],
			  cam_sim_bus(sim),
			  ccb->ccb_h.target_id,
			  ccb->ccb_h.target_lun);
		}
		debug_asr_cmd_printf ("(%d,%d,%d,%d)",
		  cam_sim_unit(sim),
		  cam_sim_bus(sim),
		  ccb->ccb_h.target_id,
		  ccb->ccb_h.target_lun);
		debug_asr_cmd_dump_ccb(ccb);

		/*
		 * Build the I2O message for this CCB; a NULL return means no
		 * valid TID exists for the addressed device (see below).
		 */
		if ((Message_Ptr = ASR_init_message ((union asr_ccb *)ccb,
		  (PI2O_MESSAGE_FRAME)Message)) != (PI2O_MESSAGE_FRAME)NULL) {
			debug_asr_cmd2_printf ("TID=%x:\n",
			  PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
			debug_asr_cmd2_dump_message(Message_Ptr);
			debug_asr_cmd1_printf (" q");

			/* EMPTY_QUEUE: no free inbound frame, ask CAM to retry. */
			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
#ifdef ASR_MEASURE_PERFORMANCE
				++sc->ha_performance.command_too_busy;
#endif
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
				debug_asr_cmd_printf (" E\n");
				xpt_done(ccb);
			}
			debug_asr_cmd_printf (" Q\n");
			break;
		}
		/*
		 * We will get here if there is no valid TID for the device
		 * referenced in the scsi command packet.
		 */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
		debug_asr_cmd_printf (" B\n");
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		/* Reset HBA device ... */
		asr_hbareset (sc);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

/*
 * NOTE(review): REPORT_LUNS is a SCSI opcode, not an XPT_* function code;
 * grouping it with XPT_ABORT here looks questionable — confirm intent.
 */
# if (defined(REPORT_LUNS))
	case REPORT_LUNS:
# endif
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* XXX Implement */
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct	ccb_trans_settings *cts;
		u_int	target_mask;

		cts = &(ccb->cts);
		target_mask = 0x01 << ccb->ccb_h.target_id;
		/*
		 * Only user settings are reported; values are fixed
		 * (wide, tagged, 40MHz) rather than read from the IOP.
		 */
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
			cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			cts->sync_period = 6; /* 40MHz */
			cts->sync_offset = 15;

			cts->valid = CCB_TRANS_SYNC_RATE_VALID
				   | CCB_TRANS_SYNC_OFFSET_VALID
				   | CCB_TRANS_BUS_WIDTH_VALID
				   | CCB_TRANS_DISC_VALID
				   | CCB_TRANS_TQ_VALID;
			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		}
		xpt_done(ccb);
		break;
	}

	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;

		/* Standard extended-translation heads/sectors heuristic. */
		ccg = &(ccb->ccg);
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);

		if (size_mb > 4096) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else if (size_mb > 2048) {
			ccg->heads = 128;
			ccg->secs_per_track = 63;
		} else if (size_mb > 1024) {
			ccg->heads = 65;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
		ASR_resetBus (sc, cam_sim_bus(sim));
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &(ccb->cpi);

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		/* Not necessary to reset bus, done by HDM initialization */
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sc->ha_MaxId;
		cpi->max_lun = sc->ha_MaxLun;
		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
} /* asr_action */
3398
3399#ifdef ASR_MEASURE_PERFORMANCE
3400#define WRITE_OP 1
3401#define READ_OP 2
3402#define min_submitR sc->ha_performance.read_by_size_min_time[index]
3403#define max_submitR sc->ha_performance.read_by_size_max_time[index]
3404#define min_submitW sc->ha_performance.write_by_size_min_time[index]
3405#define max_submitW sc->ha_performance.write_by_size_max_time[index]
3406
3407STATIC INLINE void
3408asr_IObySize(
3409 IN Asr_softc_t * sc,
3410 IN u_int32_t submitted_time,
3411 IN int op,
3412 IN int index)
3413{
3414 struct timeval submitted_timeval;
3415
3416 submitted_timeval.tv_sec = 0;
3417 submitted_timeval.tv_usec = submitted_time;
3418
3419 if ( op == READ_OP ) {
3420 ++sc->ha_performance.read_by_size_count[index];
3421
3422 if ( submitted_time != 0xffffffff ) {
3423 timevaladd(
3424 &(sc->ha_performance.read_by_size_total_time[index]),
3425 &submitted_timeval);
3426 if ( (min_submitR == 0)
3427 || (submitted_time < min_submitR) ) {
3428 min_submitR = submitted_time;
3429 }
3430
3431 if ( submitted_time > max_submitR ) {
3432 max_submitR = submitted_time;
3433 }
3434 }
3435 } else {
3436 ++sc->ha_performance.write_by_size_count[index];
3437 if ( submitted_time != 0xffffffff ) {
3438 timevaladd(
3439 &(sc->ha_performance.write_by_size_total_time[index]),
3440 &submitted_timeval);
3441 if ( (submitted_time < min_submitW)
3442 || (min_submitW == 0) ) {
3443 min_submitW = submitted_time;
3444 }
3445
3446 if ( submitted_time > max_submitW ) {
3447 max_submitW = submitted_time;
3448 }
3449 }
3450 }
3451} /* asr_IObySize */
3452#endif
3453
3454/*
3455 * Handle processing of current CCB as pointed to by the Status.
3456 */
3457STATIC int
3458asr_intr (
3459 IN Asr_softc_t * sc)
3460{
3461 OUT int processed;
3462
3463#ifdef ASR_MEASURE_PERFORMANCE
3464 struct timeval junk;
3465
3466 microtime(&junk);
3467 sc->ha_performance.intr_started = junk;
3468#endif
3469
3470 for (processed = 0;
3471 sc->ha_Virt->Status & Mask_InterruptsDisabled;
3472 processed = 1) {
3473 union asr_ccb * ccb;
3474 U32 ReplyOffset;
3475 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
3476
3477 if (((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)
3478 && ((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)) {
3479 break;
3480 }
3481 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
3482 - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
3483 /*
3484 * We do not need any (optional byteswapping) method access to
3485 * the Initiator context field.
3486 */
3487 ccb = (union asr_ccb *)(long)
3488 I2O_MESSAGE_FRAME_getInitiatorContext64(
3489 &(Reply->StdReplyFrame.StdMessageFrame));
3490 if (I2O_MESSAGE_FRAME_getMsgFlags(
3491 &(Reply->StdReplyFrame.StdMessageFrame))
3492 & I2O_MESSAGE_FLAGS_FAIL) {
3493 defAlignLong(I2O_UTIL_NOP_MESSAGE,Message);
3494 PI2O_UTIL_NOP_MESSAGE Message_Ptr;
3495 U32 MessageOffset;
3496
3497 MessageOffset = (u_long)
3498 I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
3499 (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
3500 /*
3501 * Get the Original Message Frame's address, and get
3502 * it's Transaction Context into our space. (Currently
3503 * unused at original authorship, but better to be
3504 * safe than sorry). Straight copy means that we
3505 * need not concern ourselves with the (optional
3506 * byteswapping) method access.
3507 */
3508 Reply->StdReplyFrame.TransactionContext
3509 = ((PI2O_SINGLE_REPLY_MESSAGE_FRAME)
3510 (sc->ha_Fvirt + MessageOffset))->TransactionContext;
3511 /*
3512 * For 64 bit machines, we need to reconstruct the
3513 * 64 bit context.
3514 */
3515 ccb = (union asr_ccb *)(long)
3516 I2O_MESSAGE_FRAME_getInitiatorContext64(
3517 &(Reply->StdReplyFrame.StdMessageFrame));
3518 /*
3519 * Unique error code for command failure.
3520 */
3521 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3522 &(Reply->StdReplyFrame), (u_int16_t)-2);
3523 /*
3524 * Modify the message frame to contain a NOP and
3525 * re-issue it to the controller.
3526 */
3527 Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
3528 Message, sizeof(I2O_UTIL_NOP_MESSAGE));
3529# if (I2O_UTIL_NOP != 0)
3530 I2O_MESSAGE_FRAME_setFunction (
3531 &(Message_Ptr->StdMessageFrame),
3532 I2O_UTIL_NOP);
3533# endif
3534 /*
3535 * Copy the packet out to the Original Message
3536 */
3537 bcopy ((caddr_t)Message_Ptr,
3538 sc->ha_Fvirt + MessageOffset,
3539 sizeof(I2O_UTIL_NOP_MESSAGE));
3540 /*
3541 * Issue the NOP
3542 */
3543 sc->ha_Virt->ToFIFO = MessageOffset;
3544 }
3545
3546 /*
3547 * Asynchronous command with no return requirements,
3548 * and a generic handler for immunity against odd error
3549 * returns from the adapter.
3550 */
3551 if (ccb == (union asr_ccb *)NULL) {
3552 /*
3553 * Return Reply so that it can be used for the
3554 * next command
3555 */
3556 sc->ha_Virt->FromFIFO = ReplyOffset;
3557 continue;
3558 }
3559
3560 /* Welease Wadjah! (and stop timeouts) */
3561 ASR_ccbRemove (sc, ccb);
3562
3563 switch (
3564 I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
3565 &(Reply->StdReplyFrame))) {
3566
3567 case I2O_SCSI_DSC_SUCCESS:
3568 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3569 ccb->ccb_h.status |= CAM_REQ_CMP;
3570 break;
3571
3572 case I2O_SCSI_DSC_CHECK_CONDITION:
3573 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3574 ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID;
3575 break;
3576
3577 case I2O_SCSI_DSC_BUSY:
3578 /* FALLTHRU */
3579 case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
3580 /* FALLTHRU */
3581 case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
3582 /* FALLTHRU */
3583 case I2O_SCSI_HBA_DSC_BUS_BUSY:
3584 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3585 ccb->ccb_h.status |= CAM_SCSI_BUSY;
3586 break;
3587
3588 case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
3589 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3590 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3591 break;
3592
3593 case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
3594 /* FALLTHRU */
3595 case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
3596 /* FALLTHRU */
3597 case I2O_SCSI_HBA_DSC_LUN_INVALID:
3598 /* FALLTHRU */
3599 case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
3600 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3601 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
3602 break;
3603
3604 case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
3605 /* FALLTHRU */
3606 case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
3607 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3608 ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
3609 break;
3610
3611 default:
3612 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3613 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3614 break;
3615 }
3616 if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
3617 ccb->csio.resid -=
3618 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
3619 Reply);
3620 }
3621
3622#ifdef ASR_MEASURE_PERFORMANCE
3623 {
3624 struct timeval endTime;
3625 u_int32_t submitted_time;
3626 u_int32_t size;
3627 int op_type;
3628 int startTimeIndex;
3629
3630 --sc->ha_submitted_ccbs_count;
3631 startTimeIndex
3632 = (int)Reply->StdReplyFrame.TransactionContext;
3633 if (-1 != startTimeIndex) {
3634 /* Compute the time spent in device/adapter */
3635 microtime(&endTime);
3636 submitted_time = asr_time_delta(sc->ha_timeQ[
3637 startTimeIndex], endTime);
3638 /* put the startTimeIndex back on free list */
3639 ENQ_TIMEQ_FREE_LIST(startTimeIndex,
3640 sc->ha_timeQFreeList,
3641 sc->ha_timeQFreeHead,
3642 sc->ha_timeQFreeTail);
3643 } else {
3644 submitted_time = 0xffffffff;
3645 }
3646
3647#define maxctime sc->ha_performance.max_command_time[ccb->csio.cdb_io.cdb_bytes[0]]
3648#define minctime sc->ha_performance.min_command_time[ccb->csio.cdb_io.cdb_bytes[0]]
3649 if (submitted_time != 0xffffffff) {
3650 if ( maxctime < submitted_time ) {
3651 maxctime = submitted_time;
3652 }
3653 if ( (minctime == 0)
3654 || (minctime > submitted_time) ) {
3655 minctime = submitted_time;
3656 }
3657
3658 if ( sc->ha_performance.max_submit_time
3659 < submitted_time ) {
3660 sc->ha_performance.max_submit_time
3661 = submitted_time;
3662 }
3663 if ( sc->ha_performance.min_submit_time == 0
3664 || sc->ha_performance.min_submit_time
3665 > submitted_time) {
3666 sc->ha_performance.min_submit_time
3667 = submitted_time;
3668 }
3669
3670 switch ( ccb->csio.cdb_io.cdb_bytes[0] ) {
3671
3672 case 0xa8: /* 12-byte READ */
3673 /* FALLTHRU */
3674 case 0x08: /* 6-byte READ */
3675 /* FALLTHRU */
3676 case 0x28: /* 10-byte READ */
3677 op_type = READ_OP;
3678 break;
3679
3680 case 0x0a: /* 6-byte WRITE */
3681 /* FALLTHRU */
3682 case 0xaa: /* 12-byte WRITE */
3683 /* FALLTHRU */
3684 case 0x2a: /* 10-byte WRITE */
3685 op_type = WRITE_OP;
3686 break;
3687
3688 default:
3689 op_type = 0;
3690 break;
3691 }
3692
3693 if ( op_type != 0 ) {
3694 struct scsi_rw_big * cmd;
3695
3696 cmd = (struct scsi_rw_big *)
3697 &(ccb->csio.cdb_io);
3698
3699 size = (((u_int32_t) cmd->length2 << 8)
3700 | ((u_int32_t) cmd->length1)) << 9;
3701
3702 switch ( size ) {
3703
3704 case 512:
3705 asr_IObySize(sc,
3706 submitted_time, op_type,
3707 SIZE_512);
3708 break;
3709
3710 case 1024:
3711 asr_IObySize(sc,
3712 submitted_time, op_type,
3713 SIZE_1K);
3714 break;
3715
3716 case 2048:
3717 asr_IObySize(sc,
3718 submitted_time, op_type,
3719 SIZE_2K);
3720 break;
3721
3722 case 4096:
3723 asr_IObySize(sc,
3724 submitted_time, op_type,
3725 SIZE_4K);
3726 break;
3727
3728 case 8192:
3729 asr_IObySize(sc,
3730 submitted_time, op_type,
3731 SIZE_8K);
3732 break;
3733
3734 case 16384:
3735 asr_IObySize(sc,
3736 submitted_time, op_type,
3737 SIZE_16K);
3738 break;
3739
3740 case 32768:
3741 asr_IObySize(sc,
3742 submitted_time, op_type,
3743 SIZE_32K);
3744 break;
3745
3746 case 65536:
3747 asr_IObySize(sc,
3748 submitted_time, op_type,
3749 SIZE_64K);
3750 break;
3751
3752 default:
3753 if ( size > (1 << 16) ) {
3754 asr_IObySize(sc,
3755 submitted_time,
3756 op_type,
3757 SIZE_BIGGER);
3758 } else {
3759 asr_IObySize(sc,
3760 submitted_time,
3761 op_type,
3762 SIZE_OTHER);
3763 }
3764 break;
3765 }
3766 }
3767 }
3768 }
3769#endif
3770 /* Sense data in reply packet */
3771 if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
3772 u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);
3773
3774 if (size) {
3775 if (size > sizeof(ccb->csio.sense_data)) {
3776 size = sizeof(ccb->csio.sense_data);
3777 }
3778 if (size > I2O_SCSI_SENSE_DATA_SZ) {
3779 size = I2O_SCSI_SENSE_DATA_SZ;
3780 }
3781 if ((ccb->csio.sense_len)
3782 && (size > ccb->csio.sense_len)) {
3783 size = ccb->csio.sense_len;
3784 }
3785 bcopy ((caddr_t)Reply->SenseData,
3786 (caddr_t)&(ccb->csio.sense_data), size);
3787 }
3788 }
3789
3790 /*
3791 * Return Reply so that it can be used for the next command
3792 * since we have no more need for it now
3793 */
3794 sc->ha_Virt->FromFIFO = ReplyOffset;
3795
3796 if (ccb->ccb_h.path) {
3797 xpt_done ((union ccb *)ccb);
3798 } else {
3799 wakeup ((caddr_t)ccb);
3800 }
3801 }
3802#ifdef ASR_MEASURE_PERFORMANCE
3803 {
3804 u_int32_t result;
3805
3806 microtime(&junk);
3807 result = asr_time_delta(sc->ha_performance.intr_started, junk);
3808
3809 if (result != 0xffffffff) {
3810 if ( sc->ha_performance.max_intr_time < result ) {
3811 sc->ha_performance.max_intr_time = result;
3812 }
3813
3814 if ( (sc->ha_performance.min_intr_time == 0)
3815 || (sc->ha_performance.min_intr_time > result) ) {
3816 sc->ha_performance.min_intr_time = result;
3817 }
3818 }
3819 }
3820#endif
3821 return (processed);
3822} /* asr_intr */
3823
3824#undef QueueSize /* Grrrr */
3825#undef SG_Size /* Grrrr */
3826
3827/*
3828 * Meant to be included at the bottom of asr.c !!!
3829 */
3830
3831/*
3832 * Included here as hard coded. Done because other necessary include
3833 * files utilize C++ comment structures which make them a nuisance to
3834 * included here just to pick up these three typedefs.
3835 */
3836typedef U32 DPT_TAG_T;
3837typedef U32 DPT_MSG_T;
3838typedef U32 DPT_RTN_T;
3839
3840#undef SCSI_RESET /* Conflicts with "scsi/scsiconf.h" defintion */
1f2de5d4 3841#include "osd_unix.h"
984263bc
MD
3842
3843#define asr_unit(dev) minor(dev)
3844
3845STATIC INLINE Asr_softc_t *
3846ASR_get_sc (
3847 IN dev_t dev)
3848{
3849 int unit = asr_unit(dev);
3850 OUT Asr_softc_t * sc = Asr_softc;
3851
3852 while (sc && sc->ha_sim[0] && (cam_sim_unit(sc->ha_sim[0]) != unit)) {
3853 sc = sc->ha_next;
3854 }
3855 return (sc);
3856} /* ASR_get_sc */
3857
3858STATIC u_int8_t ASR_ctlr_held;