1 /* $FreeBSD: src/sys/dev/asr/asr.c,v 1.3.2.2 2001/08/23 05:21:29 scottl Exp $ */
2 /* $DragonFly: src/sys/dev/raid/asr/asr.c,v 1.17 2004/08/23 16:13:03 joerg Exp $ */
4 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
5 * Copyright (c) 2000-2001 Adaptec Corporation
8 * TERMS AND CONDITIONS OF USE
10 * Redistribution and use in source form, with or without modification, are
11 * permitted provided that redistributions of source code must retain the
12 * above copyright notice, this list of conditions and the following disclaimer.
14 * This software is provided `as is' by Adaptec and any express or implied
15 * warranties, including, but not limited to, the implied warranties of
16 * merchantability and fitness for a particular purpose, are disclaimed. In no
17 * event shall Adaptec be liable for any direct, indirect, incidental, special,
18 * exemplary or consequential damages (including, but not limited to,
19 * procurement of substitute goods or services; loss of use, data, or profits;
20 * or business interruptions) however caused and on any theory of liability,
21 * whether in contract, strict liability, or tort (including negligence or
22 * otherwise) arising in any way out of the use of this driver software, even
23 * if advised of the possibility of such damage.
25 * SCSI I2O host adapter driver
27 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
28 * - The 2000S and 2005S do not initialize on some machines,
29 * increased timeout to 255ms from 50ms for the StatusGet
31 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
32 * - I knew this one was too good to be true. The error return
33 * on ioctl commands needs to be compared to CAM_REQ_CMP, not
34 * to the bit masked status.
35 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
36 * - The 2005S that was supported is affectionately called the
37 * Conjoined BAR Firmware. In order to support RAID-5 in a
38 * 16MB low-cost configuration, Firmware was forced to go
39 * to a Split BAR Firmware. This requires a separate IOP and
40 * Messaging base address.
41 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
42 * - Handle support for 2005S Zero Channel RAID solution.
43 * - System locked up if the Adapter locked up. Do not try
44 * to send other commands if the resetIOP command fails. The
45 * fail outstanding command discovery loop was flawed as the
46 * removal of the command from the list prevented discovering
48 * - Comment changes to clarify driver.
49 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
50 * - We do not use the AC_FOUND_DEV event because of I2O.
52 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
53 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
54 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0
55 * mode as this is confused with competitor adapters in run
57 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove
58 * to prevent operating system panic.
59 * - moved default major number to 154 from 97.
60 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
61 * - The controller is not actually an ASR (Adaptec SCSI RAID)
62 * series that is visible, it's more of an internal code name.
63 * remove any visible references within reason for now.
64 * - bus_ptr->LUN was not correctly zeroed when initially
65 * allocated causing a possible panic of the operating system
67 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
68 * - Code always fails for ASR_getTid affecting performance.
69 * - initiated a set of changes that resulted from a formal
70 * code inspection by Mark_Salyzyn@adaptec.com,
71 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
72 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
73 * Their findings were focussed on the LCT & TID handler, and
74 * all resulting changes were to improve code readability,
75 * consistency or have a positive effect on performance.
76 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
77 * - Passthrough returned an incorrect error.
78 * - Passthrough did not migrate the intrinsic scsi layer wakeup
79 * on command completion.
80 * - generate control device nodes using make_dev and delete_dev.
81 * - Performance affected by TID caching reallocing.
82 * - Made suggested changes by Justin_Gibbs@adaptec.com
83 * - use splcam instead of splbio.
84 * - use cam_imask instead of bio_imask.
85 * - use u_int8_t instead of u_char.
86 * - use u_int16_t instead of u_short.
87 * - use u_int32_t instead of u_long where appropriate.
88 * - use 64 bit context handler instead of 32 bit.
89 * - create_ccb should only allocate the worst case
90 * requirements for the driver since CAM may evolve
91 * making union ccb much larger than needed here.
92 * renamed create_ccb to asr_alloc_ccb.
93 * - go nutz justifying all debug prints as macros
94 * defined at the top and remove unsightly ifdefs.
95 * - INLINE STATIC viewed as confusing. Historically
96 * utilized to affect code performance and debug
97 * issues in OS, Compiler or OEM specific situations.
98 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
99 * - Ported from FreeBSD 2.2.X DPT I2O driver.
100 * changed struct scsi_xfer to union ccb/struct ccb_hdr
101 * changed variable name xs to ccb
102 * changed struct scsi_link to struct cam_path
103 * changed struct scsibus_data to struct cam_sim
104 * stopped using fordriver for holding on to the TID
105 * use proprietary packet creation instead of scsi_inquire
106 * CAM layer sends synchronize commands.
109 #define ASR_VERSION 1
110 #define ASR_REVISION '0'
111 #define ASR_SUBREVISION '8'
114 #define ASR_YEAR 2001 - 1980
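/* dpt_sig stores the year as an offset from 1980, so 2001 is encoded as 21 */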
117 * Debug macros to reduce the unsightly ifdefs
119 #if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
120 # define debug_asr_message(message) \
122 u_int32_t * pointer = (u_int32_t *)message; \
123 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);\
124 u_int32_t counter = 0; \
127 printf ("%08lx%c", (u_long)*(pointer++), \
128 (((++counter & 7) == 0) || (length == 0)) \
133 #endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
135 #if (defined(DEBUG_ASR))
136 /* Breaks on non-STDC based compilers :-( */
137 # define debug_asr_printf(fmt,args...) printf(fmt, ##args)
138 # define debug_asr_dump_message(message) debug_asr_message(message)
139 # define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path);
140 /* Non-fatal version of the ASSERT macro */
141 # if (defined(__STDC__))
142 # define ASSERT(phrase) if(!(phrase))printf(#phrase " at line %d file %s\n",__LINE__,__FILE__)
144 # define ASSERT(phrase) if(!(phrase))printf("phrase" " at line %d file %s\n",__LINE__,__FILE__)
146 #else /* DEBUG_ASR */
147 # define debug_asr_printf(fmt,args...)
148 # define debug_asr_dump_message(message)
149 # define debug_asr_print_path(ccb)
151 #endif /* DEBUG_ASR */
154 * If DEBUG_ASR_CMD is defined:
155 * 0 - Display incoming SCSI commands
156 * 1 - add in a quick character before queueing.
157 * 2 - add in outgoing message frames.
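 *
 * For illustration only: these are compile-time levels, so a build could
 * enable the most verbose tracing with something like -DDEBUG_ASR_CMD=2
 * on the compiler command line (the exact build hookup is assumed here,
 * not taken from this driver).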
159 #if (defined(DEBUG_ASR_CMD))
160 # define debug_asr_cmd_printf(fmt,args...) printf(fmt,##args)
161 # define debug_asr_dump_ccb(ccb) \
163 u_int8_t * cp = (unsigned char *)&(ccb->csio.cdb_io); \
164 int len = ccb->csio.cdb_len; \
167 debug_asr_cmd_printf (" %02x", *(cp++)); \
171 # if (DEBUG_ASR_CMD > 0)
172 # define debug_asr_cmd1_printf debug_asr_cmd_printf
174 # define debug_asr_cmd1_printf(fmt,args...)
176 # if (DEBUG_ASR_CMD > 1)
177 # define debug_asr_cmd2_printf debug_asr_cmd_printf
178 # define debug_asr_cmd2_dump_message(message) debug_asr_message(message)
180 # define debug_asr_cmd2_printf(fmt,args...)
181 # define debug_asr_cmd2_dump_message(message)
183 #else /* DEBUG_ASR_CMD */
184 # define debug_asr_cmd_printf(fmt,args...)
185 # define debug_asr_cmd_dump_ccb(ccb)
186 # define debug_asr_cmd1_printf(fmt,args...)
187 # define debug_asr_cmd2_printf(fmt,args...)
188 # define debug_asr_cmd2_dump_message(message)
189 #endif /* DEBUG_ASR_CMD */
191 #if (defined(DEBUG_ASR_USR_CMD))
192 # define debug_usr_cmd_printf(fmt,args...) printf(fmt,##args)
193 # define debug_usr_cmd_dump_message(message) debug_usr_message(message)
194 #else /* DEBUG_ASR_USR_CMD */
195 # define debug_usr_cmd_printf(fmt,args...)
196 # define debug_usr_cmd_dump_message(message)
197 #endif /* DEBUG_ASR_USR_CMD */
199 #define dsDescription_size 46 /* Snug as a bug in a rug */
202 static dpt_sig_S ASR_sig = {
203 { 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
204 PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
205 OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL,
207 0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
208 ASR_MONTH, ASR_DAY, ASR_YEAR,
209 /* 01234567890123456789012345678901234567890123456789 < 50 chars */
210 "Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
211 /* ^^^^^ asr_attach alters these to match OS */
214 #include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */
215 #include <sys/kernel.h>
216 #include <sys/systm.h>
217 #include <sys/malloc.h>
218 #include <sys/proc.h>
219 #include <sys/conf.h>
220 #include <sys/disklabel.h>
222 #include <machine/resource.h>
223 #include <machine/bus.h>
224 #include <sys/rman.h>
225 #include <sys/stat.h>
226 #include <sys/device.h>
228 #include <bus/cam/cam.h>
229 #include <bus/cam/cam_ccb.h>
230 #include <bus/cam/cam_sim.h>
231 #include <bus/cam/cam_xpt_sim.h>
232 #include <bus/cam/cam_xpt_periph.h>
234 #include <bus/cam/scsi/scsi_all.h>
235 #include <bus/cam/scsi/scsi_message.h>
239 #include <machine/cputypes.h>
240 #include <machine/clock.h>
241 #include <i386/include/vmparam.h>
243 #include <bus/pci/pcivar.h>
244 #include <bus/pci/pcireg.h>
246 #define STATIC static
249 #if (defined(DEBUG_ASR) && (DEBUG_ASR > 0))
259 #define osdSwap4(x) ((u_long)ntohl((u_long)(x)))
260 #define KVTOPHYS(x) vtophys(x)
261 #include "dptalign.h"
263 #include "i2obscsi.h"
265 #include "i2oadptr.h"
266 #include "sys_info.h"
268 /* Configuration Definitions */
270 #define SG_SIZE 58 /* Scatter Gather list Size */
271 #define MAX_TARGET_ID 126 /* Maximum Target ID supported */
272 #define MAX_LUN 255 /* Maximum LUN Supported */
273 #define MAX_CHANNEL 7 /* Maximum Channel # Supported by driver */
274 #define MAX_INBOUND 2000 /* Max CCBs, Also Max Queue Size */
275 #define MAX_OUTBOUND 256 /* Maximum outbound frames/adapter */
276 #define MAX_INBOUND_SIZE 512 /* Maximum inbound frame size */
277 #define MAX_MAP 4194304L /* Maximum mapping size of IOP */
278 /* Also serves as the minimum map for */
279 /* the 2005S zero channel RAID product */
281 /**************************************************************************
282 ** ASR Host Adapter structure - One Structure For Each Host Adapter That **
283 ** Is Configured Into The System. The Structure Supplies Configuration **
284 ** Information, Status Info, Queue Info And An Active CCB List Pointer. **
285 ***************************************************************************/
287 /* I2O register set */
292 # define Mask_InterruptsDisabled 0x08
294 volatile U32 ToFIFO; /* In Bound FIFO */
295 volatile U32 FromFIFO; /* Out Bound FIFO */
299 * A MIX of performance and space considerations for TID lookups
301 typedef u_int16_t tid_t;
304 u_int32_t size; /* up to MAX_LUN */
309 u_int32_t size; /* up to MAX_TARGET */
314 * To ensure that we only allocate and use the worst case ccb here, let's
315 * make our own local ccb union. If asr_alloc_ccb is utilized for another
316 * ccb type, ensure that you add the additional structures into our local
317 * ccb union. To ensure strict type checking, we will utilize the local
318 * ccb definition wherever possible.
321 struct ccb_hdr ccb_h; /* For convenience */
322 struct ccb_scsiio csio;
323 struct ccb_setasync csa;
326 typedef struct Asr_softc {
328 void * ha_Base; /* base port for each board */
329 u_int8_t * volatile ha_blinkLED;
330 i2oRegs_t * ha_Virt; /* Base address of IOP */
331 U8 * ha_Fvirt; /* Base address of Frames */
332 I2O_IOP_ENTRY ha_SystemTable;
333 LIST_HEAD(,ccb_hdr) ha_ccb; /* ccbs in use */
334 struct cam_path * ha_path[MAX_CHANNEL+1];
335 struct cam_sim * ha_sim[MAX_CHANNEL+1];
336 struct resource * ha_mem_res;
337 struct resource * ha_mes_res;
338 struct resource * ha_irq_res;
340 PI2O_LCT ha_LCT; /* Complete list of devices */
341 # define le_type IdentityTag[0]
342 # define I2O_BSA 0x20
343 # define I2O_FCA 0x40
344 # define I2O_SCSI 0x00
345 # define I2O_PORT 0x80
346 # define I2O_UNKNOWN 0x7F
347 # define le_bus IdentityTag[1]
348 # define le_target IdentityTag[2]
349 # define le_lun IdentityTag[3]
350 target2lun_t * ha_targets[MAX_CHANNEL+1];
351 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
354 u_int8_t ha_in_reset;
355 # define HA_OPERATIONAL 0
356 # define HA_IN_RESET 1
357 # define HA_OFF_LINE 2
358 # define HA_OFF_LINE_RECOVERY 3
359 /* Configuration information */
360 /* The target id maximums we take */
361 u_int8_t ha_MaxBus; /* Maximum bus */
362 u_int8_t ha_MaxId; /* Maximum target ID */
363 u_int8_t ha_MaxLun; /* Maximum target LUN */
364 u_int8_t ha_SgSize; /* Max SG elements */
365 u_int8_t ha_pciBusNum;
366 u_int8_t ha_pciDeviceNum;
367 u_int8_t ha_adapter_target[MAX_CHANNEL+1];
368 u_int16_t ha_QueueSize; /* Max outstanding commands */
369 u_int16_t ha_Msgs_Count;
371 /* Links into other parents and HBAs */
372 struct Asr_softc * ha_next; /* HBA list */
375 STATIC Asr_softc_t * Asr_softc;
378 * Prototypes of the routines we have in this object.
381 /* Externally callable routines */
382 #define PROBE_ARGS IN device_t tag
383 #define PROBE_RET int
384 #define PROBE_SET() u_long id = (pci_get_device(tag)<<16)|pci_get_vendor(tag)
385 #define PROBE_RETURN(retval) if(retval){device_set_desc(tag,retval);return(0);}else{return(ENXIO);}
386 #define ATTACH_ARGS IN device_t tag
387 #define ATTACH_RET int
388 #define ATTACH_SET() int unit = device_get_unit(tag)
389 #define ATTACH_RETURN(retval) return(retval)
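/*
 * Note: the PROBE_*() and ATTACH_*() macros above wrap the newbus
 * probe/attach calling convention (device_t argument, int return, id and
 * unit extraction) so that the asr/domino/mode0 probe and attach bodies
 * below stay independent of that glue.
 */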
390 /* I2O HDM interface */
391 STATIC PROBE_RET asr_probe (PROBE_ARGS);
392 STATIC ATTACH_RET asr_attach (ATTACH_ARGS);
393 /* DOMINO placeholder */
394 STATIC PROBE_RET domino_probe (PROBE_ARGS);
395 STATIC ATTACH_RET domino_attach (ATTACH_ARGS);
396 /* MODE0 adapter placeholder */
397 STATIC PROBE_RET mode0_probe (PROBE_ARGS);
398 STATIC ATTACH_RET mode0_attach (ATTACH_ARGS);
400 STATIC Asr_softc_t * ASR_get_sc (
402 STATIC int asr_ioctl (
408 STATIC int asr_open (
413 STATIC int asr_close (
418 STATIC int asr_intr (
419 IN Asr_softc_t * sc);
420 STATIC void asr_timeout (
422 STATIC int ASR_init (
423 IN Asr_softc_t * sc);
424 STATIC INLINE int ASR_acquireLct (
425 INOUT Asr_softc_t * sc);
426 STATIC INLINE int ASR_acquireHrt (
427 INOUT Asr_softc_t * sc);
428 STATIC void asr_action (
429 IN struct cam_sim * sim,
431 STATIC void asr_poll (
432 IN struct cam_sim * sim);
435 * Here is the auto-probe structure used to nest our tests appropriately
436 * during the startup phase of the operating system.
438 STATIC device_method_t asr_methods[] = {
439 DEVMETHOD(device_probe, asr_probe),
440 DEVMETHOD(device_attach, asr_attach),
444 STATIC driver_t asr_driver = {
450 STATIC devclass_t asr_devclass;
452 DECLARE_DUMMY_MODULE(asr);
453 DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);
455 STATIC device_method_t domino_methods[] = {
456 DEVMETHOD(device_probe, domino_probe),
457 DEVMETHOD(device_attach, domino_attach),
461 STATIC driver_t domino_driver = {
467 STATIC devclass_t domino_devclass;
469 DRIVER_MODULE(domino, pci, domino_driver, domino_devclass, 0, 0);
471 STATIC device_method_t mode0_methods[] = {
472 DEVMETHOD(device_probe, mode0_probe),
473 DEVMETHOD(device_attach, mode0_attach),
477 STATIC driver_t mode0_driver = {
483 STATIC devclass_t mode0_devclass;
485 DRIVER_MODULE(mode0, pci, mode0_driver, mode0_devclass, 0, 0);
488 * devsw for asr hba driver
490 * only ioctl is used. the sd driver provides all other access.
492 #define CDEV_MAJOR 154 /* preferred default character major */
493 STATIC struct cdevsw asr_cdevsw = {
495 CDEV_MAJOR, /* maj */
501 asr_close, /* close */
504 asr_ioctl, /* ioctl */
507 nostrategy, /* strategy */
513 * Initialize the dynamic cdevsw hooks.
516 asr_drvinit (void * unused)
518 static int asr_devsw_installed = 0;
520 if (asr_devsw_installed) {
523 asr_devsw_installed++;
525 * Find a free spot (the major number reported during driver load is
526 * used by the osd layer in the engine to generate the controlling nodes).
528 * XXX this is garbage code, store a unit number in asr_cdevsw
529 * and iterate through that instead?
531 while (asr_cdevsw.d_maj < NUMCDEVSW &&
532 cdevsw_get(asr_cdevsw.d_maj, -1) != NULL
536 if (asr_cdevsw.d_maj >= NUMCDEVSW) {
537 asr_cdevsw.d_maj = 0;
538 while (asr_cdevsw.d_maj < CDEV_MAJOR &&
539 cdevsw_get(asr_cdevsw.d_maj, -1) != NULL
548 cdevsw_add(&asr_cdevsw, 0, 0);
551 /* Must initialize before CAM layer picks up our HBA driver */
552 SYSINIT(asrdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,asr_drvinit,NULL)
554 /* I2O support routines */
555 #define defAlignLong(STRUCT,NAME) char NAME[sizeof(STRUCT)]
556 #define getAlignLong(STRUCT,NAME) ((STRUCT *)(NAME))
559 * Fill message with default.
561 STATIC PI2O_MESSAGE_FRAME
566 OUT PI2O_MESSAGE_FRAME Message_Ptr;
568 Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message);
569 bzero ((void *)Message_Ptr, size);
570 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
571 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
572 (size + sizeof(U32) - 1) >> 2);
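/* Illustrative arithmetic: a 41-byte frame yields (41 + 3) >> 2 = 11
 * 32-bit words in the MessageSize field. */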
573 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
574 return (Message_Ptr);
575 } /* ASR_fillMessage */
577 #define EMPTY_QUEUE ((U32)-1L)
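/* A FIFO read of all ones means no message frame is currently available */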
583 OUT U32 MessageOffset;
585 if ((MessageOffset = virt->ToFIFO) == EMPTY_QUEUE) {
586 MessageOffset = virt->ToFIFO;
588 return (MessageOffset);
589 } /* ASR_getMessage */
591 /* Issue a polled command */
594 INOUT i2oRegs_t * virt,
596 IN PI2O_MESSAGE_FRAME Message)
603 * ASR_initiateCp is only used for synchronous commands and will
604 * be made more resilient to adapter delays since commands like
605 * resetIOP can cause the adapter to be deaf for a little time.
607 while (((MessageOffset = ASR_getMessage(virt)) == EMPTY_QUEUE)
611 if (MessageOffset != EMPTY_QUEUE) {
612 bcopy (Message, fvirt + MessageOffset,
613 I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
615 * Disable the Interrupts
617 virt->Mask = (Mask = virt->Mask) | Mask_InterruptsDisabled;
618 virt->ToFIFO = MessageOffset;
621 } /* ASR_initiateCp */
628 INOUT i2oRegs_t * virt,
631 struct resetMessage {
632 I2O_EXEC_IOP_RESET_MESSAGE M;
635 defAlignLong(struct resetMessage,Message);
636 PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
637 OUT U32 * volatile Reply_Ptr;
641 * Build up our copy of the Message.
643 Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(Message,
644 sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
645 I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
647 * Reset the Reply Status
649 *(Reply_Ptr = (U32 *)((char *)Message_Ptr
650 + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0;
651 I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
652 KVTOPHYS((void *)Reply_Ptr));
654 * Send the Message out
656 if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
658 * Wait for a response (Poll), timeouts are dangerous if
659 * the card is truly responsive. We assume response in 2s.
661 u_int8_t Delay = 200;
663 while ((*Reply_Ptr == 0) && (--Delay != 0)) {
667 * Re-enable the interrupts.
673 ASSERT (Old != (U32)-1L);
678 * Get the current state of the adapter
680 STATIC INLINE PI2O_EXEC_STATUS_GET_REPLY
682 INOUT i2oRegs_t * virt,
684 OUT PI2O_EXEC_STATUS_GET_REPLY buffer)
686 defAlignLong(I2O_EXEC_STATUS_GET_MESSAGE,Message);
687 PI2O_EXEC_STATUS_GET_MESSAGE Message_Ptr;
691 * Build up our copy of the Message.
693 Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(Message,
694 sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
695 I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
696 I2O_EXEC_STATUS_GET);
697 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
698 KVTOPHYS((void *)buffer));
699 /* This one is a Byte Count */
700 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
701 sizeof(I2O_EXEC_STATUS_GET_REPLY));
703 * Reset the Reply Status
705 bzero ((void *)buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
707 * Send the Message out
709 if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
711 * Wait for a response (Poll), timeouts are dangerous if
712 * the card is truly responsive. We assume response in 255ms.
714 u_int8_t Delay = 255;
716 while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
718 buffer = (PI2O_EXEC_STATUS_GET_REPLY)NULL;
724 * Re-enable the interrupts.
729 return ((PI2O_EXEC_STATUS_GET_REPLY)NULL);
730 } /* ASR_getStatus */
733 * Check if the device is a SCSI I2O HBA, and add it to the list.
737 * Probe for ASR controller. If we find it, we will use it.
741 asr_probe(PROBE_ARGS)
744 if ((id == 0xA5011044) || (id == 0xA5111044)) {
745 PROBE_RETURN ("Adaptec Caching SCSI RAID");
751 * Probe/Attach for DOMINO chipset.
754 domino_probe(PROBE_ARGS)
757 if (id == 0x10121044) {
758 PROBE_RETURN ("Adaptec Caching Memory Controller");
764 domino_attach (ATTACH_ARGS)
767 } /* domino_attach */
770 * Probe/Attach for MODE0 adapters.
773 mode0_probe(PROBE_ARGS)
778 * If/When we can get a business case to commit to a
779 * Mode0 driver here, we can make all these tests more
780 * specific and robust. Mode0 adapters have their processors
781 * turned off, thus the chips are in a raw state.
784 /* This is a PLX9054 */
785 if (id == 0x905410B5) {
786 PROBE_RETURN ("Adaptec Mode0 PM3757");
788 /* This is a PLX9080 */
789 if (id == 0x908010B5) {
790 PROBE_RETURN ("Adaptec Mode0 PM3754/PM3755");
792 /* This is a ZION 80303 */
793 if (id == 0x53098086) {
794 PROBE_RETURN ("Adaptec Mode0 3010S");
796 /* This is an i960RS */
797 if (id == 0x39628086) {
798 PROBE_RETURN ("Adaptec Mode0 2100S");
800 /* This is an i960RN */
801 if (id == 0x19648086) {
802 PROBE_RETURN ("Adaptec Mode0 PM2865/2400A/3200S/3400S");
804 #if 0 /* this would match any generic i960 -- mjs */
805 /* This is an i960RP (typically also on Motherboards) */
806 if (id == 0x19608086) {
807 PROBE_RETURN ("Adaptec Mode0 PM2554/PM1554/PM2654");
814 mode0_attach (ATTACH_ARGS)
819 STATIC INLINE union asr_ccb *
823 OUT union asr_ccb * new_ccb;
825 if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb),
826 M_DEVBUF, M_WAITOK)) != (union asr_ccb *)NULL) {
827 bzero (new_ccb, sizeof(*new_ccb));
828 new_ccb->ccb_h.pinfo.priority = 1;
829 new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
830 new_ccb->ccb_h.spriv_ptr0 = sc;
833 } /* asr_alloc_ccb */
837 IN union asr_ccb * free_ccb)
839 free(free_ccb, M_DEVBUF);
843 * Print inquiry data `carefully'
850 while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
851 printf ("%c", *(s++));
858 STATIC INLINE int ASR_queue (
860 IN PI2O_MESSAGE_FRAME Message);
862 * Send a message synchronously and without Interrupt to a ccb.
866 INOUT union asr_ccb * ccb,
867 IN PI2O_MESSAGE_FRAME Message)
871 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
874 * We do not need any (optional byteswapping) method access to
875 * the Initiator context field.
877 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
879 /* Prevent interrupt service */
881 sc->ha_Virt->Mask = (Mask = sc->ha_Virt->Mask)
882 | Mask_InterruptsDisabled;
884 if (ASR_queue (sc, Message) == EMPTY_QUEUE) {
885 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
886 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
890 * Wait for this board to report a finished instruction.
892 while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
896 /* Re-enable Interrupts */
897 sc->ha_Virt->Mask = Mask;
900 return (ccb->ccb_h.status);
904 * Send a message synchronously to an Asr_softc_t
909 IN PI2O_MESSAGE_FRAME Message)
914 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
915 return (CAM_REQUEUE_REQ);
918 status = ASR_queue_s (ccb, Message);
926 * Add the specified ccb to the active queue
931 INOUT union asr_ccb * ccb)
936 LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
937 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
938 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
940 * RAID systems can take considerable time to
941 * complete some commands given the large cache
942 * flushes switching from write back to write thru.
944 ccb->ccb_h.timeout = 6 * 60 * 1000;
946 ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
947 (ccb->ccb_h.timeout * hz) / 1000);
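/* The (timeout * hz) / 1000 expression above converts the CAM timeout,
 * given in milliseconds, into the tick count expected by timeout(9). */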
953 * Remove the specified ccb from the active queue.
958 INOUT union asr_ccb * ccb)
963 untimeout(asr_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
964 LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
966 } /* ASR_ccbRemove */
969 * Fail all the active commands, so they get re-issued by the operating
973 ASR_failActiveCommands (
976 struct ccb_hdr * ccb;
979 #if 0 /* Currently handled by callers, unnecessary paranoia currently */
980 /* Left in for historical perspective. */
981 defAlignLong(I2O_EXEC_LCT_NOTIFY_MESSAGE,Message);
982 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
984 /* Send a blind LCT command to wait for the enableSys to complete */
985 Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)ASR_fillMessage(Message,
986 sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT));
987 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
988 I2O_EXEC_LCT_NOTIFY);
989 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
990 I2O_CLASS_MATCH_ANYCLASS);
991 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
996 * We do not need to inform the CAM layer that we had a bus
997 * reset since we manage it on our own; this also prevents the
998 * SCSI_DELAY settling that would be required on other systems.
999 * The `SCSI_DELAY' has already been handled by the card via the
1000 * acquisition of the LCT table while we are at CAM priority level.
1001 * for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
1002 * xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
1005 while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != (struct ccb_hdr *)NULL) {
1006 ASR_ccbRemove (sc, (union asr_ccb *)ccb);
1008 ccb->status &= ~CAM_STATUS_MASK;
1009 ccb->status |= CAM_REQUEUE_REQ;
1010 /* Nothing Transferred */
1011 ((struct ccb_scsiio *)ccb)->resid
1012 = ((struct ccb_scsiio *)ccb)->dxfer_len;
1015 xpt_done ((union ccb *)ccb);
1017 wakeup ((caddr_t)ccb);
1021 } /* ASR_failActiveCommands */
1024 * The following command causes the HBA to reset the specific bus
1028 IN Asr_softc_t * sc,
1031 defAlignLong(I2O_HBA_BUS_RESET_MESSAGE,Message);
1032 I2O_HBA_BUS_RESET_MESSAGE * Message_Ptr;
1033 PI2O_LCT_ENTRY Device;
1035 Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(Message,
1036 sizeof(I2O_HBA_BUS_RESET_MESSAGE));
1037 I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
1039 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
1040 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1042 if (((Device->le_type & I2O_PORT) != 0)
1043 && (Device->le_bus == bus)) {
1044 I2O_MESSAGE_FRAME_setTargetAddress(
1045 &Message_Ptr->StdMessageFrame,
1046 I2O_LCT_ENTRY_getLocalTID(Device));
1047 /* Asynchronous command, with no expectations */
1048 (void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1052 } /* ASR_resetBus */
1055 ASR_getBlinkLedCode (
1056 IN Asr_softc_t * sc)
1058 if ((sc != (Asr_softc_t *)NULL)
1059 && (sc->ha_blinkLED != (u_int8_t *)NULL)
1060 && (sc->ha_blinkLED[1] == 0xBC)) {
1061 return (sc->ha_blinkLED[0]);
1064 } /* ASR_getBlinkCode */
1067 * Determine the address of a TID lookup. Must be done at high priority
1068 * since the address can be changed by other threads of execution.
1070 * Returns NULL pointer if not indexable (but will attempt to generate
1071 * an index if `new_entry' flag is set to TRUE).
1073 * All addressable entries are to be guaranteed zero if never initialized.
1075 STATIC INLINE tid_t *
1077 INOUT Asr_softc_t * sc,
1083 target2lun_t * bus_ptr;
1084 lun2tid_t * target_ptr;
1088 * Validity checking of incoming parameters. More of a bound
1089 * expansion limit than an issue with the code dealing with the
1092 * sc must be valid before it gets here, so that check could be
1093 * dropped if speed is a critical issue.
1095 if ((sc == (Asr_softc_t *)NULL)
1096 || (bus > MAX_CHANNEL)
1097 || (target > sc->ha_MaxId)
1098 || (lun > sc->ha_MaxLun)) {
1099 debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
1100 (u_long)sc, bus, target, lun);
1101 return ((tid_t *)NULL);
1104 * See if there is an associated bus list.
1106 * for performance, allocate in size of BUS_CHUNK chunks.
1107 * BUS_CHUNK must be a power of two. This is to reduce
1108 * fragmentation effects on the allocations.
1110 # define BUS_CHUNK 8
1111 new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
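/* Example: with BUS_CHUNK 8, target 5 rounds new_size up to 8 and target 9
 * rounds up to 16 (illustrative values only). */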
1112 if ((bus_ptr = sc->ha_targets[bus]) == (target2lun_t *)NULL) {
1114 * Allocate a new structure?
1115 * Since one element in structure, the +1
1116 * needed for size has been abstracted.
1118 if ((new_entry == FALSE)
1119 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc (
1120 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
1122 == (target2lun_t *)NULL)) {
1123 debug_asr_printf("failed to allocate bus list\n");
1124 return ((tid_t *)NULL);
1126 bzero (bus_ptr, sizeof(*bus_ptr)
1127 + (sizeof(bus_ptr->LUN) * new_size));
1128 bus_ptr->size = new_size + 1;
1129 } else if (bus_ptr->size <= new_size) {
1130 target2lun_t * new_bus_ptr;
1133 * Reallocate a new structure?
1134 * Since one element in structure, the +1
1135 * needed for size has been abstracted.
1137 if ((new_entry == FALSE)
1138 || ((new_bus_ptr = (target2lun_t *)malloc (
1139 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
1141 == (target2lun_t *)NULL)) {
1142 debug_asr_printf("failed to reallocate bus list\n");
1143 return ((tid_t *)NULL);
1146 * Zero and copy the whole thing, safer, simpler coding
1147 * and not really performance critical at this point.
1149 bzero (new_bus_ptr, sizeof(*bus_ptr)
1150 + (sizeof(bus_ptr->LUN) * new_size));
1151 bcopy (bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
1152 + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
1153 sc->ha_targets[bus] = new_bus_ptr;
1154 free (bus_ptr, M_TEMP);
1155 bus_ptr = new_bus_ptr;
1156 bus_ptr->size = new_size + 1;
1159 * We now have the bus list, lets get to the target list.
1160 * Since most systems have only *one* lun, we do not allocate
1161 * in chunks as above; here we allow one, then grow in chunk sizes.
1162 * TARGET_CHUNK must be a power of two. This is to reduce
1163 * fragmentation effects on the allocations.
1165 # define TARGET_CHUNK 8
1166 if ((new_size = lun) != 0) {
1167 new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
1169 if ((target_ptr = bus_ptr->LUN[target]) == (lun2tid_t *)NULL) {
1171 * Allocate a new structure?
1172 * Since one element in structure, the +1
1173 * needed for size has been abstracted.
1175 if ((new_entry == FALSE)
1176 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc (
1177 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1179 == (lun2tid_t *)NULL)) {
1180 debug_asr_printf("failed to allocate target list\n");
1181 return ((tid_t *)NULL);
1183 bzero (target_ptr, sizeof(*target_ptr)
1184 + (sizeof(target_ptr->TID) * new_size));
1185 target_ptr->size = new_size + 1;
1186 } else if (target_ptr->size <= new_size) {
1187 lun2tid_t * new_target_ptr;
1190 * Reallocate a new structure?
1191 * Since one element in structure, the +1
1192 * needed for size has been abstracted.
1194 if ((new_entry == FALSE)
1195 || ((new_target_ptr = (lun2tid_t *)malloc (
1196 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1198 == (lun2tid_t *)NULL)) {
1199 debug_asr_printf("failed to reallocate target list\n");
1200 return ((tid_t *)NULL);
1203 * Zero and copy the whole thing, safer, simpler coding
1204 * and not really performance critical at this point.
1206 bzero (new_target_ptr, sizeof(*target_ptr)
1207 + (sizeof(target_ptr->TID) * new_size));
1208 bcopy (target_ptr, new_target_ptr,
1210 + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
1211 bus_ptr->LUN[target] = new_target_ptr;
1212 free (target_ptr, M_TEMP);
1213 target_ptr = new_target_ptr;
1214 target_ptr->size = new_size + 1;
1217 * Now, acquire the TID address from the LUN indexed list.
1219 return (&(target_ptr->TID[lun]));
1220 } /* ASR_getTidAddress */
1223 * Get a pre-existing TID relationship.
1225 * If the TID was never set, return (tid_t)-1.
1227 * should use mutex rather than spl.
1231 IN Asr_softc_t * sc,
1241 if (((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, FALSE))
1243 /* (tid_t)0 or (tid_t)-1 indicate no TID */
1244 || (*tid_ptr == (tid_t)0)) {
1254 * Set a TID relationship.
1256 * If the TID was not set, return (tid_t)-1.
1258 * should use mutex rather than spl.
1262 INOUT Asr_softc_t * sc,
1271 if (TID != (tid_t)-1) {
1276 if ((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, TRUE))
1287 /*-------------------------------------------------------------------------*/
1288 /* Function ASR_rescan */
1289 /*-------------------------------------------------------------------------*/
1290 /* The Parameters Passed To This Function Are : */
1291 /* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1293 /* This Function Will rescan the adapter and resynchronize any data */
1295 /* Return : 0 For OK, Error Code Otherwise */
1296 /*-------------------------------------------------------------------------*/
1300 IN Asr_softc_t * sc)
1306 * Re-acquire the LCT table and synchronize us to the adapter.
1308 if ((error = ASR_acquireLct(sc)) == 0) {
1309 error = ASR_acquireHrt(sc);
1316 bus = sc->ha_MaxBus;
1317 /* Reset all existing cached TID lookups */
1319 int target, event = 0;
1322 * Scan for all targets on this bus to see if they
1323 * got affected by the rescan.
1325 for (target = 0; target <= sc->ha_MaxId; ++target) {
1328 /* Stay away from the controller ID */
1329 if (target == sc->ha_adapter_target[bus]) {
1332 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
1333 PI2O_LCT_ENTRY Device;
1334 tid_t TID = (tid_t)-1;
1338 * See if the cached TID changed. Search for
1339 * the device in our new LCT.
1341 for (Device = sc->ha_LCT->LCTEntry;
1342 Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
1343 + I2O_LCT_getTableSize(sc->ha_LCT));
1345 if ((Device->le_type != I2O_UNKNOWN)
1346 && (Device->le_bus == bus)
1347 && (Device->le_target == target)
1348 && (Device->le_lun == lun)
1349 && (I2O_LCT_ENTRY_getUserTID(Device)
1351 TID = I2O_LCT_ENTRY_getLocalTID(
1357 * Indicate to the OS that the label needs
1358 * to be recalculated, or that the specific
1359 * open device is no longer valid (Merde)
1360 * because the cached TID changed.
1362 LastTID = ASR_getTid (sc, bus, target, lun);
1363 if (LastTID != TID) {
1364 struct cam_path * path;
1366 if (xpt_create_path(&path,
1368 cam_sim_path(sc->ha_sim[bus]),
1369 target, lun) != CAM_REQ_CMP) {
1370 if (TID == (tid_t)-1) {
1371 event |= AC_LOST_DEVICE;
1373 event |= AC_INQ_CHANGED
1374 | AC_GETDEV_CHANGED;
1377 if (TID == (tid_t)-1) {
1381 } else if (LastTID == (tid_t)-1) {
1382 struct ccb_getdev ccb;
1386 path, /*priority*/5);
1402 * We have the option of clearing the
1403 * cached TID for it to be rescanned, or to
1404 * set it now even if the device never got
1405 * accessed. We chose the latter since we
1406 * currently do not use the condition that
1407 * the TID ever got cached.
1409 ASR_setTid (sc, bus, target, lun, TID);
1413 * The xpt layer can not handle multiple events at the
1416 if (event & AC_LOST_DEVICE) {
1417 xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
1419 if (event & AC_INQ_CHANGED) {
1420 xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
1422 if (event & AC_GETDEV_CHANGED) {
1423 xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
1425 } while (--bus >= 0);
1429 /*-------------------------------------------------------------------------*/
1430 /* Function ASR_reset */
1431 /*-------------------------------------------------------------------------*/
1432 /* The Parameters Passed To This Function Are : */
1433 /* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1435 /* This Function Will reset the adapter and resynchronize any data */
1438 /*-------------------------------------------------------------------------*/
1442 IN Asr_softc_t * sc)
1447 if ((sc->ha_in_reset == HA_IN_RESET)
1448 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1453 * Promotes HA_OPERATIONAL to HA_IN_RESET,
1454 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1456 ++(sc->ha_in_reset);
1457 if (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0) {
1458 debug_asr_printf ("ASR_resetIOP failed\n");
1460 * We really need to take this card off-line, easier said
1461 * than done. Better to keep retrying for now since, if a
1462 * UART cable is connected, the blink LED codes show the adapter is
1463 * now in a hard state requiring action from the monitor commands to
1464 * the HBA to continue. For debugging, waiting forever is a
1465 * good thing. In a production system, however, one may wish
1466 * to instead take the card off-line ...
1468 # if 0 && (defined(HA_OFF_LINE))
1470 * Take adapter off-line.
1472 printf ("asr%d: Taking adapter off-line\n",
1474 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1476 sc->ha_in_reset = HA_OFF_LINE;
1481 while (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0);
1484 retVal = ASR_init (sc);
1487 debug_asr_printf ("ASR_init failed\n");
1488 sc->ha_in_reset = HA_OFF_LINE;
1491 if (ASR_rescan (sc) != 0) {
1492 debug_asr_printf ("ASR_rescan failed\n");
1494 ASR_failActiveCommands (sc);
1495 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1496 printf ("asr%d: Bringing adapter back on-line\n",
1498 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1501 sc->ha_in_reset = HA_OPERATIONAL;
1506 * Device timeout handler.
1512 union asr_ccb * ccb = (union asr_ccb *)arg;
1513 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1516 debug_asr_print_path(ccb);
1517 debug_asr_printf("timed out");
1520 * Check if the adapter has locked up?
1522 if ((s = ASR_getBlinkLedCode(sc)) != 0) {
1524 printf ("asr%d: Blink LED 0x%x resetting adapter\n",
1525 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
1526 if (ASR_reset (sc) == ENXIO) {
1527 /* Try again later */
1528 ccb->ccb_h.timeout_ch = timeout(asr_timeout,
1530 (ccb->ccb_h.timeout * hz) / 1000);
1535 * Abort does not function on the ASR card!!! Walking away from
1536 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
1537 * our best bet, followed by a complete adapter reset if that fails.
1540 /* Check if we already timed out once to raise the issue */
1541 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
1542 debug_asr_printf (" AGAIN\nreinitializing adapter\n");
1543 if (ASR_reset (sc) == ENXIO) {
1544 ccb->ccb_h.timeout_ch = timeout(asr_timeout,
1546 (ccb->ccb_h.timeout * hz) / 1000);
1551 debug_asr_printf ("\nresetting bus\n");
1552 /* If the BUS reset does not take, then an adapter reset is next! */
1553 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1554 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
1555 ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
1556 (ccb->ccb_h.timeout * hz) / 1000);
1557 ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
1558 xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
1563 * send a message asynchronously
1567 IN Asr_softc_t * sc,
1568 IN PI2O_MESSAGE_FRAME Message)
1570 OUT U32 MessageOffset;
1571 union asr_ccb * ccb;
1573 debug_asr_printf ("Host Command Dump:\n");
1574 debug_asr_dump_message (Message);
1576 ccb = (union asr_ccb *)(long)
1577 I2O_MESSAGE_FRAME_getInitiatorContext64(Message);
1579 if ((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE) {
1580 bcopy (Message, sc->ha_Fvirt + MessageOffset,
1581 I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
1583 ASR_ccbAdd (sc, ccb);
1585 /* Post the command */
1586 sc->ha_Virt->ToFIFO = MessageOffset;
1588 if (ASR_getBlinkLedCode(sc)) {
1590 * Unlikely we can do anything if we can't grab a
1591 * message frame :-(, but let's give it a try.
1593 (void)ASR_reset (sc);
1596 return (MessageOffset);
1600 /* Simple Scatter Gather elements */
1601 #define SG(SGL,Index,Flags,Buffer,Size) \
1602 I2O_FLAGS_COUNT_setCount( \
1603 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1605 I2O_FLAGS_COUNT_setFlags( \
1606 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1607 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags)); \
1608 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress( \
1609 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]), \
1610 (Buffer == NULL) ? NULL : KVTOPHYS(Buffer))
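/*
 * Minimal usage sketch (illustrative, not from the original source):
 * SG(&Message_Ptr->SGL, 0, I2O_SGL_FLAGS_LAST_ELEMENT |
 * I2O_SGL_FLAGS_END_OF_BUFFER, buffer, length)
 * fills simple SG element 0 with the byte count, the flag bits ORed with
 * I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT, and the physical address of buffer.
 */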
1613 * Retrieve Parameter Group.
1614 * Buffer must be allocated using defAlignLong macro.
1618 IN Asr_softc_t * sc,
1622 IN unsigned BufferSize)
1624 struct paramGetMessage {
1625 I2O_UTIL_PARAMS_GET_MESSAGE M;
1627 sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
1629 I2O_PARAM_OPERATIONS_LIST_HEADER Header;
1630 I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
1633 defAlignLong(struct paramGetMessage, Message);
1634 struct Operations * Operations_Ptr;
1635 I2O_UTIL_PARAMS_GET_MESSAGE * Message_Ptr;
1636 struct ParamBuffer {
1637 I2O_PARAM_RESULTS_LIST_HEADER Header;
1638 I2O_PARAM_READ_OPERATION_RESULT Read;
1642 Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(Message,
1643 sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1644 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1645 Operations_Ptr = (struct Operations *)((char *)Message_Ptr
1646 + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1647 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1648 bzero ((void *)Operations_Ptr, sizeof(struct Operations));
1649 I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
1650 &(Operations_Ptr->Header), 1);
1651 I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
1652 &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
1653 I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
1654 &(Operations_Ptr->Template[0]), 0xFFFF);
1655 I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
1656 &(Operations_Ptr->Template[0]), Group);
1657 bzero ((void *)(Buffer_Ptr = getAlignLong(struct ParamBuffer, Buffer)),
1660 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1662 + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1663 / sizeof(U32)) << 4));
1664 I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
1666 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
1667 I2O_UTIL_PARAMS_GET);
1669 * Set up the buffers as scatter gather elements.
1671 SG(&(Message_Ptr->SGL), 0,
1672 I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
1673 Operations_Ptr, sizeof(struct Operations));
1674 SG(&(Message_Ptr->SGL), 1,
1675 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1676 Buffer_Ptr, BufferSize);
1678 if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
1679 && (Buffer_Ptr->Header.ResultCount)) {
1680 return ((void *)(Buffer_Ptr->Info));
1682 return ((void *)NULL);
1683 } /* ASR_getParams */
1686 * Acquire the LCT information.
1690 INOUT Asr_softc_t * sc)
1692 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
1693 PI2O_SGE_SIMPLE_ELEMENT sg;
1694 int MessageSizeInBytes;
1698 PI2O_LCT_ENTRY Entry;
1701 * sc value assumed valid
1703 MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE)
1704 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
1705 if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc (
1706 MessageSizeInBytes, M_TEMP, M_WAITOK))
1707 == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
1710 (void)ASR_fillMessage((char *)Message_Ptr, MessageSizeInBytes);
1711 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1713 (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1714 / sizeof(U32)) << 4)));
1715 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1716 I2O_EXEC_LCT_NOTIFY);
1717 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
1718 I2O_CLASS_MATCH_ANYCLASS);
1720 * Call the LCT table to determine the number of device entries
1721 * to reserve space for.
1723 SG(&(Message_Ptr->SGL), 0,
1724 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
1727 * since this code is reused in several systems, code efficiency
1728 * is greater by using a shift operation rather than a divide by
1729 * sizeof(u_int32_t).
1731 I2O_LCT_setTableSize(&Table,
1732 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1733 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1735 * Determine the size of the LCT table.
1738 free (sc->ha_LCT, M_TEMP);
1741 * malloc only generates contiguous memory when less than a
1742 * page is expected. We must break the request up into an SG list ...
1744 if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
1745 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
1746 || (len > (128 * 1024))) { /* Arbitrary */
1747 free (Message_Ptr, M_TEMP);
1750 if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK))
1751 == (PI2O_LCT)NULL) {
1752 free (Message_Ptr, M_TEMP);
1756 * since this code is reused in several systems, code efficiency
1757 * is greater by using a shift operation rather than a divide by
1758 * sizeof(u_int32_t).
1760 I2O_LCT_setTableSize(sc->ha_LCT,
1761 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1763 * Convert the access to the LCT table into a SG list.
1765 sg = Message_Ptr->SGL.u.Simple;
1766 v = (caddr_t)(sc->ha_LCT);
1768 int next, base, span;
1771 next = base = KVTOPHYS(v);
1772 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1774 /* How far can we go contiguously */
1775 while ((len > 0) && (base == next)) {
1778 next = trunc_page(base) + PAGE_SIZE;
1789 /* Construct the Flags */
1790 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1792 int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
1794 rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
1795 | I2O_SGL_FLAGS_LAST_ELEMENT
1796 | I2O_SGL_FLAGS_END_OF_BUFFER);
1798 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
1806 * Incrementing requires resizing of the packet.
1809 MessageSizeInBytes += sizeof(*sg);
1810 I2O_MESSAGE_FRAME_setMessageSize(
1811 &(Message_Ptr->StdMessageFrame),
1812 I2O_MESSAGE_FRAME_getMessageSize(
1813 &(Message_Ptr->StdMessageFrame))
1814 + (sizeof(*sg) / sizeof(U32)));
1816 PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;
1818 if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
1819 malloc (MessageSizeInBytes, M_TEMP, M_WAITOK))
1820 == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
1821 free (sc->ha_LCT, M_TEMP);
1822 sc->ha_LCT = (PI2O_LCT)NULL;
1823 free (Message_Ptr, M_TEMP);
1826 span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
1827 bcopy ((caddr_t)Message_Ptr,
1828 (caddr_t)NewMessage_Ptr, span);
1829 free (Message_Ptr, M_TEMP);
1830 sg = (PI2O_SGE_SIMPLE_ELEMENT)
1831 (((caddr_t)NewMessage_Ptr) + span);
1832 Message_Ptr = NewMessage_Ptr;
1837 retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1838 free (Message_Ptr, M_TEMP);
1839 if (retval != CAM_REQ_CMP) {
1843 /* If the LCT table grew, let's truncate accesses */
1844 if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
1845 I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
1847 for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
1848 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1850 Entry->le_type = I2O_UNKNOWN;
1851 switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {
1853 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
1854 Entry->le_type = I2O_BSA;
1857 case I2O_CLASS_SCSI_PERIPHERAL:
1858 Entry->le_type = I2O_SCSI;
1861 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
1862 Entry->le_type = I2O_FCA;
1865 case I2O_CLASS_BUS_ADAPTER_PORT:
1866 Entry->le_type = I2O_PORT | I2O_SCSI;
1868 case I2O_CLASS_FIBRE_CHANNEL_PORT:
1869 if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
1870 I2O_CLASS_FIBRE_CHANNEL_PORT) {
1871 Entry->le_type = I2O_PORT | I2O_FCA;
1873 { struct ControllerInfo {
1874 I2O_PARAM_RESULTS_LIST_HEADER Header;
1875 I2O_PARAM_READ_OPERATION_RESULT Read;
1876 I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1878 defAlignLong(struct ControllerInfo, Buffer);
1879 PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1881 Entry->le_bus = 0xff;
1882 Entry->le_target = 0xff;
1883 Entry->le_lun = 0xff;
1885 if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
1887 I2O_LCT_ENTRY_getLocalTID(Entry),
1888 I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
1889 Buffer, sizeof(struct ControllerInfo)))
1890 == (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)NULL) {
1894 = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
1901 { struct DeviceInfo {
1902 I2O_PARAM_RESULTS_LIST_HEADER Header;
1903 I2O_PARAM_READ_OPERATION_RESULT Read;
1904 I2O_DPT_DEVICE_INFO_SCALAR Info;
1906 defAlignLong (struct DeviceInfo, Buffer);
1907 PI2O_DPT_DEVICE_INFO_SCALAR Info;
1909 Entry->le_bus = 0xff;
1910 Entry->le_target = 0xff;
1911 Entry->le_lun = 0xff;
1913 if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
1915 I2O_LCT_ENTRY_getLocalTID(Entry),
1916 I2O_DPT_DEVICE_INFO_GROUP_NO,
1917 Buffer, sizeof(struct DeviceInfo)))
1918 == (PI2O_DPT_DEVICE_INFO_SCALAR)NULL) {
1922 |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
1924 = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
1925 if ((Entry->le_bus > sc->ha_MaxBus)
1926 && (Entry->le_bus <= MAX_CHANNEL)) {
1927 sc->ha_MaxBus = Entry->le_bus;
1930 = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
1932 = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
1936 * A zero return value indicates success.
1939 } /* ASR_acquireLct */
1942 * Initialize a message frame.
1943 * We assume that the CDB has already been set up, so all we do here is
1944 * generate the Scatter Gather list.
1946 STATIC INLINE PI2O_MESSAGE_FRAME
1948 IN union asr_ccb * ccb,
1949 OUT PI2O_MESSAGE_FRAME Message)
1951 int next, span, base, rw;
1952 OUT PI2O_MESSAGE_FRAME Message_Ptr;
1953 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1954 PI2O_SGE_SIMPLE_ELEMENT sg;
1956 vm_size_t size, len;
1959 /* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
1960 bzero (Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message),
1961 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT)));
1964 int target = ccb->ccb_h.target_id;
1965 int lun = ccb->ccb_h.target_lun;
1966 int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1969 if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
1970 PI2O_LCT_ENTRY Device;
1973 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
1974 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1976 if ((Device->le_type != I2O_UNKNOWN)
1977 && (Device->le_bus == bus)
1978 && (Device->le_target == target)
1979 && (Device->le_lun == lun)
1980 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
1981 TID = I2O_LCT_ENTRY_getLocalTID(Device);
1982 ASR_setTid (sc, Device->le_bus,
1983 Device->le_target, Device->le_lun,
1989 if (TID == (tid_t)0) {
1990 return ((PI2O_MESSAGE_FRAME)NULL);
1992 I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
1993 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
1994 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
1996 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
1997 (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1998 / sizeof(U32)) << 4));
1999 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
2000 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2001 - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
2002 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
2003 I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
2004 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2005 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
2006 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
2007 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2008 I2O_SCB_FLAG_ENABLE_DISCONNECT
2009 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2010 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2012 * We do not need any (optional byteswapping) method access to
2013 * the Initiator & Transaction context field.
2015 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
2017 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2018 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
2022 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
2023 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
2024 bcopy (&(ccb->csio.cdb_io),
2025 ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB, ccb->csio.cdb_len);
2028 * Given a buffer describing a transfer, set up a scatter/gather map
2029 * in a ccb to map that SCSI transfer.
2032 rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;
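/*
 * rw carries I2O_SGL_FLAGS_DIR only for host-to-device transfers; CAM_DIR_IN
 * (device-to-host) requests leave it clear, which selects the
 * I2O_SCB_FLAG_XFER_FROM_DEVICE SCB flags below.
 */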
2034 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
2035 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2036 (ccb->csio.dxfer_len)
2037 ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
2038 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2039 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2040 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
2041 : (I2O_SCB_FLAG_XFER_FROM_DEVICE
2042 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2043 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2044 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
2045 : (I2O_SCB_FLAG_ENABLE_DISCONNECT
2046 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2047 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2050 * Given a transfer described by a `data', fill in the SG list.
2052 sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];
2054 len = ccb->csio.dxfer_len;
2055 v = ccb->csio.data_ptr;
2056 ASSERT (ccb->csio.dxfer_len >= 0);
2057 MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
2058 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2059 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
2060 while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2061 Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
2063 next = base = KVTOPHYS(v);
2064 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
2066 /* How far can we go contiguously */
2067 while ((len > 0) && (base == next)) {
2068 next = trunc_page(base) + PAGE_SIZE;
2079 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
2081 rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
2083 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
2084 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
2086 MessageSize += sizeof(*sg) / sizeof(U32);
2088 /* We always do the request sense ... */
2089 if ((span = ccb->csio.sense_len) == 0) {
2090 span = sizeof(ccb->csio.sense_data);
2092 SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2093 &(ccb->csio.sense_data), span);
2094 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
2095 MessageSize + (sizeof(*sg) / sizeof(U32)));
2096 return (Message_Ptr);
2097 } /* ASR_init_message */
2100 * Initialize the outbound FIFO and reply frames.
2104 INOUT Asr_softc_t * sc)
2106 struct initOutBoundMessage {
2107 I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
2110 defAlignLong(struct initOutBoundMessage,Message);
2111 PI2O_EXEC_OUTBOUND_INIT_MESSAGE Message_Ptr;
2112 OUT U32 * volatile Reply_Ptr;
2116 * Build up our copy of the Message.
2118 Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(Message,
2119 sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
2120 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2121 I2O_EXEC_OUTBOUND_INIT);
2122 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
2123 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
2124 sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
2126 * Reset the Reply Status
2128 *(Reply_Ptr = (U32 *)((char *)Message_Ptr
2129 + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
2130 SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
2133 * Send the Message out
2135 if ((Old = ASR_initiateCp (sc->ha_Virt, sc->ha_Fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
2139 * Wait for a response (Poll).
2141 while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
2143 * Re-enable the interrupts.
2145 sc->ha_Virt->Mask = Old;
2147 * Populate the outbound table.
2149 if (sc->ha_Msgs == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
2151 /* Allocate the reply frames */
2152 size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
2153 * sc->ha_Msgs_Count;
2156 * contigmalloc only works reliably at
2157 * initialization time.
2159 if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
2160 contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
2161 0xFFFFFFFFul, (u_long)sizeof(U32), 0ul))
2162 != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
2163 (void)bzero ((char *)sc->ha_Msgs, size);
2164 sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
2168 /* Initialize the outbound FIFO */
2169 if (sc->ha_Msgs != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL)
2170 for (size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
2172 sc->ha_Virt->FromFIFO = addr;
2173 addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
2175 return (*Reply_Ptr);
2178 } /* ASR_initOutBound */
2181 * Set the system table
2185 IN Asr_softc_t * sc)
2187 PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
2188 PI2O_SET_SYSTAB_HEADER SystemTable;
2190 PI2O_SGE_SIMPLE_ELEMENT sg;
2193 if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc (
2194 sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK))
2195 == (PI2O_SET_SYSTAB_HEADER)NULL) {
2198 bzero (SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
2199 for (ha = Asr_softc; ha; ha = ha->ha_next) {
2200 ++SystemTable->NumberEntries;
2202 if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc (
2203 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2204 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
2205 M_TEMP, M_WAITOK)) == (PI2O_EXEC_SYS_TAB_SET_MESSAGE)NULL) {
2206 free (SystemTable, M_TEMP);
2209 (void)ASR_fillMessage((char *)Message_Ptr,
2210 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2211 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
2212 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2214 (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2215 / sizeof(U32)) << 4)));
2216 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2217 I2O_EXEC_SYS_TAB_SET);
2219 * Call the LCT table to determine the number of device entries
2220 * to reserve space for.
2221 * Since this code is reused in several systems, it is more
2222 * efficient to use a shift operation rather than a divide by
2223 * sizeof(u_int32_t).
2225 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
2226 + ((I2O_MESSAGE_FRAME_getVersionOffset(
2227 &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
2228 SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
2230 for (ha = Asr_softc; ha; ha = ha->ha_next) {
2233 ? (I2O_SGL_FLAGS_DIR)
2234 : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
2235 &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
2238 SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2239 SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
2240 | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2241 retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2242 free (Message_Ptr, M_TEMP);
2243 free (SystemTable, M_TEMP);
2245 } /* ASR_setSysTab */
2249 INOUT Asr_softc_t * sc)
2251 defAlignLong(I2O_EXEC_HRT_GET_MESSAGE,Message);
2252 I2O_EXEC_HRT_GET_MESSAGE * Message_Ptr;
2255 I2O_HRT_ENTRY Entry[MAX_CHANNEL];
2257 u_int8_t NumberOfEntries;
2258 PI2O_HRT_ENTRY Entry;
2260 bzero ((void *)&Hrt, sizeof (Hrt));
2261 Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(Message,
2262 sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2263 + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2264 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2266 + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2267 / sizeof(U32)) << 4)));
2268 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
2272 * Set up the buffers as scatter gather elements.
2274 SG(&(Message_Ptr->SGL), 0,
2275 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2277 if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
2280 if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
2281 > (MAX_CHANNEL + 1)) {
2282 NumberOfEntries = MAX_CHANNEL + 1;
2284 for (Entry = Hrt.Header.HRTEntry;
2285 NumberOfEntries != 0;
2286 ++Entry, --NumberOfEntries) {
2287 PI2O_LCT_ENTRY Device;
2289 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2290 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
2292 if (I2O_LCT_ENTRY_getLocalTID(Device)
2293 == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
2294 Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
2296 if ((Device->le_bus > sc->ha_MaxBus)
2297 && (Device->le_bus <= MAX_CHANNEL)) {
2298 sc->ha_MaxBus = Device->le_bus;
2304 } /* ASR_acquireHrt */
2307 * Enable the adapter.
2311 IN Asr_softc_t * sc)
2313 defAlignLong(I2O_EXEC_SYS_ENABLE_MESSAGE,Message);
2314 PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr;
2316 Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(Message,
2317 sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2318 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2319 I2O_EXEC_SYS_ENABLE);
2320 return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2321 } /* ASR_enableSys */
2324 * Perform the stages necessary to initialize the adapter
2328 IN Asr_softc_t * sc)
2330 return ((ASR_initOutBound(sc) == 0)
2331 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2332 || (ASR_enableSys(sc) != CAM_REQ_CMP));
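/*
 * ASR_init returns nonzero as soon as any stage fails; the short-circuit
 * evaluation above guarantees the three steps run in order (outbound FIFO,
 * system table, then system enable) and that later steps are skipped once
 * an earlier one has failed.
 */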
2336 * Send a Synchronize Cache command to the target device.
2340 IN Asr_softc_t * sc,
2348 * We will not synchronize the device when there are outstanding
2349 * commands issued by the OS (this is due to a locked up device,
2350 * as the OS normally would flush all outstanding commands before
2351 * issuing a shutdown or an adapter reset).
2353 if ((sc != (Asr_softc_t *)NULL)
2354 && (LIST_FIRST(&(sc->ha_ccb)) != (struct ccb_hdr *)NULL)
2355 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
2356 && (TID != (tid_t)0)) {
2357 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
2358 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
2361 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
2362 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2363 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2365 I2O_MESSAGE_FRAME_setVersionOffset(
2366 (PI2O_MESSAGE_FRAME)Message_Ptr,
2368 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2369 - sizeof(I2O_SG_ELEMENT))
2370 / sizeof(U32)) << 4));
2371 I2O_MESSAGE_FRAME_setMessageSize(
2372 (PI2O_MESSAGE_FRAME)Message_Ptr,
2373 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2374 - sizeof(I2O_SG_ELEMENT))
2376 I2O_MESSAGE_FRAME_setInitiatorAddress (
2377 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2378 I2O_MESSAGE_FRAME_setFunction(
2379 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2380 I2O_MESSAGE_FRAME_setTargetAddress(
2381 (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
2382 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2383 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2385 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
2386 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2387 I2O_SCB_FLAG_ENABLE_DISCONNECT
2388 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2389 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2390 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2391 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2392 DPT_ORGANIZATION_ID);
2393 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2394 Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
2395 Message_Ptr->CDB[1] = (lun << 5);
2397 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2398 (I2O_SCB_FLAG_XFER_FROM_DEVICE
2399 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2400 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2401 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2403 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2410 IN Asr_softc_t * sc)
2412 int bus, target, lun;
2414 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2415 for (target = 0; target <= sc->ha_MaxId; ++target) {
2416 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2417 ASR_sync(sc,bus,target,lun);
2424 * Reset the HBA, targets and BUS.
2425 * Currently this resets *all* the SCSI busses.
2429 IN Asr_softc_t * sc)
2431 ASR_synchronize (sc);
2432 (void)ASR_reset (sc);
2433 } /* asr_hbareset */
2436 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2437 * limit and a reduction in error checking (in the pre 4.0 case).
2442 IN Asr_softc_t * sc)
2448 * The I2O specification says we must find the first *memory* mapped BAR
2450 for (rid = PCIR_MAPS;
2451 rid < (PCIR_MAPS + 4 * sizeof(u_int32_t));
2452 rid += sizeof(u_int32_t)) {
2453 p = pci_read_config(tag, rid, sizeof(p));
2461 if (rid >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2464 p = pci_read_config(tag, rid, sizeof(p));
2465 pci_write_config(tag, rid, -1, sizeof(p));
2466 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2467 pci_write_config(tag, rid, p, sizeof(p));
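/*
 * Standard PCI BAR sizing idiom: save the BAR, write all ones, read back
 * the size mask (the low four bits carry type/prefetch flags, hence the
 * & ~15), negate it to obtain the region length, then restore the original
 * BAR contents.
 */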
2472 * The 2005S Zero Channel RAID solution is not a perfect PCI
2473 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
2474 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
2475 * BAR0+2MB and sets its size to 2MB. The IOP registers are
2476 * accessible via BAR0, the messaging registers are accessible
2477 * via BAR1. This applies if the subdevice code is 50 to 59 decimal.
2479 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s));
2480 if (s != 0xA5111044) {
2481 s = pci_read_config(tag, PCIR_SUBVEND_0, sizeof(s));
2482 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2483 && (ADPTDOMINATOR_SUB_ID_START <= s)
2484 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2485 l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
2489 sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2490 p, p + l, l, RF_ACTIVE);
2491 if (sc->ha_mem_res == (struct resource *)NULL) {
2494 sc->ha_Base = (void *)rman_get_start(sc->ha_mem_res);
2495 if (sc->ha_Base == (void *)NULL) {
2498 sc->ha_Virt = (i2oRegs_t *) rman_get_virtual(sc->ha_mem_res);
2499 if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
2500 if ((rid += sizeof(u_int32_t))
2501 >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2504 p = pci_read_config(tag, rid, sizeof(p));
2505 pci_write_config(tag, rid, -1, sizeof(p));
2506 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2507 pci_write_config(tag, rid, p, sizeof(p));
2512 sc->ha_mes_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2513 p, p + l, l, RF_ACTIVE);
2514 if (sc->ha_mes_res == (struct resource *)NULL) {
2517 if ((void *)rman_get_start(sc->ha_mes_res) == (void *)NULL) {
2520 sc->ha_Fvirt = (U8 *) rman_get_virtual(sc->ha_mes_res);
2522 sc->ha_Fvirt = (U8 *)(sc->ha_Virt);
2525 } /* asr_pci_map_mem */
2528 * A simplified copy of the real pci_map_int with additional
2529 * registration requirements.
2534 IN Asr_softc_t * sc)
2538 sc->ha_irq_res = bus_alloc_resource(tag, SYS_RES_IRQ, &rid,
2539 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE);
2540 if (sc->ha_irq_res == (struct resource *)NULL) {
2543 if (bus_setup_intr(tag, sc->ha_irq_res, INTR_TYPE_CAM,
2544 (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr))) {
2547 sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char));
2549 } /* asr_pci_map_int */
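/*
 * The handler registered above is asr_intr, attached as a shared
 * INTR_TYPE_CAM interrupt; the raw interrupt line from PCI configuration
 * space is also kept in ha_irq so the line number remains available later
 * (e.g. to the management/ioctl path).
 */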
2552 * Attach the devices and virtual devices to the driver list.
2555 asr_attach (ATTACH_ARGS)
2558 struct scsi_inquiry_data * iq;
2561 sc = malloc(sizeof(*sc), M_DEVBUF, M_INTWAIT);
2562 if (Asr_softc == (Asr_softc_t *)NULL) {
2564 * Fixup the OS revision as saved in the dptsig for the
2565 * engine (dptioctl.h) to pick up.
2567 bcopy (osrelease, &ASR_sig.dsDescription[16], 5);
2568 printf ("asr%d: major=%d\n", unit, asr_cdevsw.d_maj);
2571 * Initialize the software structure
2573 bzero (sc, sizeof(*sc));
2574 LIST_INIT(&(sc->ha_ccb));
2575 /* Link us into the HA list */
2579 for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next));
2583 PI2O_EXEC_STATUS_GET_REPLY status;
2587 * This is the real McCoy!
2589 if (!asr_pci_map_mem(tag, sc)) {
2590 printf ("asr%d: could not map memory\n", unit);
2591 ATTACH_RETURN(ENXIO);
2593 /* Enable if not already enabled */
2594 pci_write_config (tag, PCIR_COMMAND,
2595 pci_read_config (tag, PCIR_COMMAND, sizeof(char))
2596 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2597 /* Knowledge is power, responsibility is direct */
2599 struct pci_devinfo {
2600 STAILQ_ENTRY(pci_devinfo) pci_links;
2601 struct resource_list resources;
2603 } * dinfo = device_get_ivars(tag);
2604 sc->ha_pciBusNum = dinfo->cfg.bus;
2605 sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3)
2608 /* Check if the device is there */
2609 if ((ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt) == 0)
2610 || ((status = (PI2O_EXEC_STATUS_GET_REPLY)malloc (
2611 sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK))
2612 == (PI2O_EXEC_STATUS_GET_REPLY)NULL)
2613 || (ASR_getStatus(sc->ha_Virt, sc->ha_Fvirt, status) == NULL)) {
2614 printf ("asr%d: could not initialize hardware\n", unit);
2615 ATTACH_RETURN(ENODEV); /* Get next, maybe better luck */
2617 sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2618 sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2619 sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2620 sc->ha_SystemTable.IopState = status->IopState;
2621 sc->ha_SystemTable.MessengerType = status->MessengerType;
2622 sc->ha_SystemTable.InboundMessageFrameSize
2623 = status->InboundMFrameSize;
2624 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow
2625 = (U32)(sc->ha_Base) + (U32)(&(((i2oRegs_t *)NULL)->ToFIFO));
2627 if (!asr_pci_map_int(tag, (void *)sc)) {
2628 printf ("asr%d: could not map interrupt\n", unit);
2629 ATTACH_RETURN(ENXIO);
2632 /* Adjust the maximum inbound count */
2633 if (((sc->ha_QueueSize
2634 = I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status))
2636 || (sc->ha_QueueSize == 0)) {
2637 sc->ha_QueueSize = MAX_INBOUND;
2640 /* Adjust the maximum outbound count */
2641 if (((sc->ha_Msgs_Count
2642 = I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status))
2644 || (sc->ha_Msgs_Count == 0)) {
2645 sc->ha_Msgs_Count = MAX_OUTBOUND;
2647 if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2648 sc->ha_Msgs_Count = sc->ha_QueueSize;
2651 /* Adjust the maximum SG size to match the adapter */
2652 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(
2653 status) << 2)) > MAX_INBOUND_SIZE) {
2654 size = MAX_INBOUND_SIZE;
2656 free (status, M_TEMP);
2657 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2658 + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
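/*
 * ha_SgSize is the number of simple SG elements that fit in one inbound
 * frame: the frame size reported by the IOP (capped at MAX_INBOUND_SIZE
 * above), less the fixed SCSI SCB execute header, divided by the size of a
 * simple SG element.  Adding back sizeof(I2O_SG_ELEMENT) accounts for the
 * SG placeholder already counted inside the header structure.
 */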
2662 * Only do a bus/HBA reset on the first time through. On this
2663 * first time through, we do not send a flush to the devices.
2665 if (ASR_init(sc) == 0) {
2667 I2O_PARAM_RESULTS_LIST_HEADER Header;
2668 I2O_PARAM_READ_OPERATION_RESULT Read;
2669 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2671 defAlignLong (struct BufferInfo, Buffer);
2672 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2673 # define FW_DEBUG_BLED_OFFSET 8
2675 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2676 ASR_getParams(sc, 0,
2677 I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2678 Buffer, sizeof(struct BufferInfo)))
2679 != (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)NULL) {
2680 sc->ha_blinkLED = sc->ha_Fvirt
2681 + I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info)
2682 + FW_DEBUG_BLED_OFFSET;
2684 if (ASR_acquireLct(sc) == 0) {
2685 (void)ASR_acquireHrt(sc);
2688 printf ("asr%d: failed to initialize\n", unit);
2689 ATTACH_RETURN(ENXIO);
2692 * Add in additional probe responses for more channels. We
2693 * are reusing the variable `target' for a channel loop counter.
2694 * Done here because we need both the acquireLct and
2697 { PI2O_LCT_ENTRY Device;
2699 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2700 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
2702 if (Device->le_type == I2O_UNKNOWN) {
2705 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
2706 if (Device->le_target > sc->ha_MaxId) {
2707 sc->ha_MaxId = Device->le_target;
2709 if (Device->le_lun > sc->ha_MaxLun) {
2710 sc->ha_MaxLun = Device->le_lun;
2713 if (((Device->le_type & I2O_PORT) != 0)
2714 && (Device->le_bus <= MAX_CHANNEL)) {
2715 /* Do not increase MaxId for efficiency */
2716 sc->ha_adapter_target[Device->le_bus]
2717 = Device->le_target;
2724 * Print the HBA model number as inquired from the card.
2727 printf ("asr%d:", unit);
2729 if ((iq = (struct scsi_inquiry_data *)malloc (
2730 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK))
2731 != (struct scsi_inquiry_data *)NULL) {
2732 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
2733 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
2736 bzero (iq, sizeof(struct scsi_inquiry_data));
2738 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
2739 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2740 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2742 I2O_MESSAGE_FRAME_setVersionOffset(
2743 (PI2O_MESSAGE_FRAME)Message_Ptr,
2745 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2746 - sizeof(I2O_SG_ELEMENT))
2747 / sizeof(U32)) << 4));
2748 I2O_MESSAGE_FRAME_setMessageSize(
2749 (PI2O_MESSAGE_FRAME)Message_Ptr,
2750 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2751 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT))
2753 I2O_MESSAGE_FRAME_setInitiatorAddress (
2754 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2755 I2O_MESSAGE_FRAME_setFunction(
2756 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2757 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2758 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2760 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2761 I2O_SCB_FLAG_ENABLE_DISCONNECT
2762 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2763 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2764 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
2765 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2766 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2767 DPT_ORGANIZATION_ID);
2768 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2769 Message_Ptr->CDB[0] = INQUIRY;
2770 Message_Ptr->CDB[4] = (unsigned char)sizeof(struct scsi_inquiry_data);
2771 if (Message_Ptr->CDB[4] == 0) {
2772 Message_Ptr->CDB[4] = 255;
2775 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2776 (I2O_SCB_FLAG_XFER_FROM_DEVICE
2777 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2778 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2779 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2781 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2782 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2783 sizeof(struct scsi_inquiry_data));
2784 SG(&(Message_Ptr->SGL), 0,
2785 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2786 iq, sizeof(struct scsi_inquiry_data));
2787 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2789 if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
2791 ASR_prstring (iq->vendor, 8);
2794 if (iq->product[0] && (iq->product[0] != ' ')) {
2796 ASR_prstring (iq->product, 16);
2799 if (iq->revision[0] && (iq->revision[0] != ' ')) {
2800 printf (" FW Rev. ");
2801 ASR_prstring (iq->revision, 4);
2804 free ((caddr_t)iq, M_TEMP);
2809 printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
2810 (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
2813 * Fill in the prototype cam_path.
2817 union asr_ccb * ccb;
2819 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
2820 printf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit);
2821 ATTACH_RETURN(ENOMEM);
2823 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2824 int QueueSize = sc->ha_QueueSize;
2826 if (QueueSize > MAX_INBOUND) {
2827 QueueSize = MAX_INBOUND;
2831 * Construct our first channel SIM entry
2833 sc->ha_sim[bus] = cam_sim_alloc(
2834 asr_action, asr_poll, "asr", sc,
2835 unit, 1, QueueSize, NULL);
2836 if (sc->ha_sim[bus] == NULL)
2839 if (xpt_bus_register(sc->ha_sim[bus], bus)
2841 cam_sim_free(sc->ha_sim[bus]);
2842 sc->ha_sim[bus] = NULL;
2846 if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
2847 cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
2848 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2850 cam_sim_path(sc->ha_sim[bus]));
2851 cam_sim_free(sc->ha_sim[bus]);
2852 sc->ha_sim[bus] = NULL;
2859 * Generate the device node information
2861 make_dev(&asr_cdevsw, unit, 0, 0, S_IRWXU, "rasr%d", unit);
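/*
 * The rasr%d character device created here is the entry point for the
 * DPT/I2O management path (asr_open and the ioctl handling built around
 * ASR_queue_i further below); it is distinct from the CAM SIMs registered
 * above.
 */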
2867 IN struct cam_sim *sim)
2869 asr_intr(cam_sim_softc(sim));
2874 IN struct cam_sim * sim,
2877 struct Asr_softc * sc;
2879 debug_asr_printf ("asr_action(%lx,%lx{%x})\n",
2880 (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code);
2882 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));
2884 ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);
2886 switch (ccb->ccb_h.func_code) {
2888 /* Common cases first */
2889 case XPT_SCSI_IO: /* Execute the requested I/O operation */
2892 char M[MAX_INBOUND_SIZE];
2894 defAlignLong(struct Message,Message);
2895 PI2O_MESSAGE_FRAME Message_Ptr;
2897 /* Reject incoming commands while we are resetting the card */
2898 if (sc->ha_in_reset != HA_OPERATIONAL) {
2899 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2900 if (sc->ha_in_reset >= HA_OFF_LINE) {
2901 /* HBA is now off-line */
2902 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2904 /* HBA currently resetting, try again later. */
2905 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2907 debug_asr_cmd_printf (" e\n");
2909 debug_asr_cmd_printf (" q\n");
2912 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2914 "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
2915 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
2916 ccb->csio.cdb_io.cdb_bytes[0],
2918 ccb->ccb_h.target_id,
2919 ccb->ccb_h.target_lun);
2921 debug_asr_cmd_printf ("(%d,%d,%d,%d)",
2924 ccb->ccb_h.target_id,
2925 ccb->ccb_h.target_lun);
2926 debug_asr_cmd_dump_ccb(ccb);
2928 if ((Message_Ptr = ASR_init_message ((union asr_ccb *)ccb,
2929 (PI2O_MESSAGE_FRAME)Message)) != (PI2O_MESSAGE_FRAME)NULL) {
2930 debug_asr_cmd2_printf ("TID=%x:\n",
2931 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
2932 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
2933 debug_asr_cmd2_dump_message(Message_Ptr);
2934 debug_asr_cmd1_printf (" q");
2936 if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
2937 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2938 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2939 debug_asr_cmd_printf (" E\n");
2942 debug_asr_cmd_printf (" Q\n");
2946 * We will get here if there is no valid TID for the device
2947 * referenced in the scsi command packet.
2949 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2950 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2951 debug_asr_cmd_printf (" B\n");
2956 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
2957 /* Reset HBA device ... */
2959 ccb->ccb_h.status = CAM_REQ_CMP;
2963 # if (defined(REPORT_LUNS))
2966 case XPT_ABORT: /* Abort the specified CCB */
2968 ccb->ccb_h.status = CAM_REQ_INVALID;
2972 case XPT_SET_TRAN_SETTINGS:
2974 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2978 case XPT_GET_TRAN_SETTINGS:
2979 /* Get default/user set transfer settings for the target */
2981 struct ccb_trans_settings *cts;
2985 target_mask = 0x01 << ccb->ccb_h.target_id;
2986 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
2987 cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
2988 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2989 cts->sync_period = 6; /* 40MHz */
2990 cts->sync_offset = 15;
2992 cts->valid = CCB_TRANS_SYNC_RATE_VALID
2993 | CCB_TRANS_SYNC_OFFSET_VALID
2994 | CCB_TRANS_BUS_WIDTH_VALID
2995 | CCB_TRANS_DISC_VALID
2996 | CCB_TRANS_TQ_VALID;
2997 ccb->ccb_h.status = CAM_REQ_CMP;
2999 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3005 case XPT_CALC_GEOMETRY:
3007 struct ccb_calc_geometry *ccg;
3009 u_int32_t secs_per_cylinder;
3012 size_mb = ccg->volume_size
3013 / ((1024L * 1024L) / ccg->block_size);
3015 if (size_mb > 4096) {
3017 ccg->secs_per_track = 63;
3018 } else if (size_mb > 2048) {
3020 ccg->secs_per_track = 63;
3021 } else if (size_mb > 1024) {
3023 ccg->secs_per_track = 63;
3026 ccg->secs_per_track = 32;
3028 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
3029 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
3030 ccb->ccb_h.status = CAM_REQ_CMP;
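/*
 * Worked example: a volume of 16,777,216 blocks of 512 bytes gives
 * size_mb = 16,777,216 / ((1024 * 1024) / 512) = 8192 MB, which is above
 * the 4096 MB threshold, so 63 sectors per track are used and cylinders
 * becomes volume_size / (heads * 63) for the head count selected for that
 * size range.
 */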
3035 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
3036 ASR_resetBus (sc, cam_sim_bus(sim));
3037 ccb->ccb_h.status = CAM_REQ_CMP;
3041 case XPT_TERM_IO: /* Terminate the I/O process */
3043 ccb->ccb_h.status = CAM_REQ_INVALID;
3047 case XPT_PATH_INQ: /* Path routing inquiry */
3049 struct ccb_pathinq *cpi = &(ccb->cpi);
3051 cpi->version_num = 1; /* XXX??? */
3052 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3053 cpi->target_sprt = 0;
3054 /* Not necessary to reset bus, done by HDM initialization */
3055 cpi->hba_misc = PIM_NOBUSRESET;
3056 cpi->hba_eng_cnt = 0;
3057 cpi->max_target = sc->ha_MaxId;
3058 cpi->max_lun = sc->ha_MaxLun;
3059 cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
3060 cpi->bus_id = cam_sim_bus(sim);
3061 cpi->base_transfer_speed = 3300;
3062 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3063 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
3064 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3065 cpi->unit_number = cam_sim_unit(sim);
3066 cpi->ccb_h.status = CAM_REQ_CMP;
3071 ccb->ccb_h.status = CAM_REQ_INVALID;
3079 * Handle processing of current CCB as pointed to by the Status.
3083 IN Asr_softc_t * sc)
3088 sc->ha_Virt->Status & Mask_InterruptsDisabled;
3090 union asr_ccb * ccb;
3092 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
3094 if (((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)
3095 && ((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)) {
3098 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
3099 - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
3101 * We do not need any (optional byteswapping) method access to
3102 * the Initiator context field.
3104 ccb = (union asr_ccb *)(long)
3105 I2O_MESSAGE_FRAME_getInitiatorContext64(
3106 &(Reply->StdReplyFrame.StdMessageFrame));
3107 if (I2O_MESSAGE_FRAME_getMsgFlags(
3108 &(Reply->StdReplyFrame.StdMessageFrame))
3109 & I2O_MESSAGE_FLAGS_FAIL) {
3110 defAlignLong(I2O_UTIL_NOP_MESSAGE,Message);
3111 PI2O_UTIL_NOP_MESSAGE Message_Ptr;
3114 MessageOffset = (u_long)
3115 I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
3116 (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
3118 * Get the Original Message Frame's address, and get
3119 * its Transaction Context into our space. (Currently
3120 * unused at original authorship, but better to be
3121 * safe than sorry). Straight copy means that we
3122 * need not concern ourselves with the (optional
3123 * byteswapping) method access.
3125 Reply->StdReplyFrame.TransactionContext
3126 = ((PI2O_SINGLE_REPLY_MESSAGE_FRAME)
3127 (sc->ha_Fvirt + MessageOffset))->TransactionContext;
3129 * For 64 bit machines, we need to reconstruct the
3132 ccb = (union asr_ccb *)(long)
3133 I2O_MESSAGE_FRAME_getInitiatorContext64(
3134 &(Reply->StdReplyFrame.StdMessageFrame));
3136 * Unique error code for command failure.
3138 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3139 &(Reply->StdReplyFrame), (u_int16_t)-2);
3141 * Modify the message frame to contain a NOP and
3142 * re-issue it to the controller.
3144 Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
3145 Message, sizeof(I2O_UTIL_NOP_MESSAGE));
3146 # if (I2O_UTIL_NOP != 0)
3147 I2O_MESSAGE_FRAME_setFunction (
3148 &(Message_Ptr->StdMessageFrame),
3152 * Copy the packet out to the Original Message
3154 bcopy ((caddr_t)Message_Ptr,
3155 sc->ha_Fvirt + MessageOffset,
3156 sizeof(I2O_UTIL_NOP_MESSAGE));
3160 sc->ha_Virt->ToFIFO = MessageOffset;
3164 * Asynchronous command with no return requirements,
3165 * and a generic handler for immunity against odd error
3166 * returns from the adapter.
3168 if (ccb == (union asr_ccb *)NULL) {
3170 * Return Reply so that it can be used for the
3173 sc->ha_Virt->FromFIFO = ReplyOffset;
3177 /* Welease Wadjah! (and stop timeouts) */
3178 ASR_ccbRemove (sc, ccb);
3181 I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
3182 &(Reply->StdReplyFrame))) {
3184 case I2O_SCSI_DSC_SUCCESS:
3185 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3186 ccb->ccb_h.status |= CAM_REQ_CMP;
3189 case I2O_SCSI_DSC_CHECK_CONDITION:
3190 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3191 ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID;
3194 case I2O_SCSI_DSC_BUSY:
3196 case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
3198 case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
3200 case I2O_SCSI_HBA_DSC_BUS_BUSY:
3201 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3202 ccb->ccb_h.status |= CAM_SCSI_BUSY;
3205 case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
3206 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3207 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3210 case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
3212 case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
3214 case I2O_SCSI_HBA_DSC_LUN_INVALID:
3216 case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
3217 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3218 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
3221 case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
3223 case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
3224 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3225 ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
3229 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3230 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3233 if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
3235 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
3239 /* Sense data in reply packet */
3240 if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
3241 u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);
3244 if (size > sizeof(ccb->csio.sense_data)) {
3245 size = sizeof(ccb->csio.sense_data);
3247 if (size > I2O_SCSI_SENSE_DATA_SZ) {
3248 size = I2O_SCSI_SENSE_DATA_SZ;
3250 if ((ccb->csio.sense_len)
3251 && (size > ccb->csio.sense_len)) {
3252 size = ccb->csio.sense_len;
3254 bcopy ((caddr_t)Reply->SenseData,
3255 (caddr_t)&(ccb->csio.sense_data), size);
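/*
 * The copy length above is the auto-sense byte count reported by the IOP,
 * clamped to the CAM sense buffer size, to the I2O reply's sense area
 * (I2O_SCSI_SENSE_DATA_SZ), and to the caller's requested sense_len when
 * one was supplied, so the bcopy can never overrun either buffer.
 */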
3260 * Return Reply so that it can be used for the next command
3261 * since we have no more need for it now
3263 sc->ha_Virt->FromFIFO = ReplyOffset;
3265 if (ccb->ccb_h.path) {
3266 xpt_done ((union ccb *)ccb);
3268 wakeup ((caddr_t)ccb);
3274 #undef QueueSize /* Grrrr */
3275 #undef SG_Size /* Grrrr */
3278 * Meant to be included at the bottom of asr.c !!!
3282 * Included here as hard coded. Done because other necessary include
3283 * files utilize C++ comment structures which make them a nuisance to
3284 * include here just to pick up these three typedefs.
3286 typedef U32 DPT_TAG_T;
3287 typedef U32 DPT_MSG_T;
3288 typedef U32 DPT_RTN_T;
3290 #undef SCSI_RESET /* Conflicts with "scsi/scsiconf.h" definition */
3291 #include "osd_unix.h"
3293 #define asr_unit(dev) minor(dev)
3295 STATIC INLINE Asr_softc_t *
3299 int unit = asr_unit(dev);
3300 OUT Asr_softc_t * sc = Asr_softc;
3302 while (sc && sc->ha_sim[0] && (cam_sim_unit(sc->ha_sim[0]) != unit)) {
3308 STATIC u_int8_t ASR_ctlr_held;
3309 #if (!defined(UNREFERENCED_PARAMETER))
3310 # define UNREFERENCED_PARAMETER(x) (void)(x)
3322 UNREFERENCED_PARAMETER(flags);
3323 UNREFERENCED_PARAMETER(ifmt);
3325 if (ASR_get_sc (dev) == (Asr_softc_t *)NULL) {
3328 KKASSERT(td->td_proc);
3330 if (ASR_ctlr_held) {
3332 } else if ((error = suser_cred(td->td_proc->p_ucred, 0)) == 0) {
3346 UNREFERENCED_PARAMETER(dev);
3347 UNREFERENCED_PARAMETER(flags);
3348 UNREFERENCED_PARAMETER(ifmt);
3349 UNREFERENCED_PARAMETER(td);
3356 /*-------------------------------------------------------------------------*/
3357 /* Function ASR_queue_i */
3358 /*-------------------------------------------------------------------------*/
3359 /* The Parameters Passed To This Function Are : */
3360 /* Asr_softc_t * : HBA miniport driver's adapter data storage. */
3361 /* PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command */
3362 /* I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure */
3364 /* This Function Will Take The User Request Packet And Convert It To An */
3365 /* I2O MSG And Send It Off To The Adapter. */
3367 /* Return : 0 For OK, Error Code Otherwise */
3368 /*-------------------------------------------------------------------------*/
3371 IN Asr_softc_t * sc,
3372 INOUT PI2O_MESSAGE_FRAME Packet)
3374 union asr_ccb * ccb;
3375 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
3376 PI2O_MESSAGE_FRAME Message_Ptr;
3377 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply_Ptr;
3378 int MessageSizeInBytes;
3379 int ReplySizeInBytes;
3382 /* Scatter Gather buffer list */
3383 struct ioctlSgList_S {
3384 SLIST_ENTRY(ioctlSgList_S) link;
3386 I2O_FLAGS_COUNT FlagsCount;
3387 char KernelSpace[sizeof(long)];
3389 /* Generates a `first' entry */
3390 SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;
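/*
 * Each simple SG element in the user's message gets one ioctlSgList_S
 * node: FlagsCount preserves the original element, UserSpace remembers the
 * user address so the data can later be copied back out, and KernelSpace
 * is the start of a kernel bounce buffer for the element (the array is
 * deliberately declared short and over-allocated by malloc below).
 */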
3392 if (ASR_getBlinkLedCode(sc)) {
3393 debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
3394 ASR_getBlinkLedCode(sc));
3397 /* Copy the message into a local allocation */
3398 if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (
3399 sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK))
3400 == (PI2O_MESSAGE_FRAME)NULL) {
3401 debug_usr_cmd_printf (
3402 "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3405 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3406 sizeof(I2O_MESSAGE_FRAME))) != 0) {
3407 free (Message_Ptr, M_TEMP);
3408 debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
3411 /* Acquire information to determine type of packet */
3412 MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
3413 /* The offset of the reply information within the user packet */
3414 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
3415 + MessageSizeInBytes);
3417 /* Check if the message is a synchronous initialization command */
3418 s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
3419 free (Message_Ptr, M_TEMP);
3422 case I2O_EXEC_IOP_RESET:
3425 status = ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt);
3426 ReplySizeInBytes = sizeof(status);
3427 debug_usr_cmd_printf ("resetIOP done\n");
3428 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3432 case I2O_EXEC_STATUS_GET:
3433 { I2O_EXEC_STATUS_GET_REPLY status;
3435 if (ASR_getStatus (sc->ha_Virt, sc->ha_Fvirt, &status)
3436 == (PI2O_EXEC_STATUS_GET_REPLY)NULL) {
3437 debug_usr_cmd_printf ("getStatus failed\n");
3440 ReplySizeInBytes = sizeof(status);
3441 debug_usr_cmd_printf ("getStatus done\n");
3442 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3446 case I2O_EXEC_OUTBOUND_INIT:
3449 status = ASR_initOutBound(sc);
3450 ReplySizeInBytes = sizeof(status);
3451 debug_usr_cmd_printf ("initOutBound done\n");
3452 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3457 /* Determine if the message size is valid */
3458 if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
3459 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
3460 debug_usr_cmd_printf ("Packet size %d incorrect\n",
3461 MessageSizeInBytes);
3465 if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (MessageSizeInBytes,
3466 M_TEMP, M_WAITOK)) == (PI2O_MESSAGE_FRAME)NULL) {
3467 debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3468 MessageSizeInBytes);
3471 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3472 MessageSizeInBytes)) != 0) {
3473 free (Message_Ptr, M_TEMP);
3474 debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
3475 MessageSizeInBytes, error);
3479 /* Check the size of the reply frame, and start constructing */
3481 if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
3482 sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK))
3483 == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
3484 free (Message_Ptr, M_TEMP);
3485 debug_usr_cmd_printf (
3486 "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3489 if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
3490 sizeof(I2O_MESSAGE_FRAME))) != 0) {
3491 free (Reply_Ptr, M_TEMP);
3492 free (Message_Ptr, M_TEMP);
3493 debug_usr_cmd_printf (
3494 "Failed to copy in reply frame, errno=%d\n",
3498 ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
3499 &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
3500 free (Reply_Ptr, M_TEMP);
3501 if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
3502 free (Message_Ptr, M_TEMP);
3503 debug_usr_cmd_printf (
3504 "Failed to copy in reply frame[%d], errno=%d\n",
3505 ReplySizeInBytes, error);
3509 if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
3510 ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
3512 : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
3513 M_TEMP, M_WAITOK)) == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
3514 free (Message_Ptr, M_TEMP);
3515 debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3519 (void)ASR_fillMessage ((char *)Reply_Ptr, ReplySizeInBytes);
3520 Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
3521 = Message_Ptr->InitiatorContext;
3522 Reply_Ptr->StdReplyFrame.TransactionContext
3523 = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
3524 I2O_MESSAGE_FRAME_setMsgFlags(
3525 &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3526 I2O_MESSAGE_FRAME_getMsgFlags(
3527 &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
3528 | I2O_MESSAGE_FLAGS_REPLY);
3530 /* Check if the message is a special case command */
3531 switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
3532 case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
3533 if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
3534 Message_Ptr) & 0xF0) >> 2)) {
3535 free (Message_Ptr, M_TEMP);
3536 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3537 &(Reply_Ptr->StdReplyFrame),
3538 (ASR_setSysTab(sc) != CAM_REQ_CMP));
3539 I2O_MESSAGE_FRAME_setMessageSize(
3540 &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3541 sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
3542 error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
3544 free (Reply_Ptr, M_TEMP);
3549 /* Deal with the general case */
3550 /* First allocate and optionally copy in each scatter gather element */
3551 SLIST_INIT(&sgList);
3552 if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
3553 PI2O_SGE_SIMPLE_ELEMENT sg;
3556 * Since this code is reused in several systems, it is more
3557 * efficient to use a shift operation rather than a divide
3558 * by sizeof(u_int32_t).
3560 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3561 + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
3563 while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
3564 + MessageSizeInBytes)) {
3568 if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3569 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
3573 len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
3574 debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
3575 sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3576 + ((I2O_MESSAGE_FRAME_getVersionOffset(
3577 Message_Ptr) & 0xF0) >> 2)),
3578 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);
3580 if ((elm = (struct ioctlSgList_S *)malloc (
3581 sizeof(*elm) - sizeof(elm->KernelSpace) + len,
3583 == (struct ioctlSgList_S *)NULL) {
3584 debug_usr_cmd_printf (
3585 "Failed to allocate SG[%d]\n", len);
3589 SLIST_INSERT_HEAD(&sgList, elm, link);
3590 elm->FlagsCount = sg->FlagsCount;
3591 elm->UserSpace = (caddr_t)
3592 (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
3593 v = elm->KernelSpace;
3594 /* Copy in outgoing data (DIR bit could be invalid) */
3595 if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
3600 * If the buffer is not contiguous, let's
3601 * break up the scatter/gather entries.
3604 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
3605 (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
3606 int next, base, span;
3609 next = base = KVTOPHYS(v);
3610 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
3613 /* How far can we go physically contiguously */
3614 while ((len > 0) && (base == next)) {
3617 next = trunc_page(base) + PAGE_SIZE;
3628 /* Construct the Flags */
3629 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
3632 int flags = I2O_FLAGS_COUNT_getFlags(
3633 &(elm->FlagsCount));
3634 /* Any remaining length? */
3637 ~(I2O_SGL_FLAGS_END_OF_BUFFER
3638 | I2O_SGL_FLAGS_LAST_ELEMENT);
3640 I2O_FLAGS_COUNT_setFlags(
3641 &(sg->FlagsCount), flags);
3644 debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
3645 sg - (PI2O_SGE_SIMPLE_ELEMENT)
3646 ((char *)Message_Ptr
3647 + ((I2O_MESSAGE_FRAME_getVersionOffset(
3648 Message_Ptr) & 0xF0) >> 2)),
3649 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
3656 * Incrementing requires resizing of the
3657 * packet, and moving up the existing SG
3661 MessageSizeInBytes += sizeof(*sg);
3662 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
3663 I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
3664 + (sizeof(*sg) / sizeof(U32)));
3666 PI2O_MESSAGE_FRAME NewMessage_Ptr;
3669 = (PI2O_MESSAGE_FRAME)
3670 malloc (MessageSizeInBytes,
3672 == (PI2O_MESSAGE_FRAME)NULL) {
3673 debug_usr_cmd_printf (
3674 "Failed to acquire frame[%d] memory\n",
3675 MessageSizeInBytes);
3679 span = ((caddr_t)sg)
3680 - (caddr_t)Message_Ptr;
3681 bcopy ((caddr_t)Message_Ptr,
3682 (caddr_t)NewMessage_Ptr, span);
3683 bcopy ((caddr_t)(sg-1),
3684 ((caddr_t)NewMessage_Ptr) + span,
3685 MessageSizeInBytes - span);
3686 free (Message_Ptr, M_TEMP);
3687 sg = (PI2O_SGE_SIMPLE_ELEMENT)
3688 (((caddr_t)NewMessage_Ptr) + span);
3689 Message_Ptr = NewMessage_Ptr;
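/*
 * Splitting a user SG element means the message itself must grow by one
 * simple SG element: the frame is reallocated at the larger size, the
 * portion before the current element is copied as-is, the tail is copied
 * up with room for the extra element, and sg and Message_Ptr are
 * re-pointed into the new frame before the walk continues.
 */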