1 /* $FreeBSD: src/sys/dev/asr/asr.c,v 1.3.2.2 2001/08/23 05:21:29 scottl Exp $ */
3 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
4 * Copyright (c) 2000-2001 Adaptec Corporation
7 * TERMS AND CONDITIONS OF USE
9 * Redistribution and use in source form, with or without modification, are
10 * permitted provided that redistributions of source code must retain the
11 * above copyright notice, this list of conditions and the following disclaimer.
13 * This software is provided `as is' by Adaptec and any express or implied
14 * warranties, including, but not limited to, the implied warranties of
15 * merchantability and fitness for a particular purpose, are disclaimed. In no
16 * event shall Adaptec be liable for any direct, indirect, incidental, special,
17 * exemplary or consequential damages (including, but not limited to,
18 * procurement of substitute goods or services; loss of use, data, or profits;
19 * or business interruptions) however caused and on any theory of liability,
20 * whether in contract, strict liability, or tort (including negligence or
21 * otherwise) arising in any way out of the use of this driver software, even
22 * if advised of the possibility of such damage.
24 * SCSI I2O host adapter driver
26 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
27 * - The 2000S and 2005S do not initialize on some machines,
28 * increased timeout to 255ms from 50ms for the StatusGet
30 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
31 * - I knew this one was too good to be true. The error return
32 * on ioctl commands needs to be compared to CAM_REQ_CMP, not
33 * to the bit masked status.
34 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
35 * - The 2005S that was supported is affectionately called the
36 * Conjoined BAR Firmware. In order to support RAID-5 in a
37 * 16MB low-cost configuration, Firmware was forced to go
38 * to a Split BAR Firmware. This requires a separate IOP and
39 * Messaging base address.
40 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
41 * - Handle support for 2005S Zero Channel RAID solution.
42 * - System locked up if the Adapter locked up. Do not try
43 * to send other commands if the resetIOP command fails. The
44 * fail outstanding command discovery loop was flawed as the
45 * removal of the command from the list prevented discovering
47 * - Comment changes to clarify driver.
48 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
49 * - We do not use the AC_FOUND_DEV event because of I2O.
51 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
52 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
53 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0
54 * mode as this is confused with competitor adapters in run
56 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove
57 * to prevent operating system panic.
58 * - moved default major number to 154 from 97.
59 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
60 * - The controller is not actually an ASR (Adaptec SCSI RAID)
61 * series that is visible, it's more of an internal code name.
62 * remove any visible references within reason for now.
63 * - bus_ptr->LUN was not correctly zeroed when initially
64 * allocated causing a possible panic of the operating system
66 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
67 * - Code always fails for ASR_getTid affecting performance.
68 * - initiated a set of changes that resulted from a formal
69 * code inspection by Mark_Salyzyn@adaptec.com,
70 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
71 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
72 * Their findings were focussed on the LCT & TID handler, and
73 * all resulting changes were to improve code readability,
74 * consistency or have a positive effect on performance.
75 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
76 * - Passthrough returned an incorrect error.
77 * - Passthrough did not migrate the intrinsic scsi layer wakeup
78 * on command completion.
79 * - generate control device nodes using make_dev and delete_dev.
80 * - Performance affected by TID caching reallocing.
81 * - Made suggested changes by Justin_Gibbs@adaptec.com
82 * - use splcam instead of splbio.
83 * - use u_int8_t instead of u_char.
84 * - use u_int16_t instead of u_short.
85 * - use u_int32_t instead of u_long where appropriate.
86 * - use 64 bit context handler instead of 32 bit.
87 * - create_ccb should only allocate the worst case
88 * requirements for the driver since CAM may evolve
89 * making union ccb much larger than needed here.
90 * renamed create_ccb to asr_alloc_ccb.
91 * - go nutz justifying all debug prints as macros
92 * defined at the top and remove unsightly ifdefs.
93 * - INLINE STATIC viewed as confusing. Historically
94 * utilized to affect code performance and debug
95 * issues in OS, Compiler or OEM specific situations.
96 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
97 * - Ported from FreeBSD 2.2.X DPT I2O driver.
98 * changed struct scsi_xfer to union ccb/struct ccb_hdr
99 * changed variable name xs to ccb
100 * changed struct scsi_link to struct cam_path
101 * changed struct scsibus_data to struct cam_sim
102 * stopped using fordriver for holding on to the TID
103 * use proprietary packet creation instead of scsi_inquire
104 * CAM layer sends synchronize commands.
/* Driver version stamp, consumed by the dpt_sig_S signature below. */
#define ASR_VERSION     1
#define ASR_REVISION    '0'
#define ASR_SUBREVISION '8'
/*
 * dpt_sig_S encodes the year as an offset from 1980.  The expression is
 * parenthesized so that it stays correct if the macro is ever used inside
 * a larger arithmetic expression (e.g. `x * ASR_YEAR').
 */
#define ASR_YEAR        (2001 - 1980)
115 * Debug macros to reduce the unsightly ifdefs
117 #if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
118 # define debug_asr_message(message) \
120 u_int32_t * pointer = (u_int32_t *)message; \
121 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);\
122 u_int32_t counter = 0; \
125 kprintf ("%08lx%c", (u_long)*(pointer++), \
126 (((++counter & 7) == 0) || (length == 0)) \
131 #endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
133 #if (defined(DEBUG_ASR))
134 /* Breaks on none STDC based compilers :-( */
135 # define debug_asr_printf(fmt,args...) kprintf(fmt, ##args)
136 # define debug_asr_dump_message(message) debug_asr_message(message)
137 # define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path);
138 /* None fatal version of the ASSERT macro */
139 # if (defined(__STDC__))
140 # define ASSERT(phrase) if(!(phrase))kprintf(#phrase " at line %d file %s\n",__LINE__,__FILE__)
142 # define ASSERT(phrase) if(!(phrase))kprintf("phrase" " at line %d file %s\n",__LINE__,__FILE__)
144 #else /* DEBUG_ASR */
145 # define debug_asr_printf(fmt,args...)
146 # define debug_asr_dump_message(message)
147 # define debug_asr_print_path(ccb)
149 #endif /* DEBUG_ASR */
152 * If DEBUG_ASR_CMD is defined:
153 * 0 - Display incoming SCSI commands
154 * 1 - add in a quick character before queueing.
155 * 2 - add in outgoing message frames.
157 #if (defined(DEBUG_ASR_CMD))
158 # define debug_asr_cmd_printf(fmt,args...) kprintf(fmt,##args)
159 # define debug_asr_dump_ccb(ccb) \
161 u_int8_t * cp = (unsigned char *)&(ccb->csio.cdb_io); \
162 int len = ccb->csio.cdb_len; \
165 debug_asr_cmd_printf (" %02x", *(cp++)); \
169 # if (DEBUG_ASR_CMD > 0)
170 # define debug_asr_cmd1_printf debug_asr_cmd_printf
172 # define debug_asr_cmd1_printf(fmt,args...)
174 # if (DEBUG_ASR_CMD > 1)
175 # define debug_asr_cmd2_printf debug_asr_cmd_printf
176 # define debug_asr_cmd2_dump_message(message) debug_asr_message(message)
178 # define debug_asr_cmd2_printf(fmt,args...)
179 # define debug_asr_cmd2_dump_message(message)
181 #else /* DEBUG_ASR_CMD */
182 # define debug_asr_cmd_printf(fmt,args...)
183 # define debug_asr_cmd_dump_ccb(ccb)
184 # define debug_asr_cmd1_printf(fmt,args...)
185 # define debug_asr_cmd2_printf(fmt,args...)
186 # define debug_asr_cmd2_dump_message(message)
187 #endif /* DEBUG_ASR_CMD */
189 #if (defined(DEBUG_ASR_USR_CMD))
190 # define debug_usr_cmd_printf(fmt,args...) kprintf(fmt,##args)
191 # define debug_usr_cmd_dump_message(message) debug_usr_message(message)
192 #else /* DEBUG_ASR_USR_CMD */
193 # define debug_usr_cmd_printf(fmt,args...)
194 # define debug_usr_cmd_dump_message(message)
195 #endif /* DEBUG_ASR_USR_CMD */
197 #define dsDescription_size 46 /* Snug as a bug in a rug */
200 static dpt_sig_S ASR_sig = {
201 { 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
202 PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
203 OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL,
205 0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
206 ASR_MONTH, ASR_DAY, ASR_YEAR,
207 /* 01234567890123456789012345678901234567890123456789 < 50 chars */
208 "Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
209 /* ^^^^^ asr_attach alters these to match OS */
212 #include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */
213 #include <sys/kernel.h>
214 #include <sys/systm.h>
215 #include <sys/malloc.h>
216 #include <sys/proc.h>
217 #include <sys/priv.h>
218 #include <sys/conf.h>
220 #include <sys/rman.h>
221 #include <sys/stat.h>
222 #include <sys/device.h>
223 #include <sys/thread2.h>
225 #include <bus/cam/cam.h>
226 #include <bus/cam/cam_ccb.h>
227 #include <bus/cam/cam_sim.h>
228 #include <bus/cam/cam_xpt_sim.h>
229 #include <bus/cam/cam_xpt_periph.h>
231 #include <bus/cam/scsi/scsi_all.h>
232 #include <bus/cam/scsi/scsi_message.h>
236 #include <machine/cputypes.h>
237 #include <machine/clock.h>
238 #include <machine/vmparam.h>
240 #include <bus/pci/pcivar.h>
241 #include <bus/pci/pcireg.h>
243 #define STATIC static
246 #if (defined(DEBUG_ASR) && (DEBUG_ASR > 0))
256 #define osdSwap4(x) ((u_long)ntohl((u_long)(x)))
257 #define KVTOPHYS(x) vtophys(x)
258 #include "dptalign.h"
260 #include "i2obscsi.h"
262 #include "i2oadptr.h"
263 #include "sys_info.h"
265 /* Configuration Definitions */
267 #define SG_SIZE 58 /* Scatter Gather list Size */
268 #define MAX_TARGET_ID 126 /* Maximum Target ID supported */
269 #define MAX_LUN 255 /* Maximum LUN Supported */
270 #define MAX_CHANNEL 7 /* Maximum Channel # Supported by driver */
271 #define MAX_INBOUND 2000 /* Max CCBs, Also Max Queue Size */
272 #define MAX_OUTBOUND 256 /* Maximum outbound frames/adapter */
273 #define MAX_INBOUND_SIZE 512 /* Maximum inbound frame size */
274 #define MAX_MAP 4194304L /* Maximum mapping size of IOP */
275 /* Also serves as the minimum map for */
276 /* the 2005S zero channel RAID product */
278 /**************************************************************************
279 ** ASR Host Adapter structure - One Structure For Each Host Adapter That **
280 ** Is Configured Into The System. The Structure Supplies Configuration **
281 ** Information, Status Info, Queue Info And An Active CCB List Pointer. **
282 ***************************************************************************/
284 /* I2O register set */
289 # define Mask_InterruptsDisabled 0x08
291 volatile U32 ToFIFO; /* In Bound FIFO */
292 volatile U32 FromFIFO; /* Out Bound FIFO */
296 * A MIX of performance and space considerations for TID lookups
298 typedef u_int16_t tid_t;
301 u_int32_t size; /* up to MAX_LUN */
306 u_int32_t size; /* up to MAX_TARGET */
311 * To ensure that we only allocate and use the worst case ccb here, lets
312 * make our own local ccb union. If asr_alloc_ccb is utilized for another
313 * ccb type, ensure that you add the additional structures into our local
314 * ccb union. To ensure strict type checking, we will utilize the local
315 * ccb definition wherever possible.
318 struct ccb_hdr ccb_h; /* For convenience */
319 struct ccb_scsiio csio;
320 struct ccb_setasync csa;
/*
 * Per-adapter soft state: one instance per configured host adapter.
 * Holds register mappings, the cached LCT, per-bus CAM state, TID lookup
 * tables and configuration limits.  (Closing brace of the typedef is
 * missing from this listing.)
 */
typedef struct Asr_softc {
    void *             ha_Base;        /* base port for each board */
    u_int8_t * volatile ha_blinkLED;   /* mapped blink-LED status bytes (see ASR_getBlinkLedCode) */
    i2oRegs_t *        ha_Virt;        /* Base address of IOP */
    U8 *               ha_Fvirt;       /* Base address of Frames */
    I2O_IOP_ENTRY      ha_SystemTable;
    LIST_HEAD(,ccb_hdr) ha_ccb;        /* ccbs in use */
    struct cam_path *  ha_path[MAX_CHANNEL+1];
    struct cam_sim *   ha_sim[MAX_CHANNEL+1];
    struct resource *  ha_mem_res;
    struct resource *  ha_mes_res;
    struct resource *  ha_irq_res;
    PI2O_LCT           ha_LCT;         /* Complete list of devices */
/* Accessors into an LCT entry's IdentityTag bytes */
#        define le_type   IdentityTag[0]
#        define I2O_BSA      0x20
#        define I2O_FCA      0x40
#        define I2O_SCSI     0x00
#        define I2O_PORT     0x80
#        define I2O_UNKNOWN  0x7F
#        define le_bus    IdentityTag[1]
#        define le_target IdentityTag[2]
#        define le_lun    IdentityTag[3]
    target2lun_t *     ha_targets[MAX_CHANNEL+1]; /* per-bus TID lookup tables */
    PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;  /* outbound reply frame pool */
    u_int8_t           ha_in_reset;    /* adapter reset state machine, one of: */
#        define HA_OPERATIONAL       0
#        define HA_IN_RESET          1
#        define HA_OFF_LINE          2
#        define HA_OFF_LINE_RECOVERY 3
    /* Configuration information */
    /* The target id maximums we take */
    u_int8_t           ha_MaxBus;      /* Maximum bus */
    u_int8_t           ha_MaxId;       /* Maximum target ID */
    u_int8_t           ha_MaxLun;      /* Maximum target LUN */
    u_int8_t           ha_SgSize;      /* Max SG elements */
    u_int8_t           ha_pciBusNum;
    u_int8_t           ha_pciDeviceNum;
    u_int8_t           ha_adapter_target[MAX_CHANNEL+1]; /* controller's own ID per bus */
    u_int16_t          ha_QueueSize;   /* Max outstanding commands */
    u_int16_t          ha_Msgs_Count;
    /* Links into other parents and HBAs */
    struct Asr_softc * ha_next;        /* HBA list */
372 STATIC Asr_softc_t * Asr_softc;
375 * Prototypes of the routines we have in this object.
378 /* Externally callable routines */
/*
 * Glue macros abstracting the newbus probe/attach calling convention so
 * the probe bodies below read identically across OS ports.
 */
#define PROBE_ARGS  IN device_t tag
#define PROBE_RET   int
/* `id' packs (device << 16) | vendor for the single-compare probes below */
#define PROBE_SET() u_long id = (pci_get_device(tag)<<16)|pci_get_vendor(tag)
/* Non-NULL description: claim the device (return 0); otherwise ENXIO */
#define PROBE_RETURN(retval) if(retval){device_set_desc(tag,retval);return(0);}else{return(ENXIO);}
#define ATTACH_ARGS IN device_t tag
#define ATTACH_RET  int
#define ATTACH_SET() int unit = device_get_unit(tag)
#define ATTACH_RETURN(retval) return(retval)
387 /* I2O HDM interface */
388 STATIC PROBE_RET asr_probe (PROBE_ARGS);
389 STATIC ATTACH_RET asr_attach (ATTACH_ARGS);
390 /* DOMINO placeholder */
391 STATIC PROBE_RET domino_probe (PROBE_ARGS);
392 STATIC ATTACH_RET domino_attach (ATTACH_ARGS);
393 /* MODE0 adapter placeholder */
394 STATIC PROBE_RET mode0_probe (PROBE_ARGS);
395 STATIC ATTACH_RET mode0_attach (ATTACH_ARGS);
397 STATIC Asr_softc_t * ASR_get_sc (cdev_t dev);
398 STATIC d_ioctl_t asr_ioctl;
399 STATIC d_open_t asr_open;
400 STATIC d_close_t asr_close;
401 STATIC int asr_intr (IN Asr_softc_t *sc);
402 STATIC void asr_timeout (INOUT void *arg);
403 STATIC int ASR_init (IN Asr_softc_t *sc);
404 STATIC INLINE int ASR_acquireLct (INOUT Asr_softc_t *sc);
405 STATIC INLINE int ASR_acquireHrt (INOUT Asr_softc_t *sc);
406 STATIC void asr_action (IN struct cam_sim *sim,
408 STATIC void asr_poll (IN struct cam_sim * sim);
411 * Here is the auto-probe structure used to nest our tests appropriately
412 * during the startup phase of the operating system.
414 STATIC device_method_t asr_methods[] = {
415 DEVMETHOD(device_probe, asr_probe),
416 DEVMETHOD(device_attach, asr_attach),
420 STATIC driver_t asr_driver = {
426 STATIC devclass_t asr_devclass;
428 DECLARE_DUMMY_MODULE(asr);
429 DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, NULL, NULL);
431 STATIC device_method_t domino_methods[] = {
432 DEVMETHOD(device_probe, domino_probe),
433 DEVMETHOD(device_attach, domino_attach),
437 STATIC driver_t domino_driver = {
443 STATIC devclass_t domino_devclass;
445 DRIVER_MODULE(domino, pci, domino_driver, domino_devclass, NULL, NULL);
447 STATIC device_method_t mode0_methods[] = {
448 DEVMETHOD(device_probe, mode0_probe),
449 DEVMETHOD(device_attach, mode0_attach),
453 STATIC driver_t mode0_driver = {
459 STATIC devclass_t mode0_devclass;
461 DRIVER_MODULE(mode0, pci, mode0_driver, mode0_devclass, NULL, NULL);
464 * devsw for asr hba driver
466 * only ioctl is used. the sd driver provides all other access.
468 STATIC struct dev_ops asr_ops = {
471 .d_close = asr_close,
472 .d_ioctl = asr_ioctl,
475 /* I2O support routines */
/*
 * Declare a stack buffer large enough to hold STRUCT, and cast it back to
 * the typed pointer.  NOTE(review): a bare char array carries no alignment
 * guarantee by itself -- the "long aligned" property presumably relies on
 * how the compilers in use lay out locals; confirm before reuse.
 */
#define defAlignLong(STRUCT,NAME) char NAME[sizeof(STRUCT)]
#define getAlignLong(STRUCT,NAME) ((STRUCT *)(NAME))
480 * Fill message with default.
/*
 * Zero a caller-supplied, long-aligned buffer and initialize the common
 * I2O message frame header fields (version/offset, size, initiator
 * address), returning the typed frame pointer.
 * NOTE(review): the function name and parameter lines are missing from
 * this listing; parameters appear to be `Message' (raw buffer) and
 * `size' (frame size in bytes).
 */
STATIC PI2O_MESSAGE_FRAME
    OUT PI2O_MESSAGE_FRAME Message_Ptr;

    Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message);
    bzero ((void *)Message_Ptr, size);
    I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
    /* Frame size is expressed in 32 bit words, rounded up from bytes */
    I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
      (size + sizeof(U32) - 1) >> 2);
    I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
    return (Message_Ptr);
} /* ASR_fillMessage */
/* Sentinel the hardware FIFO returns when no frame offset is available */
#define EMPTY_QUEUE ((U32)-1L)

/*
 * Pop the offset of the next free inbound message frame from the
 * adapter's inbound FIFO.  Returns EMPTY_QUEUE when none is available.
 */
    OUT U32 MessageOffset;

    /*
     * Reading ToFIFO pops one entry.  If the first read reports empty,
     * retry the read once -- presumably the first read can transiently
     * return EMPTY_QUEUE (TODO confirm against the I2O specification).
     */
    if ((MessageOffset = virt->ToFIFO) == EMPTY_QUEUE) {
        MessageOffset = virt->ToFIFO;
    return (MessageOffset);
} /* ASR_getMessage */
/* Issue a polled command */
/*
 * Copy `Message' into a free inbound frame and hand it to the adapter
 * with interrupts masked, so the caller can poll for completion.
 * NOTE(review): the function signature and return statement lines are
 * missing from this listing; it appears to return the previous interrupt
 * mask, or (U32)-1 when no inbound frame could be obtained.
 */
    INOUT i2oRegs_t *       virt,
    IN PI2O_MESSAGE_FRAME   Message)

    /*
     * ASR_initiateCp is only used for synchronous commands and will
     * be made more resilient to adapter delays since commands like
     * resetIOP can cause the adapter to be deaf for a little time.
     */
    while (((MessageOffset = ASR_getMessage(virt)) == EMPTY_QUEUE)
    if (MessageOffset != EMPTY_QUEUE) {
        bcopy (Message, fvirt + MessageOffset,
          I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
        /*
         * Disable the Interrupts
         */
        virt->Mask = (Mask = virt->Mask) | Mask_InterruptsDisabled;
        /* Posting the offset back to ToFIFO submits the frame */
        virt->ToFIFO = MessageOffset;
} /* ASR_initiateCp */
/*
 * Issue an ExecIopReset message to the adapter and poll for the status
 * word the adapter DMAs back on completion.  NOTE(review): the function
 * name, reply padding and return lines are missing from this listing.
 */
    INOUT i2oRegs_t *           virt,
    /* Local frame with room for the reply status word appended */
    struct resetMessage {
        I2O_EXEC_IOP_RESET_MESSAGE M;
    defAlignLong(struct resetMessage,Message);
    PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
    OUT U32 * volatile          Reply_Ptr;

    /*
     * Build up our copy of the Message.
     */
    Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(Message,
      sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
    I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
    /*
     * Reset the Reply Status
     */
    *(Reply_Ptr = (U32 *)((char *)Message_Ptr
      + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0;
    /* Physical address where the adapter writes the completion status */
    I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
      KVTOPHYS((void *)Reply_Ptr));
    /*
     * Send the Message out
     */
    if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
        /*
         * Wait for a response (Poll), timeouts are dangerous if
         * the card is truly responsive. We assume response in 2s.
         */
        u_int8_t Delay = 200;

        while ((*Reply_Ptr == 0) && (--Delay != 0)) {
        /*
         * Re-enable the interrupts.
         */
    /* ASR_initiateCp succeeded above, so Old must be a valid mask */
    ASSERT (Old != (U32)-1L);
/*
 * Get the current state of the adapter: send an ExecStatusGet message and
 * poll until the adapter DMAs the reply into `buffer'.  Returns `buffer'
 * on success (lines missing from this listing), NULL on failure.
 */
STATIC INLINE PI2O_EXEC_STATUS_GET_REPLY
    INOUT i2oRegs_t *                 virt,
    OUT PI2O_EXEC_STATUS_GET_REPLY    buffer)
    defAlignLong(I2O_EXEC_STATUS_GET_MESSAGE,Message);
    PI2O_EXEC_STATUS_GET_MESSAGE      Message_Ptr;

    /*
     * Build up our copy of the Message.
     */
    Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(Message,
      sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
    I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
      I2O_EXEC_STATUS_GET);
    I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
      KVTOPHYS((void *)buffer));
    /* This one is a Byte Count */
    I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
      sizeof(I2O_EXEC_STATUS_GET_REPLY));
    /*
     * Reset the Reply Status
     */
    bzero ((void *)buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
    /*
     * Send the Message out
     */
    if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
        /*
         * Wait for a response (Poll), timeouts are dangerous if
         * the card is truly responsive. We assume response in 255ms
         * (raised from 50ms in V1.08 for the 2000S/2005S).
         */
        u_int8_t Delay = 255;

        /* SyncByte is polled as the completion indicator for the reply */
        while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
                buffer = (PI2O_EXEC_STATUS_GET_REPLY)NULL;
        /*
         * Re-enable the interrupts.
         */
    return ((PI2O_EXEC_STATUS_GET_REPLY)NULL);
} /* ASR_getStatus */
/*
 * Check if the device is a SCSI I2O HBA, and add it to the list.
 */

/*
 * Probe for ASR controller. If we find it, we will use it.
 * `id' is (device << 16) | vendor from PROBE_SET; 0x1044 is the DPT
 * vendor ID, devices 0xA501/0xA511 are the caching RAID parts.
 */
asr_probe(PROBE_ARGS)
    if ((id == 0xA5011044) || (id == 0xA5111044)) {
        PROBE_RETURN ("Adaptec Caching SCSI RAID");
/*
 * Probe/Attach for DOMINO chipset.
 * Placeholder: claims the memory controller so no other driver grabs it;
 * the attach does no real work.
 */
domino_probe(PROBE_ARGS)
    if (id == 0x10121044) {
        PROBE_RETURN ("Adaptec Caching Memory Controller");

domino_attach (ATTACH_ARGS)
} /* domino_attach */
/*
 * Probe/Attach for MODE0 adapters.
 * Placeholder: recognizes the raw bridge chips of firmware-less boards.
 */
mode0_probe(PROBE_ARGS)

    /*
     * If/When we can get a business case to commit to a
     * Mode0 driver here, we can make all these tests more
     * specific and robust. Mode0 adapters have their processors
     * turned off, thus the chips are in a raw state.
     */

    /* This is a PLX9054 */
    if (id == 0x905410B5) {
        PROBE_RETURN ("Adaptec Mode0 PM3757");
    /* This is a PLX9080 */
    if (id == 0x908010B5) {
        PROBE_RETURN ("Adaptec Mode0 PM3754/PM3755");
    /* This is a ZION 80303 */
    if (id == 0x53098086) {
        PROBE_RETURN ("Adaptec Mode0 3010S");
    /* This is an i960RS */
    if (id == 0x39628086) {
        PROBE_RETURN ("Adaptec Mode0 2100S");
    /* This is an i960RN */
    if (id == 0x19648086) {
        PROBE_RETURN ("Adaptec Mode0 PM2865/2400A/3200S/3400S");
#if 0    /* this would match any generic i960 -- mjs */
    /* This is an i960RP (typically also on Motherboards) */
    if (id == 0x19608086) {
        PROBE_RETURN ("Adaptec Mode0 PM2554/PM1554/PM2654");

mode0_attach (ATTACH_ARGS)
/*
 * Allocate and minimally initialize a driver-local ccb union bound to
 * softc `sc' (stashed in spriv_ptr0 for later retrieval).  The kmalloc
 * flags and the return statement are missing from this listing.
 */
STATIC INLINE union asr_ccb *
    OUT union asr_ccb * new_ccb;

    new_ccb = (union asr_ccb *)kmalloc(sizeof(*new_ccb), M_DEVBUF,
    new_ccb->ccb_h.pinfo.priority = 1;
    new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
    /* Back-pointer to the owning adapter, read back by ASR_queue_s et al. */
    new_ccb->ccb_h.spriv_ptr0 = sc;
} /* asr_alloc_ccb */
/* Release a ccb obtained from asr_alloc_ccb back to the kernel heap. */
    IN union asr_ccb * free_ccb)
    kfree(free_ccb, M_DEVBUF);
/*
 * Print inquiry data `carefully': emit at most `len' characters, stopping
 * early at NUL, space or '-'.
 */
    while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
        kprintf ("%c", *(s++));
/* Forward declaration; ASR_queue is defined later in the file */
STATIC INLINE int ASR_queue (
    IN PI2O_MESSAGE_FRAME Message);

/*
 * Send a message synchronously and without Interrupt to a ccb:
 * queue the frame with interrupts masked and busy-wait until the
 * completion path takes the ccb out of CAM_REQ_INPROG.
 * Returns the final ccb status.
 */
    INOUT union asr_ccb * ccb,
    IN PI2O_MESSAGE_FRAME Message)
    Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

    /*
     * We do not need any (optional byteswapping) method access to
     * the Initiator context field.
     */
    I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

    /* Prevent interrupt service */
    sc->ha_Virt->Mask = (Mask = sc->ha_Virt->Mask)
      | Mask_InterruptsDisabled;

    if (ASR_queue (sc, Message) == EMPTY_QUEUE) {
        ccb->ccb_h.status &= ~CAM_STATUS_MASK;
        ccb->ccb_h.status |= CAM_REQUEUE_REQ;

    /*
     * Wait for this board to report a finished instruction.
     */
    while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {

    /* Re-enable Interrupts */
    sc->ha_Virt->Mask = Mask;
    return (ccb->ccb_h.status);
/*
 * Send a message synchronously to a Asr_softc_t: wraps ASR_queue_s with a
 * throw-away ccb.  Returns CAM_REQUEUE_REQ when no ccb can be allocated.
 */
    IN PI2O_MESSAGE_FRAME Message)

    if ((ccb = asr_alloc_ccb (sc)) == NULL) {
        return (CAM_REQUEUE_REQ);

    status = ASR_queue_s (ccb, Message);
/*
 * Add the specified ccb to the active queue and arm its timeout callout
 * (unless the caller asked for an infinite timeout).
 */
    INOUT union asr_ccb * ccb)

    LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
    if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
        if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
            /*
             * RAID systems can take considerable time to
             * complete some commands given the large cache
             * flushes switching from write back to write thru.
             * Default to six minutes.
             */
            ccb->ccb_h.timeout = 6 * 60 * 1000;
        /* timeout is in milliseconds; callout_reset wants ticks */
        callout_reset(&ccb->ccb_h.timeout_ch,
          (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
/*
 * Remove the specified ccb from the active queue, cancelling its pending
 * timeout first so asr_timeout cannot fire on a dequeued ccb.
 */
    INOUT union asr_ccb * ccb)

    callout_stop(&ccb->ccb_h.timeout_ch);
    LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
} /* ASR_ccbRemove */
/*
 * Fail all the active commands, so they get re-issued by the operating
 * system: each queued ccb is dequeued, marked CAM_REQUEUE_REQ with zero
 * residual progress, and completed back to CAM (or woken, for sleepers).
 */
ASR_failActiveCommands (
    struct ccb_hdr * ccb;

#if 0 /* Currently handled by callers, unnecessary paranoia currently */
      /* Left in for historical perspective. */
    defAlignLong(I2O_EXEC_LCT_NOTIFY_MESSAGE,Message);
    PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;

    /* Send a blind LCT command to wait for the enableSys to complete */
    Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)ASR_fillMessage(Message,
      sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT));
    I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
      I2O_EXEC_LCT_NOTIFY);
    I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
      I2O_CLASS_MATCH_ANYCLASS);
    (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

    /*
     * We do not need to inform the CAM layer that we had a bus
     * reset since we manage it on our own, this also prevents the
     * SCSI_DELAY settling that would be required on other systems.
     * The `SCSI_DELAY' has already been handled by the card via the
     * acquisition of the LCT table while we are at CAM priority level.
     *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
     *      xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
     */
    while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != NULL) {
        ASR_ccbRemove (sc, (union asr_ccb *)ccb);
        ccb->status &= ~CAM_STATUS_MASK;
        ccb->status |= CAM_REQUEUE_REQ;
        /* Nothing Transferred */
        ((struct ccb_scsiio *)ccb)->resid
          = ((struct ccb_scsiio *)ccb)->dxfer_len;
        xpt_done ((union ccb *)ccb);
        /* A synchronous waiter (ASR_queue_s) may be sleeping on this ccb */
        wakeup ((caddr_t)ccb);
} /* ASR_failActiveCommands */
/*
 * The following command causes the HBA to reset the specific bus:
 * a HbaBusReset message is sent, fire-and-forget, to every I2O_PORT
 * class LCT entry on that bus.
 */
    defAlignLong(I2O_HBA_BUS_RESET_MESSAGE,Message);
    I2O_HBA_BUS_RESET_MESSAGE * Message_Ptr;
    PI2O_LCT_ENTRY              Device;

    Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(Message,
      sizeof(I2O_HBA_BUS_RESET_MESSAGE));
    I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
    /* Walk every entry of the cached LCT (table size is in 32-bit words) */
    for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
      (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
        if (((Device->le_type & I2O_PORT) != 0)
         && (Device->le_bus == bus)) {
            I2O_MESSAGE_FRAME_setTargetAddress(
              &Message_Ptr->StdMessageFrame,
              I2O_LCT_ENTRY_getLocalTID(Device));
            /* Asynchronous command, with no expectations */
            (void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
/*
 * Return the adapter's blink-LED fault code, if one is being displayed.
 * The 0xBC marker byte in ha_blinkLED[1] indicates a valid code in
 * ha_blinkLED[0] (presumably a firmware convention -- TODO confirm).
 */
ASR_getBlinkLedCode (
     && (sc->ha_blinkLED != NULL)
     && (sc->ha_blinkLED[1] == 0xBC)) {
        return (sc->ha_blinkLED[0]);
} /* ASR_getBlinkCode */
/*
 * Determine the address of a TID lookup. Must be done at high priority
 * since the address can be changed by other threads of execution.
 *
 * Returns NULL pointer if not indexable (but will attempt to generate
 * an index if `new_entry' flag is set to TRUE).
 *
 * All addressable entries are to be guaranteed zero if never initialized.
 */
STATIC INLINE tid_t *
    INOUT Asr_softc_t * sc,
    target2lun_t *      bus_ptr;
    lun2tid_t *         target_ptr;

    /*
     * Validity checking of incoming parameters. More of a bound
     * expansion limit than an issue with the code dealing with the
     *
     * sc must be valid before it gets here, so that check could be
     * dropped if speed a critical issue.
     */
     || (bus > MAX_CHANNEL)
     || (target > sc->ha_MaxId)
     || (lun > sc->ha_MaxLun)) {
        debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
          (u_long)sc, bus, target, lun);
    /*
     * See if there is an associated bus list.
     *
     * for performance, allocate in size of BUS_CHUNK chunks.
     * BUS_CHUNK must be a power of two. This is to reduce
     * fragmentation effects on the allocations.
     */
#    define BUS_CHUNK 8
    new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
    if ((bus_ptr = sc->ha_targets[bus]) == NULL) {
        /*
         * Allocate a new structure?
         * Since one element in structure, the +1
         * needed for size has been abstracted.
         */
        if ((new_entry == FALSE)
         || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)kmalloc (
          sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
            debug_asr_printf("failed to allocate bus list\n");
        /* Guarantee zeroed entries, per the contract above */
        bzero (bus_ptr, sizeof(*bus_ptr)
          + (sizeof(bus_ptr->LUN) * new_size));
        bus_ptr->size = new_size + 1;
    } else if (bus_ptr->size <= new_size) {
        target2lun_t * new_bus_ptr;

        /*
         * Reallocate a new structure?
         * Since one element in structure, the +1
         * needed for size has been abstracted.
         */
        if ((new_entry == FALSE)
         || ((new_bus_ptr = (target2lun_t *)kmalloc (
          sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
            debug_asr_printf("failed to reallocate bus list\n");
        /*
         * Zero and copy the whole thing, safer, simpler coding
         * and not really performance critical at this point.
         */
        bzero (new_bus_ptr, sizeof(*bus_ptr)
          + (sizeof(bus_ptr->LUN) * new_size));
        bcopy (bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
          + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
        sc->ha_targets[bus] = new_bus_ptr;
        kfree (bus_ptr, M_TEMP);
        bus_ptr = new_bus_ptr;
        bus_ptr->size = new_size + 1;
    /*
     * We now have the bus list, lets get to the target list.
     * Since most systems have only *one* lun, we do not allocate
     * in chunks as above, here we allow one, then in chunk sizes.
     * TARGET_CHUNK must be a power of two. This is to reduce
     * fragmentation effects on the allocations.
     */
#    define TARGET_CHUNK 8
    if ((new_size = lun) != 0) {
        new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
    if ((target_ptr = bus_ptr->LUN[target]) == NULL) {
        /*
         * Allocate a new structure?
         * Since one element in structure, the +1
         * needed for size has been abstracted.
         */
        if ((new_entry == FALSE)
         || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)kmalloc (
          sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
            debug_asr_printf("failed to allocate target list\n");
        bzero (target_ptr, sizeof(*target_ptr)
          + (sizeof(target_ptr->TID) * new_size));
        target_ptr->size = new_size + 1;
    } else if (target_ptr->size <= new_size) {
        lun2tid_t * new_target_ptr;

        /*
         * Reallocate a new structure?
         * Since one element in structure, the +1
         * needed for size has been abstracted.
         */
        if ((new_entry == FALSE)
         || ((new_target_ptr = (lun2tid_t *)kmalloc (
          sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
            debug_asr_printf("failed to reallocate target list\n");
        /*
         * Zero and copy the whole thing, safer, simpler coding
         * and not really performance critical at this point.
         */
        bzero (new_target_ptr, sizeof(*target_ptr)
          + (sizeof(target_ptr->TID) * new_size));
        bcopy (target_ptr, new_target_ptr,
          + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
        bus_ptr->LUN[target] = new_target_ptr;
        kfree (target_ptr, M_TEMP);
        target_ptr = new_target_ptr;
        target_ptr->size = new_size + 1;
    /*
     * Now, acquire the TID address from the LUN indexed list.
     */
    return (&(target_ptr->TID[lun]));
} /* ASR_getTidAddress */
/*
 * Get a pre-existing TID relationship for (bus, target, lun).
 *
 * If the TID was never set, return (tid_t)-1.
 *
 * should use mutex rather than spl.
 */
    IN Asr_softc_t * sc,
    /* Read-only lookup: FALSE means do not grow the index tables */
    if (((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, FALSE))
     /* (tid_t)0 or (tid_t)-1 indicate no TID */
     || (*tid_ptr == (tid_t)0)) {
/*
 * Set a TID relationship for (bus, target, lun), growing the index tables
 * on demand (TRUE to ASR_getTidAddress).
 *
 * If the TID was not set, return (tid_t)-1.
 *
 * should use mutex rather than spl.
 */
    INOUT Asr_softc_t * sc,
    /* (tid_t)-1 is the "no TID" sentinel and is never stored */
    if (TID != (tid_t)-1) {
        if ((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, TRUE))
/*-------------------------------------------------------------------------*/
/*                          Function ASR_rescan                            */
/*-------------------------------------------------------------------------*/
/* The Parameters Passed To This Function Are :                            */
/*     Asr_softc_t *      : HBA miniport driver's adapter data storage.    */
/*                                                                         */
/* This Function Will rescan the adapter and resynchronize any data        */
/*                                                                         */
/* Return : 0 For OK, Error Code Otherwise                                 */
/*-------------------------------------------------------------------------*/
    IN Asr_softc_t * sc)

    /*
     * Re-acquire the LCT table and synchronize us to the adapter.
     */
    if ((error = ASR_acquireLct(sc)) == 0) {
        error = ASR_acquireHrt(sc);

    bus = sc->ha_MaxBus;
    /* Reset all existing cached TID lookups */
        int target, event = 0;

        /*
         * Scan for all targets on this bus to see if they
         * got affected by the rescan.
         */
        for (target = 0; target <= sc->ha_MaxId; ++target) {

            /* Stay away from the controller ID */
            if (target == sc->ha_adapter_target[bus]) {
            for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
                PI2O_LCT_ENTRY Device;
                tid_t          TID = (tid_t)-1;

                /*
                 * See if the cached TID changed. Search for
                 * the device in our new LCT.
                 */
                for (Device = sc->ha_LCT->LCTEntry;
                  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
                   + I2O_LCT_getTableSize(sc->ha_LCT));
                    if ((Device->le_type != I2O_UNKNOWN)
                     && (Device->le_bus == bus)
                     && (Device->le_target == target)
                     && (Device->le_lun == lun)
                     && (I2O_LCT_ENTRY_getUserTID(Device)
                        TID = I2O_LCT_ENTRY_getLocalTID(
                /*
                 * Indicate to the OS that the label needs
                 * to be recalculated, or that the specific
                 * open device is no longer valid (Merde)
                 * because the cached TID changed.
                 */
                LastTID = ASR_getTid (sc, bus, target, lun);
                if (LastTID != TID) {
                    struct cam_path * path;

                    /* If no per-device path can be built, fall back
                     * to accumulating bus-wide async events below. */
                    if (xpt_create_path(&path,
                      cam_sim_path(sc->ha_sim[bus]),
                      target, lun) != CAM_REQ_CMP) {
                        if (TID == (tid_t)-1) {
                            event |= AC_LOST_DEVICE;
                            event |= AC_INQ_CHANGED
                              | AC_GETDEV_CHANGED;
                    if (TID == (tid_t)-1) {
                    } else if (LastTID == (tid_t)-1) {
                        struct ccb_getdev ccb;
                          path, /*priority*/5);
                /*
                 * We have the option of clearing the
                 * cached TID for it to be rescanned, or to
                 * set it now even if the device never got
                 * accessed. We chose the later since we
                 * currently do not use the condition that
                 * the TID ever got cached.
                 */
                ASR_setTid (sc, bus, target, lun, TID);
        /*
         * The xpt layer can not handle multiple events at the
         * same time, so deliver the accumulated ones one by one.
         */
        if (event & AC_LOST_DEVICE) {
            xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
        if (event & AC_INQ_CHANGED) {
            xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
        if (event & AC_GETDEV_CHANGED) {
            xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
    } while (--bus >= 0);
/*
 * Fragment of ASR_reset(): guards against re-entry via ha_in_reset,
 * retries ASR_resetIOP() until it succeeds (deliberate busy retry, see
 * the comment block below), re-runs ASR_init()/ASR_rescan(), fails any
 * still-active commands, and restores ha_in_reset to HA_OPERATIONAL.
 * NOTE(review): incomplete fragment of a numbered listing (embedded
 * numbers skip, e.g. 1344->1346->1349); comments only added.
 */
1340 /*-------------------------------------------------------------------------*/
1341 /* Function ASR_reset */
1342 /*-------------------------------------------------------------------------*/
1343 /* The Parameters Passed To This Function Are : */
1344 /* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1346 /* This Function Will reset the adapter and resynchronize any data */
1349 /*-------------------------------------------------------------------------*/
1353 IN Asr_softc_t * sc)
1358 if ((sc->ha_in_reset == HA_IN_RESET)
1359 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1364 * Promotes HA_OPERATIONAL to HA_IN_RESET,
1365 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1367 ++(sc->ha_in_reset);
1368 if (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0) {
1369 debug_asr_printf ("ASR_resetIOP failed\n");
1371 * We really need to take this card off-line, easier said
1372 * than make sense. Better to keep retrying for now since if a
1373 * UART cable is connected the blinkLEDs the adapter is now in
1374 * a hard state requiring action from the monitor commands to
1375 * the HBA to continue. For debugging waiting forever is a
1376 * good thing. In a production system, however, one may wish
1377 * to instead take the card off-line ...
1379 # if 0 && (defined(HA_OFF_LINE))
1381 * Take adapter off-line.
1383 kprintf ("asr%d: Taking adapter off-line\n",
1385 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1387 sc->ha_in_reset = HA_OFF_LINE;
1392 while (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0);
1395 retVal = ASR_init (sc);
1398 debug_asr_printf ("ASR_init failed\n");
1399 sc->ha_in_reset = HA_OFF_LINE;
1402 if (ASR_rescan (sc) != 0) {
1403 debug_asr_printf ("ASR_rescan failed\n");
1405 ASR_failActiveCommands (sc);
1406 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
/* NOTE(review): "Brining" below is a typo for "Bringing"; the string is
 * runtime output and is deliberately left untouched in this
 * comments-only edit — fix it in a code change. */
1407 kprintf ("asr%d: Brining adapter back on-line\n",
1409 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1412 sc->ha_in_reset = HA_OPERATIONAL;
/*
 * Fragment of asr_timeout(): CCB timeout handler.  If the adapter shows
 * a blink-LED code it performs a full ASR_reset(); on a second timeout
 * of the same CCB it also resets the adapter; otherwise it marks the
 * CCB CAM_CMD_TIMEOUT, re-arms the callout, resets the SCSI bus and
 * broadcasts AC_BUS_RESET.  The driver deliberately never aborts a
 * single command (see the warning comment below).
 * NOTE(review): incomplete fragment — embedded line numbers skip
 * (1417->1423, 1440->1445, ...); comments only added.
 */
1417 * Device timeout handler.
1423 union asr_ccb * ccb = (union asr_ccb *)arg;
1424 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1427 debug_asr_print_path(ccb);
1428 debug_asr_printf("timed out");
1431 * Check if the adapter has locked up?
1433 if ((s = ASR_getBlinkLedCode(sc)) != 0) {
1435 kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
1436 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
1437 if (ASR_reset (sc) == ENXIO) {
1438 /* Try again later */
1439 callout_reset(&ccb->ccb_h.timeout_ch,
1440 (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
1445 * Abort does not function on the ASR card!!! Walking away from
1446 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
1447 * our best bet, followed by a complete adapter reset if that fails.
1450 /* Check if we already timed out once to raise the issue */
1451 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
1452 debug_asr_printf (" AGAIN\nreinitializing adapter\n");
1453 if (ASR_reset (sc) == ENXIO) {
1454 callout_reset(&ccb->ccb_h.timeout_ch,
1455 (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
1460 debug_asr_printf ("\nresetting bus\n");
1461 /* If the BUS reset does not take, then an adapter reset is next! */
1462 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1463 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
1464 callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
1466 ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
1467 xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
/*
 * Fragment of the asynchronous message-send routine (the function-name
 * line is missing from this listing; presumably ASR_queue — TODO
 * confirm).  Grabs a free inbound frame offset from the adapter
 * (ASR_getMessage), copies the message into adapter memory, records the
 * CCB (ASR_ccbAdd) and posts the offset to ToFIFO.  If no frame is
 * available and the adapter shows a blink-LED code, it attempts a full
 * ASR_reset() as a best effort.
 * NOTE(review): incomplete fragment of a numbered listing; comments
 * only added, code bytes untouched.
 */
1472 * send a message asynchronously
1476 IN Asr_softc_t * sc,
1477 IN PI2O_MESSAGE_FRAME Message)
1479 OUT U32 MessageOffset;
1480 union asr_ccb * ccb;
1482 debug_asr_printf ("Host Command Dump:\n");
1483 debug_asr_dump_message (Message);
1485 ccb = (union asr_ccb *)(long)
1486 I2O_MESSAGE_FRAME_getInitiatorContext64(Message);
1488 if ((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE) {
1489 bcopy (Message, sc->ha_Fvirt + MessageOffset,
1490 I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
1492 ASR_ccbAdd (sc, ccb);
1494 /* Post the command */
1495 sc->ha_Virt->ToFIFO = MessageOffset;
1497 if (ASR_getBlinkLedCode(sc)) {
1499 * Unlikely we can do anything if we can't grab a
1500 * message frame :-(, but lets give it a try.
1502 (void)ASR_reset (sc);
1505 return (MessageOffset);
/*
 * SG(): fills one simple scatter/gather element at SGL->u.Simple[Index]
 * — count, flags (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT ORed with the
 * caller's Flags) and physical address (0 for a NULL Buffer).
 * NOTE(review): this listing drops original line 1513 — the line that
 * passes the Size argument to I2O_FLAGS_COUNT_setCount() is missing, so
 * the macro text below is incomplete.
 */
1509 /* Simple Scatter Gather elements */
1510 #define SG(SGL,Index,Flags,Buffer,Size) \
1511 I2O_FLAGS_COUNT_setCount( \
1512 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1514 I2O_FLAGS_COUNT_setFlags( \
1515 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1516 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags)); \
1517 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress( \
1518 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]), \
1519 (Buffer == NULL) ? 0 : KVTOPHYS(Buffer))
/*
 * Fragment of ASR_getParams(): builds an I2O UtilParamsGet message with
 * a one-operation FIELD_GET template (all fields, 0xFFFF) for the given
 * Group, attaches the operation list and the caller's result buffer as
 * two SG elements, and issues it synchronously via ASR_queue_c().
 * Returns a pointer into the caller's buffer (Buffer_Ptr->Info) on
 * success.  Buffer must come from defAlignLong (see header comment).
 * NOTE(review): incomplete fragment of a numbered listing (e.g. lines
 * 1524-1526, 1567-1568 missing); comments only added.
 */
1522 * Retrieve Parameter Group.
1523 * Buffer must be allocated using defAlignLong macro.
1527 IN Asr_softc_t * sc,
1531 IN unsigned BufferSize)
1533 struct paramGetMessage {
1534 I2O_UTIL_PARAMS_GET_MESSAGE M;
1536 sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
1538 I2O_PARAM_OPERATIONS_LIST_HEADER Header;
1539 I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
1542 defAlignLong(struct paramGetMessage, Message);
1543 struct Operations * Operations_Ptr;
1544 I2O_UTIL_PARAMS_GET_MESSAGE * Message_Ptr;
1545 struct ParamBuffer {
1546 I2O_PARAM_RESULTS_LIST_HEADER Header;
1547 I2O_PARAM_READ_OPERATION_RESULT Read;
1551 Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(Message,
1552 sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1553 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1554 Operations_Ptr = (struct Operations *)((char *)Message_Ptr
1555 + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1556 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1557 bzero ((void *)Operations_Ptr, sizeof(struct Operations));
1558 I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
1559 &(Operations_Ptr->Header), 1);
1560 I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
1561 &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
1562 I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
1563 &(Operations_Ptr->Template[0]), 0xFFFF);
1564 I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
1565 &(Operations_Ptr->Template[0]), Group);
1566 bzero ((void *)(Buffer_Ptr = getAlignLong(struct ParamBuffer, Buffer)),
1569 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1571 + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1572 / sizeof(U32)) << 4));
1573 I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
1575 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
1576 I2O_UTIL_PARAMS_GET);
1578 * Set up the buffers as scatter gather elements.
1580 SG(&(Message_Ptr->SGL), 0,
1581 I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
1582 Operations_Ptr, sizeof(struct Operations));
1583 SG(&(Message_Ptr->SGL), 1,
1584 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1585 Buffer_Ptr, BufferSize);
1587 if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
1588 && (Buffer_Ptr->Header.ResultCount)) {
1589 return ((void *)(Buffer_Ptr->Info));
1592 } /* ASR_getParams */
/*
 * Fragment of ASR_acquireLct(): issues an ExecLctNotify to learn the
 * LCT size, kmalloc's a buffer for it (rejecting sizes <= the header or
 * > 128KB), converts the buffer into a page-by-page SG list (growing
 * the message frame as elements are added), fetches the full LCT, then
 * post-processes every entry: maps the I2O class to le_type and pulls
 * bus/target/lun from HBA- or DPT-device parameter groups via
 * ASR_getParams(), updating sc->ha_MaxBus as a side effect.
 * NOTE(review): incomplete fragment — the embedded line numbers skip
 * throughout (1603->1607, 1677->1680, 1700->1708, ...), so control flow
 * below is not the full function.  Comments only added.
 */
1595 * Acquire the LCT information.
1599 INOUT Asr_softc_t * sc)
1601 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
1602 PI2O_SGE_SIMPLE_ELEMENT sg;
1603 int MessageSizeInBytes;
1607 PI2O_LCT_ENTRY Entry;
1610 * sc value assumed valid
1612 MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE)
1613 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
1614 Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)kmalloc (
1615 MessageSizeInBytes, M_TEMP, M_WAITOK);
1616 (void)ASR_fillMessage((char *)Message_Ptr, MessageSizeInBytes);
1617 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1619 (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1620 / sizeof(U32)) << 4)));
1621 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1622 I2O_EXEC_LCT_NOTIFY);
1623 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
1624 I2O_CLASS_MATCH_ANYCLASS);
1626 * Call the LCT table to determine the number of device entries
1627 * to reserve space for.
1629 SG(&(Message_Ptr->SGL), 0,
1630 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
1633 * since this code is reused in several systems, code efficiency
1634 * is greater by using a shift operation rather than a divide by
1635 * sizeof(u_int32_t).
1637 I2O_LCT_setTableSize(&Table,
1638 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1639 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1641 * Determine the size of the LCT table.
1644 kfree (sc->ha_LCT, M_TEMP);
1647 * kmalloc only generates contiguous memory when less than a
1648 * page is expected. We must break the request up into an SG list ...
1650 if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
1651 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
1652 || (len > (128 * 1024))) { /* Arbitrary */
1653 kfree (Message_Ptr, M_TEMP);
1656 sc->ha_LCT = (PI2O_LCT)kmalloc (len, M_TEMP, M_WAITOK);
1658 * since this code is reused in several systems, code efficiency
1659 * is greater by using a shift operation rather than a divide by
1660 * sizeof(u_int32_t).
1662 I2O_LCT_setTableSize(sc->ha_LCT,
1663 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1665 * Convert the access to the LCT table into a SG list.
1667 sg = Message_Ptr->SGL.u.Simple;
1668 v = (caddr_t)(sc->ha_LCT);
1670 int next, base, span;
1673 next = base = KVTOPHYS(v);
1674 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1676 /* How far can we go contiguously */
1677 while ((len > 0) && (base == next)) {
1680 next = trunc_page(base) + PAGE_SIZE;
1691 /* Construct the Flags */
1692 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1694 int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
1696 rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
1697 | I2O_SGL_FLAGS_LAST_ELEMENT
1698 | I2O_SGL_FLAGS_END_OF_BUFFER);
1700 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
1708 * Incrementing requires resizing of the packet.
1711 MessageSizeInBytes += sizeof(*sg);
1712 I2O_MESSAGE_FRAME_setMessageSize(
1713 &(Message_Ptr->StdMessageFrame),
1714 I2O_MESSAGE_FRAME_getMessageSize(
1715 &(Message_Ptr->StdMessageFrame))
1716 + (sizeof(*sg) / sizeof(U32)));
1718 PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;
1720 NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
1721 kmalloc (MessageSizeInBytes, M_TEMP, M_WAITOK);
1722 span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
1723 bcopy ((caddr_t)Message_Ptr,
1724 (caddr_t)NewMessage_Ptr, span);
1725 kfree (Message_Ptr, M_TEMP);
1726 sg = (PI2O_SGE_SIMPLE_ELEMENT)
1727 (((caddr_t)NewMessage_Ptr) + span);
1728 Message_Ptr = NewMessage_Ptr;
1733 retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1734 kfree (Message_Ptr, M_TEMP);
1735 if (retval != CAM_REQ_CMP) {
1739 /* If the LCT table grew, lets truncate accesses */
1740 if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
1741 I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
1743 for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
1744 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1746 Entry->le_type = I2O_UNKNOWN;
1747 switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {
1749 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
1750 Entry->le_type = I2O_BSA;
1753 case I2O_CLASS_SCSI_PERIPHERAL:
1754 Entry->le_type = I2O_SCSI;
1757 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
1758 Entry->le_type = I2O_FCA;
1761 case I2O_CLASS_BUS_ADAPTER_PORT:
1762 Entry->le_type = I2O_PORT | I2O_SCSI;
1764 case I2O_CLASS_FIBRE_CHANNEL_PORT:
1765 if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
1766 I2O_CLASS_FIBRE_CHANNEL_PORT) {
1767 Entry->le_type = I2O_PORT | I2O_FCA;
1769 { struct ControllerInfo {
1770 I2O_PARAM_RESULTS_LIST_HEADER Header;
1771 I2O_PARAM_READ_OPERATION_RESULT Read;
1772 I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1774 defAlignLong(struct ControllerInfo, Buffer);
1775 PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1777 Entry->le_bus = 0xff;
1778 Entry->le_target = 0xff;
1779 Entry->le_lun = 0xff;
1781 if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
1783 I2O_LCT_ENTRY_getLocalTID(Entry),
1784 I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
1785 Buffer, sizeof(struct ControllerInfo)))
1786 == (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)NULL) {
1790 = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
1797 { struct DeviceInfo {
1798 I2O_PARAM_RESULTS_LIST_HEADER Header;
1799 I2O_PARAM_READ_OPERATION_RESULT Read;
1800 I2O_DPT_DEVICE_INFO_SCALAR Info;
1802 defAlignLong (struct DeviceInfo, Buffer);
1803 PI2O_DPT_DEVICE_INFO_SCALAR Info;
1805 Entry->le_bus = 0xff;
1806 Entry->le_target = 0xff;
1807 Entry->le_lun = 0xff;
1809 if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
1811 I2O_LCT_ENTRY_getLocalTID(Entry),
1812 I2O_DPT_DEVICE_INFO_GROUP_NO,
1813 Buffer, sizeof(struct DeviceInfo)))
1814 == (PI2O_DPT_DEVICE_INFO_SCALAR)NULL) {
1818 |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
1820 = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
1821 if ((Entry->le_bus > sc->ha_MaxBus)
1822 && (Entry->le_bus <= MAX_CHANNEL)) {
1823 sc->ha_MaxBus = Entry->le_bus;
1826 = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
1828 = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
1832 * A zero return value indicates success.
1835 } /* ASR_acquireLct */
/*
 * Fragment of ASR_init_message(): builds a PRIVATE_SCSI_SCB_EXECUTE
 * message for a CCB — resolves (and caches) the TID for the CCB's
 * bus/target/lun from the LCT, fills the private-message header
 * (version/offset, size, function, SCB flags, DPT organization ID),
 * copies the CDB, then converts csio.data_ptr/dxfer_len into a
 * page-granular SG list (direction from CAM_DIR_IN) and appends a final
 * SG element for the sense buffer.  Returns NULL if no TID exists.
 * NOTE(review): incomplete fragment — the embedded numbering skips
 * (e.g. 1857->1860, 1964->1975), so several statements/braces are
 * missing below.  Comments only added.
 */
1838 * Initialize a message frame.
1839 * We assume that the CDB has already been set up, so all we do here is
1840 * generate the Scatter Gather list.
1842 STATIC INLINE PI2O_MESSAGE_FRAME
1844 IN union asr_ccb * ccb,
1845 OUT PI2O_MESSAGE_FRAME Message)
1847 int next, span, base, rw;
1848 OUT PI2O_MESSAGE_FRAME Message_Ptr;
1849 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1850 PI2O_SGE_SIMPLE_ELEMENT sg;
1852 vm_size_t size, len;
1855 /* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
1856 bzero (Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message),
1857 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT)));
1860 int target = ccb->ccb_h.target_id;
1861 int lun = ccb->ccb_h.target_lun;
1862 int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1865 if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
1866 PI2O_LCT_ENTRY Device;
1869 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
1870 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1872 if ((Device->le_type != I2O_UNKNOWN)
1873 && (Device->le_bus == bus)
1874 && (Device->le_target == target)
1875 && (Device->le_lun == lun)
1876 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
1877 TID = I2O_LCT_ENTRY_getLocalTID(Device);
1878 ASR_setTid (sc, Device->le_bus,
1879 Device->le_target, Device->le_lun,
1885 if (TID == (tid_t)0) {
1886 return ((PI2O_MESSAGE_FRAME)NULL);
1888 I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
1889 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
1890 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
1892 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
1893 (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1894 / sizeof(U32)) << 4));
1895 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
1896 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1897 - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
1898 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
1899 I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
1900 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
1901 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
1902 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1903 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1904 I2O_SCB_FLAG_ENABLE_DISCONNECT
1905 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1906 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
1908 * We do not need any (optional byteswapping) method access to
1909 * the Initiator & Transaction context field.
1911 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
1913 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
1914 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
1918 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
1919 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
1920 bcopy (&(ccb->csio.cdb_io),
1921 ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB, ccb->csio.cdb_len);
1924 * Given a buffer describing a transfer, set up a scatter/gather map
1925 * in a ccb to map that SCSI transfer.
1928 rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;
1930 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1931 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1932 (ccb->csio.dxfer_len)
1933 ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
1934 | I2O_SCB_FLAG_ENABLE_DISCONNECT
1935 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1936 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
1937 : (I2O_SCB_FLAG_XFER_FROM_DEVICE
1938 | I2O_SCB_FLAG_ENABLE_DISCONNECT
1939 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1940 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
1941 : (I2O_SCB_FLAG_ENABLE_DISCONNECT
1942 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1943 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
1946 * Given a transfer described by a `data', fill in the SG list.
1948 sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];
1950 len = ccb->csio.dxfer_len;
1951 v = ccb->csio.data_ptr;
1952 ASSERT (ccb->csio.dxfer_len >= 0);
1953 MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
1954 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
1955 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
1956 while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1957 Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
1959 next = base = KVTOPHYS(v);
1960 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1962 /* How far can we go contiguously */
1963 while ((len > 0) && (base == next)) {
1964 next = trunc_page(base) + PAGE_SIZE;
1975 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1977 rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
1979 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
1980 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
1982 MessageSize += sizeof(*sg) / sizeof(U32);
1984 /* We always do the request sense ... */
1985 if ((span = ccb->csio.sense_len) == 0) {
1986 span = sizeof(ccb->csio.sense_data);
1988 SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1989 &(ccb->csio.sense_data), span);
1990 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
1991 MessageSize + (sizeof(*sg) / sizeof(U32)));
1992 return (Message_Ptr);
1993 } /* ASR_init_message */
/*
 * Fragment of ASR_initOutBound(): sends ExecOutboundInit (host page
 * frame size and reply-frame size), polls the reply status word until
 * it reaches at least I2O_EXEC_OUTBOUND_INIT_REJECTED, re-enables
 * interrupts, contigmalloc's the reply-frame pool on first use, and
 * primes the adapter's FromFIFO with the physical address of each
 * reply frame.  Returns the final reply status word.
 * NOTE(review): incomplete fragment — embedded numbering skips (e.g.
 * 2009-2011, 2032-2036); the busy-wait on *Reply_Ptr shown below has no
 * visible timeout in this fragment.  Comments only added.
 */
1996 * Reset the adapter.
2000 INOUT Asr_softc_t * sc)
2002 struct initOutBoundMessage {
2003 I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
2006 defAlignLong(struct initOutBoundMessage,Message);
2007 PI2O_EXEC_OUTBOUND_INIT_MESSAGE Message_Ptr;
2008 OUT U32 * volatile Reply_Ptr;
2012 * Build up our copy of the Message.
2014 Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(Message,
2015 sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
2016 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2017 I2O_EXEC_OUTBOUND_INIT);
2018 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
2019 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
2020 sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
2022 * Reset the Reply Status
2024 *(Reply_Ptr = (U32 *)((char *)Message_Ptr
2025 + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
2026 SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
2029 * Send the Message out
2031 if ((Old = ASR_initiateCp (sc->ha_Virt, sc->ha_Fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
2035 * Wait for a response (Poll).
2037 while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
2039 * Re-enable the interrupts.
2041 sc->ha_Virt->Mask = Old;
2043 * Populate the outbound table.
2045 if (sc->ha_Msgs == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
2047 /* Allocate the reply frames */
2048 size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
2049 * sc->ha_Msgs_Count;
2052 * contigmalloc only works reliably at
2053 * initialization time.
2055 if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
2056 contigmalloc (size, M_DEVBUF, M_WAITOK | M_ZERO, 0ul,
2057 0xFFFFFFFFul, (u_long)sizeof(U32), 0ul))
2058 != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
2059 sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
2063 /* Initialize the outbound FIFO */
2064 if (sc->ha_Msgs != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL)
2065 for (size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
2067 sc->ha_Virt->FromFIFO = addr;
2068 addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
2070 return (*Reply_Ptr);
2073 } /* ASR_initOutBound */
/*
 * Fragment of ASR_setSysTab(): counts all attached HBAs on the global
 * Asr_softc list, builds an ExecSysTabSet message with one SG element
 * for the I2O_SET_SYSTAB_HEADER, one per-HBA element for each
 * ha_SystemTable, plus two zero-length placeholder elements, queues it
 * synchronously and frees the temporary allocations.
 * NOTE(review): incomplete fragment of a numbered listing (embedded
 * numbers skip, e.g. 2084->2085->2088); comments only added.
 */
2076 * Set the system table
2080 IN Asr_softc_t * sc)
2082 PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
2083 PI2O_SET_SYSTAB_HEADER SystemTable;
2085 PI2O_SGE_SIMPLE_ELEMENT sg;
2088 SystemTable = (PI2O_SET_SYSTAB_HEADER)kmalloc (
2089 sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO);
2090 for (ha = Asr_softc; ha; ha = ha->ha_next) {
2091 ++SystemTable->NumberEntries;
2093 Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)kmalloc (
2094 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2095 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
2097 (void)ASR_fillMessage((char *)Message_Ptr,
2098 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2099 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
2100 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2102 (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2103 / sizeof(U32)) << 4)));
2104 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2105 I2O_EXEC_SYS_TAB_SET);
2107 * Call the LCT table to determine the number of device entries
2108 * to reserve space for.
2109 * since this code is reused in several systems, code efficiency
2110 * is greater by using a shift operation rather than a divide by
2111 * sizeof(u_int32_t).
2113 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
2114 + ((I2O_MESSAGE_FRAME_getVersionOffset(
2115 &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
2116 SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
2118 for (ha = Asr_softc; ha; ha = ha->ha_next) {
2121 ? (I2O_SGL_FLAGS_DIR)
2122 : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER),
2123 &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
2126 SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2127 SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
2128 | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2129 retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2130 kfree (Message_Ptr, M_TEMP);
2131 kfree (SystemTable, M_TEMP);
2133 } /* ASR_setSysTab */
/*
 * Fragment of ASR_acquireHrt(): fetches the Hardware Resource Table
 * with an ExecHrtGet into a stack buffer sized for MAX_CHANNEL entries
 * (clamping NumberOfEntries to MAX_CHANNEL + 1), then matches each HRT
 * entry's AdapterID against LCT LocalTIDs to assign le_bus per device
 * and raise sc->ha_MaxBus.
 * NOTE(review): incomplete fragment — the function-name line and
 * several statements are missing from this numbered listing (embedded
 * numbers skip, e.g. 2140->2143->2145).  Comments only added.
 */
2137 INOUT Asr_softc_t * sc)
2139 defAlignLong(I2O_EXEC_HRT_GET_MESSAGE,Message);
2140 I2O_EXEC_HRT_GET_MESSAGE * Message_Ptr;
2143 I2O_HRT_ENTRY Entry[MAX_CHANNEL];
2145 u_int8_t NumberOfEntries;
2146 PI2O_HRT_ENTRY Entry;
2148 bzero ((void *)&Hrt, sizeof (Hrt));
2149 Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(Message,
2150 sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2151 + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2152 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2154 + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2155 / sizeof(U32)) << 4)));
2156 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
2160 * Set up the buffers as scatter gather elements.
2162 SG(&(Message_Ptr->SGL), 0,
2163 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2165 if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
2168 if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
2169 > (MAX_CHANNEL + 1)) {
2170 NumberOfEntries = MAX_CHANNEL + 1;
2172 for (Entry = Hrt.Header.HRTEntry;
2173 NumberOfEntries != 0;
2174 ++Entry, --NumberOfEntries) {
2175 PI2O_LCT_ENTRY Device;
2177 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2178 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
2180 if (I2O_LCT_ENTRY_getLocalTID(Device)
2181 == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
2182 Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
2184 if ((Device->le_bus > sc->ha_MaxBus)
2185 && (Device->le_bus <= MAX_CHANNEL)) {
2186 sc->ha_MaxBus = Device->le_bus;
2192 } /* ASR_acquireHrt */
/*
 * Fragment of ASR_enableSys(): sends an ExecSysEnable message and
 * returns non-zero when ASR_queue_c() reports a non-zero status.
 * NOTE(review): the function-name line (original line 2198) is missing
 * from this listing; fragment is incomplete.  Comments only added.
 */
2195 * Enable the adapter.
2199 IN Asr_softc_t * sc)
2201 defAlignLong(I2O_EXEC_SYS_ENABLE_MESSAGE,Message);
2202 PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr;
2204 Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(Message,
2205 sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2206 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2207 I2O_EXEC_SYS_ENABLE);
2208 return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2209 } /* ASR_enableSys */
/*
 * Fragment of ASR_init(): runs the three initialization stages in
 * order — ASR_initOutBound(), ASR_setSysTab(), ASR_enableSys() — and
 * yields non-zero if any stage fails (short-circuit ||).
 * NOTE(review): the function-name line is missing from this numbered
 * listing; fragment is incomplete.  Comments only added.
 */
2212 * Perform the stages necessary to initialize the adapter
2216 IN Asr_softc_t * sc)
2218 return ((ASR_initOutBound(sc) == 0)
2219 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2220 || (ASR_enableSys(sc) != CAM_REQ_CMP));
/*
 * Fragment of ASR_sync(): sends a SCSI SYNCHRONIZE CACHE (opcode in
 * CDB[0], LUN folded into CDB[1] bits 5-7) as a private SCB-execute
 * message to the TID cached for bus/target/lun.  Deliberately skipped
 * when commands are still outstanding (see comment below) or when no
 * TID is cached.
 * NOTE(review): incomplete fragment — embedded numbering skips (e.g.
 * 2229-2235, 2247-2248); comments only added.
 */
2224 * Send a Synchronize Cache command to the target device.
2228 IN Asr_softc_t * sc,
2236 * We will not synchronize the device when there are outstanding
2237 * commands issued by the OS (this is due to a locked up device,
2238 * as the OS normally would flush all outstanding commands before
2239 * issuing a shutdown or an adapter reset).
2242 && (LIST_FIRST(&(sc->ha_ccb)) != NULL)
2243 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
2244 && (TID != (tid_t)0)) {
2245 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
2246 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
2249 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
2250 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2251 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2253 I2O_MESSAGE_FRAME_setVersionOffset(
2254 (PI2O_MESSAGE_FRAME)Message_Ptr,
2256 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2257 - sizeof(I2O_SG_ELEMENT))
2258 / sizeof(U32)) << 4));
2259 I2O_MESSAGE_FRAME_setMessageSize(
2260 (PI2O_MESSAGE_FRAME)Message_Ptr,
2261 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2262 - sizeof(I2O_SG_ELEMENT))
2264 I2O_MESSAGE_FRAME_setInitiatorAddress (
2265 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2266 I2O_MESSAGE_FRAME_setFunction(
2267 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2268 I2O_MESSAGE_FRAME_setTargetAddress(
2269 (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
2270 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2271 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2273 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
2274 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2275 I2O_SCB_FLAG_ENABLE_DISCONNECT
2276 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2277 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2278 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2279 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2280 DPT_ORGANIZATION_ID);
2281 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2282 Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
2283 Message_Ptr->CDB[1] = (lun << 5);
2285 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2286 (I2O_SCB_FLAG_XFER_FROM_DEVICE
2287 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2288 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2289 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2291 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
/*
 * Fragment of ASR_synchronize(): calls ASR_sync() for every
 * bus/target/lun combination up to ha_MaxBus/ha_MaxId/ha_MaxLun.
 * NOTE(review): the function-name line is missing from this numbered
 * listing; fragment is incomplete.  Comments only added.
 */
2298 IN Asr_softc_t * sc)
2300 int bus, target, lun;
2302 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2303 for (target = 0; target <= sc->ha_MaxId; ++target) {
2304 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2305 ASR_sync(sc,bus,target,lun);
/*
 * Fragment of asr_hbareset(): flushes all device caches
 * (ASR_synchronize) before performing a full adapter reset
 * (ASR_reset, return value deliberately discarded).
 * NOTE(review): the function-name line is missing from this numbered
 * listing; fragment is incomplete.  Comments only added.
 */
2312 * Reset the HBA, targets and BUS.
2313 * Currently this resets *all* the SCSI busses.
2317 IN Asr_softc_t * sc)
2319 ASR_synchronize (sc);
2320 (void)ASR_reset (sc);
2321 } /* asr_hbareset */
/*
 * Fragment of asr_pci_map_mem(): scans the four PCI BARs for the first
 * memory BAR, sizes it with the write-all-ones/read-back idiom (the
 * read-back is restored immediately), applies the MAX_MAP clamp for
 * the "Conjoined BAR Raptor Daptor" subdevice range, allocates and
 * activates the region, and — for the 0xA5111044 Split-BAR 2005S —
 * additionally maps the next BAR as the messaging window (ha_Fvirt);
 * otherwise ha_Fvirt aliases ha_Virt.
 * NOTE(review): incomplete fragment — embedded numbering skips (e.g.
 * 2341->2349, 2355->2360); comments only added.
 */
2324 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2325 * limit and a reduction in error checking (in the pre 4.0 case).
2330 IN Asr_softc_t * sc)
2336 * I2O specification says we must find first *memory* mapped BAR
2338 for (rid = PCIR_MAPS;
2339 rid < (PCIR_MAPS + 4 * sizeof(u_int32_t));
2340 rid += sizeof(u_int32_t)) {
2341 p = pci_read_config(tag, rid, sizeof(p));
2349 if (rid >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2352 p = pci_read_config(tag, rid, sizeof(p));
2353 pci_write_config(tag, rid, -1, sizeof(p));
2354 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2355 pci_write_config(tag, rid, p, sizeof(p));
2360 * The 2005S Zero Channel RAID solution is not a perfect PCI
2361 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
2362 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
2363 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
2364 * accessible via BAR0, the messaging registers are accessible
2365 * via BAR1. If the subdevice code is 50 to 59 decimal.
2367 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s));
2368 if (s != 0xA5111044) {
2369 s = pci_read_config(tag, PCIR_SUBVEND_0, sizeof(s));
2370 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2371 && (ADPTDOMINATOR_SUB_ID_START <= s)
2372 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2373 l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
2377 sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2378 p, p + l, l, RF_ACTIVE);
2379 if (sc->ha_mem_res == NULL) {
2382 sc->ha_Base = (void *)rman_get_start(sc->ha_mem_res);
2383 if (sc->ha_Base == NULL) {
2386 sc->ha_Virt = (i2oRegs_t *) rman_get_virtual(sc->ha_mem_res);
2387 if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
2388 if ((rid += sizeof(u_int32_t))
2389 >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2392 p = pci_read_config(tag, rid, sizeof(p));
2393 pci_write_config(tag, rid, -1, sizeof(p));
2394 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2395 pci_write_config(tag, rid, p, sizeof(p));
2400 sc->ha_mes_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2401 p, p + l, l, RF_ACTIVE);
2402 if (sc->ha_mes_res == NULL) {
2405 if ((void *)rman_get_start(sc->ha_mes_res) == NULL) {
2408 sc->ha_Fvirt = (U8 *) rman_get_virtual(sc->ha_mes_res);
2410 sc->ha_Fvirt = (U8 *)(sc->ha_Virt);
2413 } /* asr_pci_map_mem */
/*
 * Fragment of asr_pci_map_int(): allocates a shareable IRQ resource,
 * hooks asr_intr() as the handler via bus_setup_intr(), and records the
 * interrupt line from PCI config space in sc->ha_irq.
 * NOTE(review): incomplete fragment — the function-name line and the
 * error-path lines are missing from this numbered listing.  Comments
 * only added.
 */
2416 * A simplified copy of the real pci_map_int with additional
2417 * registration requirements.
2422 IN Asr_softc_t * sc)
2427 sc->ha_irq_res = bus_alloc_resource(tag, SYS_RES_IRQ, &rid,
2428 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE);
2429 if (sc->ha_irq_res == NULL) {
2432 error = bus_setup_intr(tag, sc->ha_irq_res, 0,
2433 (driver_intr_t *)asr_intr, (void *)sc,
2434 &(sc->ha_intr), NULL);
2438 sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char));
2440 } /* asr_pci_map_int */
/*
 * NOTE(review): elided listing — several statements of asr_attach() are
 * missing between the numbered rows.  Code kept verbatim; comments only.
 */
2443 * Attach the devices, and virtual devices to the driver list.
2446 asr_attach (ATTACH_ARGS)
2449 struct scsi_inquiry_data * iq;
/* Allocate and zero the per-adapter softc. */
2452 sc = kmalloc(sizeof(*sc), M_DEVBUF, M_INTWAIT | M_ZERO);
2453 if (Asr_softc == NULL) {
2455 * Fixup the OS revision as saved in the dptsig for the
2456 * engine (dptioctl.h) to pick up.
2458 bcopy (osrelease, &ASR_sig.dsDescription[16], 5);
2459 kprintf ("asr%d: major=%d\n", unit, asr_ops.head.maj);
2462 * Initialize the software structure
2464 LIST_INIT(&(sc->ha_ccb));
2465 /* Link us into the HA list */
2469 for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next));
2473 PI2O_EXEC_STATUS_GET_REPLY status;
2477 * This is the real McCoy!
/* Map the controller's IOP/messaging BARs; fail the attach otherwise. */
2479 if (!asr_pci_map_mem(tag, sc)) {
2480 kprintf ("asr%d: could not map memory\n", unit);
2481 ATTACH_RETURN(ENXIO);
2483 /* Enable if not formerly enabled */
2484 pci_write_config (tag, PCIR_COMMAND,
2485 pci_read_config (tag, PCIR_COMMAND, sizeof(char))
2486 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2487 /* Knowledge is power, responsibility is direct */
2489 struct pci_devinfo {
2490 STAILQ_ENTRY(pci_devinfo) pci_links;
2491 struct resource_list resources;
2493 } * dinfo = device_get_ivars(tag);
2494 sc->ha_pciBusNum = dinfo->cfg.bus;
2495 sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3)
2498 /* Check if the device is there? */
/* Reset the IOP, then fetch its status block; any failure aborts. */
2499 if ((ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt) == 0)
2500 || ((status = (PI2O_EXEC_STATUS_GET_REPLY)kmalloc (
2501 sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK))
2502 == (PI2O_EXEC_STATUS_GET_REPLY)NULL)
2503 || (ASR_getStatus(sc->ha_Virt, sc->ha_Fvirt, status) == NULL)) {
2504 kprintf ("asr%d: could not initialize hardware\n", unit);
2505 ATTACH_RETURN(ENODEV); /* Get next, maybe better luck */
/* Seed our SystemTable entry from the adapter's STATUS_GET reply. */
2507 sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2508 sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2509 sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2510 sc->ha_SystemTable.IopState = status->IopState;
2511 sc->ha_SystemTable.MessengerType = status->MessengerType;
2512 sc->ha_SystemTable.InboundMessageFrameSize
2513 = status->InboundMFrameSize;
2514 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow
2515 = (U32)(sc->ha_Base) + (U32)(&(((i2oRegs_t *)NULL)->ToFIFO));
2517 if (!asr_pci_map_int(tag, (void *)sc)) {
2518 kprintf ("asr%d: could not map interrupt\n", unit);
2519 ATTACH_RETURN(ENXIO);
2522 /* Adjust the maximum inbound count */
2523 if (((sc->ha_QueueSize
2524 = I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status))
2526 || (sc->ha_QueueSize == 0)) {
2527 sc->ha_QueueSize = MAX_INBOUND;
2530 /* Adjust the maximum outbound count */
2531 if (((sc->ha_Msgs_Count
2532 = I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status))
2534 || (sc->ha_Msgs_Count == 0)) {
2535 sc->ha_Msgs_Count = MAX_OUTBOUND;
2537 if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2538 sc->ha_Msgs_Count = sc->ha_QueueSize;
2541 /* Adjust the maximum SG size to adapter */
2542 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(
2543 status) << 2)) > MAX_INBOUND_SIZE) {
2544 size = MAX_INBOUND_SIZE;
2546 kfree (status, M_TEMP);
2547 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2548 + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2552 * Only do a bus/HBA reset on the first time through. On this
2553 * first time through, we do not send a flush to the devices.
2555 if (ASR_init(sc) == 0) {
2557 I2O_PARAM_RESULTS_LIST_HEADER Header;
2558 I2O_PARAM_READ_OPERATION_RESULT Read;
2559 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2561 defAlignLong (struct BufferInfo, Buffer);
2562 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2563 # define FW_DEBUG_BLED_OFFSET 8
/* Locate the firmware blink-LED byte via the IOP buffers param group. */
2565 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2566 ASR_getParams(sc, 0,
2567 I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2568 Buffer, sizeof(struct BufferInfo)))
2569 != (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)NULL) {
2570 sc->ha_blinkLED = sc->ha_Fvirt
2571 + I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info)
2572 + FW_DEBUG_BLED_OFFSET;
2574 if (ASR_acquireLct(sc) == 0) {
2575 (void)ASR_acquireHrt(sc);
2578 kprintf ("asr%d: failed to initialize\n", unit);
2579 ATTACH_RETURN(ENXIO);
2582 * Add in additional probe responses for more channels. We
2583 * are reusing the variable `target' for a channel loop counter.
2584 * Done here because we need both the acquireLct and
/* Walk the LCT to derive MaxId/MaxLun and per-bus adapter targets. */
2587 { PI2O_LCT_ENTRY Device;
2589 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2590 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
2592 if (Device->le_type == I2O_UNKNOWN) {
2595 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
2596 if (Device->le_target > sc->ha_MaxId) {
2597 sc->ha_MaxId = Device->le_target;
2599 if (Device->le_lun > sc->ha_MaxLun) {
2600 sc->ha_MaxLun = Device->le_lun;
2603 if (((Device->le_type & I2O_PORT) != 0)
2604 && (Device->le_bus <= MAX_CHANNEL)) {
2605 /* Do not increase MaxId for efficiency */
2606 sc->ha_adapter_target[Device->le_bus]
2607 = Device->le_target;
2614 * Print the HBA model number as inquired from the card.
2617 kprintf ("asr%d:", unit);
2619 iq = (struct scsi_inquiry_data *)kmalloc (
2620 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO);
/* Build a private-frame INQUIRY (Interpret=1 targets the HBA itself). */
2621 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
2622 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
2626 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
2627 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2628 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2630 I2O_MESSAGE_FRAME_setVersionOffset(
2631 (PI2O_MESSAGE_FRAME)Message_Ptr,
2633 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2634 - sizeof(I2O_SG_ELEMENT))
2635 / sizeof(U32)) << 4));
2636 I2O_MESSAGE_FRAME_setMessageSize(
2637 (PI2O_MESSAGE_FRAME)Message_Ptr,
2638 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2639 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT))
2641 I2O_MESSAGE_FRAME_setInitiatorAddress (
2642 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2643 I2O_MESSAGE_FRAME_setFunction(
2644 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2645 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2646 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2648 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2649 I2O_SCB_FLAG_ENABLE_DISCONNECT
2650 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2651 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2652 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
2653 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2654 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2655 DPT_ORGANIZATION_ID);
2656 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2657 Message_Ptr->CDB[0] = INQUIRY;
2658 Message_Ptr->CDB[4] = (unsigned char)sizeof(struct scsi_inquiry_data);
/* CDB[4] is the allocation length; 0 would mean "no data", so clamp. */
2659 if (Message_Ptr->CDB[4] == 0) {
2660 Message_Ptr->CDB[4] = 255;
2663 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2664 (I2O_SCB_FLAG_XFER_FROM_DEVICE
2665 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2666 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2667 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2669 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2670 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2671 sizeof(struct scsi_inquiry_data));
2672 SG(&(Message_Ptr->SGL), 0,
2673 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2674 iq, sizeof(struct scsi_inquiry_data));
2675 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
/* Report vendor/product/revision only when non-blank. */
2677 if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
2679 ASR_prstring (iq->vendor, 8);
2682 if (iq->product[0] && (iq->product[0] != ' ')) {
2684 ASR_prstring (iq->product, 16);
2687 if (iq->revision[0] && (iq->revision[0] != ' ')) {
2688 kprintf (" FW Rev. ");
2689 ASR_prstring (iq->revision, 4);
2692 kfree ((caddr_t)iq, M_TEMP);
2696 kprintf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
2697 (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
2700 * fill in the prototype cam_path.
2704 union asr_ccb * ccb;
2706 if ((ccb = asr_alloc_ccb (sc)) == NULL) {
2707 kprintf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit);
2708 ATTACH_RETURN(ENOMEM);
/* Register one CAM SIM + wildcard path per adapter channel. */
2710 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2711 int QueueSize = sc->ha_QueueSize;
2713 if (QueueSize > MAX_INBOUND) {
2714 QueueSize = MAX_INBOUND;
2718 * Construct our first channel SIM entry
2720 sc->ha_sim[bus] = cam_sim_alloc(
2721 asr_action, asr_poll, "asr", sc,
2722 unit, &sim_mplock, 1, QueueSize, NULL);
2723 if (sc->ha_sim[bus] == NULL)
2726 if (xpt_bus_register(sc->ha_sim[bus], bus)
2728 cam_sim_free(sc->ha_sim[bus]);
2729 sc->ha_sim[bus] = NULL;
2733 if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
2734 cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
2735 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2737 cam_sim_path(sc->ha_sim[bus]));
2738 cam_sim_free(sc->ha_sim[bus]);
2739 sc->ha_sim[bus] = NULL;
2746 * Generate the device node information
2748 make_dev(&asr_ops, unit, 0, 0, S_IRWXU, "rasr%d", unit);
/* asr_poll: polled-mode entry (signature head elided); just runs the ISR. */
2754 IN struct cam_sim *sim)
2756 asr_intr(cam_sim_softc(sim));
/*
 * NOTE(review): elided listing — parts of asr_action() are missing between
 * the numbered rows.  Code kept verbatim; comments only.
 * asr_action: CAM action entry point, dispatching on the CCB function code.
 */
2761 IN struct cam_sim * sim,
2764 struct Asr_softc * sc;
2766 debug_asr_printf ("asr_action(%lx,%lx{%x})\n",
2767 (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code);
2769 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));
/* Stash the softc in the CCB private pointer for the completion path. */
2771 ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);
2773 switch (ccb->ccb_h.func_code) {
2775 /* Common cases first */
2776 case XPT_SCSI_IO: /* Execute the requested I/O operation */
2779 char M[MAX_INBOUND_SIZE];
2781 defAlignLong(struct Message,Message);
2782 PI2O_MESSAGE_FRAME Message_Ptr;
2784 /* Reject incoming commands while we are resetting the card */
2785 if (sc->ha_in_reset != HA_OPERATIONAL) {
2786 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2787 if (sc->ha_in_reset >= HA_OFF_LINE) {
2788 /* HBA is now off-line */
2789 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2791 /* HBA currently resetting, try again later. */
2792 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2794 debug_asr_cmd_printf (" e\n");
2796 debug_asr_cmd_printf (" q\n");
/* Guard against being handed a CCB that is not in-progress. */
2799 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2801 "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
2802 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
2803 ccb->csio.cdb_io.cdb_bytes[0],
2805 ccb->ccb_h.target_id,
2806 ccb->ccb_h.target_lun);
2808 debug_asr_cmd_printf ("(%d,%d,%d,%d)",
2811 ccb->ccb_h.target_id,
2812 ccb->ccb_h.target_lun);
2813 debug_asr_cmd_dump_ccb(ccb);
/* Translate the CCB into an I2O message and queue it to the adapter. */
2815 if ((Message_Ptr = ASR_init_message ((union asr_ccb *)ccb,
2816 (PI2O_MESSAGE_FRAME)Message)) != (PI2O_MESSAGE_FRAME)NULL) {
2817 debug_asr_cmd2_printf ("TID=%x:\n",
2818 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
2819 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
2820 debug_asr_cmd2_dump_message(Message_Ptr);
2821 debug_asr_cmd1_printf (" q");
2823 if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
2824 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2825 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2826 debug_asr_cmd_printf (" E\n");
2829 debug_asr_cmd_printf (" Q\n");
2833 * We will get here if there is no valid TID for the device
2834 * referenced in the scsi command packet.
2836 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2837 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2838 debug_asr_cmd_printf (" B\n");
2843 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
2844 /* Reset HBA device ... */
2846 ccb->ccb_h.status = CAM_REQ_CMP;
2850 # if (defined(REPORT_LUNS))
2853 case XPT_ABORT: /* Abort the specified CCB */
2855 ccb->ccb_h.status = CAM_REQ_INVALID;
2859 case XPT_SET_TRAN_SETTINGS:
2861 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2865 case XPT_GET_TRAN_SETTINGS:
2866 /* Get default/user set transfer settings for the target */
2868 struct ccb_trans_settings *cts = &(ccb->cts);
2869 struct ccb_trans_settings_scsi *scsi =
2870 &cts->proto_specific.scsi;
2871 struct ccb_trans_settings_spi *spi =
2872 &cts->xport_specific.spi;
/* Only user-settings queries are answered; fixed wide/sync defaults. */
2874 if (cts->type == CTS_TYPE_USER_SETTINGS) {
2875 cts->protocol = PROTO_SCSI;
2876 cts->protocol_version = SCSI_REV_2;
2877 cts->transport = XPORT_SPI;
2878 cts->transport_version = 2;
2880 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2881 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2882 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2883 spi->sync_period = 6; /* 40MHz */
2884 spi->sync_offset = 15;
2885 spi->valid = CTS_SPI_VALID_SYNC_RATE
2886 | CTS_SPI_VALID_SYNC_OFFSET
2887 | CTS_SPI_VALID_BUS_WIDTH
2888 | CTS_SPI_VALID_DISC;
2889 scsi->valid = CTS_SCSI_VALID_TQ;
2891 ccb->ccb_h.status = CAM_REQ_CMP;
2893 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2899 case XPT_CALC_GEOMETRY:
2901 struct ccb_calc_geometry *ccg;
2903 u_int32_t secs_per_cylinder;
2906 size_mb = ccg->volume_size
2907 / ((1024L * 1024L) / ccg->block_size);
/* Pick a synthetic geometry tier based on volume size in MB. */
2909 if (size_mb > 4096) {
2911 ccg->secs_per_track = 63;
2912 } else if (size_mb > 2048) {
2914 ccg->secs_per_track = 63;
2915 } else if (size_mb > 1024) {
2917 ccg->secs_per_track = 63;
2920 ccg->secs_per_track = 32;
2922 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2923 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2924 ccb->ccb_h.status = CAM_REQ_CMP;
2929 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
2930 ASR_resetBus (sc, cam_sim_bus(sim));
2931 ccb->ccb_h.status = CAM_REQ_CMP;
2935 case XPT_TERM_IO: /* Terminate the I/O process */
2937 ccb->ccb_h.status = CAM_REQ_INVALID;
2941 case XPT_PATH_INQ: /* Path routing inquiry */
2943 struct ccb_pathinq *cpi = &(ccb->cpi);
2945 cpi->version_num = 1; /* XXX??? */
2946 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2947 cpi->target_sprt = 0;
2948 /* Not necessary to reset bus, done by HDM initialization */
2949 cpi->hba_misc = PIM_NOBUSRESET;
2950 cpi->hba_eng_cnt = 0;
2951 cpi->max_target = sc->ha_MaxId;
2952 cpi->max_lun = sc->ha_MaxLun;
2953 cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
2954 cpi->bus_id = cam_sim_bus(sim);
2955 cpi->base_transfer_speed = 3300;
2956 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2957 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
2958 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2959 cpi->unit_number = cam_sim_unit(sim);
2960 cpi->ccb_h.status = CAM_REQ_CMP;
2961 cpi->transport = XPORT_SPI;
2962 cpi->transport_version = 2;
2963 cpi->protocol = PROTO_SCSI;
2964 cpi->protocol_version = SCSI_REV_2;
2969 ccb->ccb_h.status = CAM_REQ_INVALID;
/*
 * NOTE(review): elided listing — parts of this completion routine are
 * missing between the numbered rows.  Code kept verbatim; comments only.
 * Drains the outbound (FromFIFO) queue, mapping each reply frame back to
 * its CCB and translating the I2O detailed status into a CAM status.
 */
2977 * Handle processing of current CCB as pointed to by the Status.
2981 IN Asr_softc_t * sc)
2986 sc->ha_Virt->Status & Mask_InterruptsDisabled;
2988 union asr_ccb * ccb;
2990 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
/* FromFIFO is read twice: EMPTY_QUEUE on both reads means no work. */
2992 if (((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)
2993 && ((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)) {
/* Convert the bus-physical reply offset into a kernel-virtual frame. */
2996 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
2997 - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
2999 * We do not need any (optional byteswapping) method access to
3000 * the Initiator context field.
3002 ccb = (union asr_ccb *)(long)
3003 I2O_MESSAGE_FRAME_getInitiatorContext64(
3004 &(Reply->StdReplyFrame.StdMessageFrame));
/* Failure-class reply: salvage the preserved MFA and NOP it back. */
3005 if (I2O_MESSAGE_FRAME_getMsgFlags(
3006 &(Reply->StdReplyFrame.StdMessageFrame))
3007 & I2O_MESSAGE_FLAGS_FAIL) {
3008 defAlignLong(I2O_UTIL_NOP_MESSAGE,Message);
3009 PI2O_UTIL_NOP_MESSAGE Message_Ptr;
3012 MessageOffset = (u_long)
3013 I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
3014 (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
3016 * Get the Original Message Frame's address, and get
3017 * its Transaction Context into our space. (Currently
3018 * unused at original authorship, but better to be
3019 * safe than sorry). Straight copy means that we
3020 * need not concern ourselves with the (optional
3021 * byteswapping) method access.
3023 Reply->StdReplyFrame.TransactionContext
3024 = ((PI2O_SINGLE_REPLY_MESSAGE_FRAME)
3025 (sc->ha_Fvirt + MessageOffset))->TransactionContext;
3027 * For 64 bit machines, we need to reconstruct the
3030 ccb = (union asr_ccb *)(long)
3031 I2O_MESSAGE_FRAME_getInitiatorContext64(
3032 &(Reply->StdReplyFrame.StdMessageFrame));
3034 * Unique error code for command failure.
3036 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3037 &(Reply->StdReplyFrame), (u_int16_t)-2);
3039 * Modify the message frame to contain a NOP and
3040 * re-issue it to the controller.
3042 Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
3043 Message, sizeof(I2O_UTIL_NOP_MESSAGE));
3044 # if (I2O_UTIL_NOP != 0)
3045 I2O_MESSAGE_FRAME_setFunction (
3046 &(Message_Ptr->StdMessageFrame),
3050 * Copy the packet out to the Original Message
3052 bcopy ((caddr_t)Message_Ptr,
3053 sc->ha_Fvirt + MessageOffset,
3054 sizeof(I2O_UTIL_NOP_MESSAGE));
/* Give the recycled frame back to the inbound queue. */
3058 sc->ha_Virt->ToFIFO = MessageOffset;
3062 * Asynchronous command with no return requirements,
3063 * and a generic handler for immunity against odd error
3064 * returns from the adapter.
3068 * Return Reply so that it can be used for the
3071 sc->ha_Virt->FromFIFO = ReplyOffset;
3075 /* Welease Wadjah! (and stop timeouts) */
3076 ASR_ccbRemove (sc, ccb);
/* Map the I2O detailed status code to the closest CAM status. */
3079 I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
3080 &(Reply->StdReplyFrame))) {
3082 case I2O_SCSI_DSC_SUCCESS:
3083 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3084 ccb->ccb_h.status |= CAM_REQ_CMP;
3087 case I2O_SCSI_DSC_CHECK_CONDITION:
3088 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3089 ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID;
3092 case I2O_SCSI_DSC_BUSY:
3094 case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
3096 case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
3098 case I2O_SCSI_HBA_DSC_BUS_BUSY:
3099 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3100 ccb->ccb_h.status |= CAM_SCSI_BUSY;
3103 case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
3104 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3105 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3108 case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
3110 case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
3112 case I2O_SCSI_HBA_DSC_LUN_INVALID:
3114 case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
3115 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3116 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
3119 case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
3121 case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
3122 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3123 ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
3127 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3128 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3131 if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
3133 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
3137 /* Sense data in reply packet */
3138 if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
3139 u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);
/* Clamp the sense length to every applicable buffer limit. */
3142 if (size > sizeof(ccb->csio.sense_data)) {
3143 size = sizeof(ccb->csio.sense_data);
3145 if (size > I2O_SCSI_SENSE_DATA_SZ) {
3146 size = I2O_SCSI_SENSE_DATA_SZ;
3148 if ((ccb->csio.sense_len)
3149 && (size > ccb->csio.sense_len)) {
3150 size = ccb->csio.sense_len;
3152 bcopy ((caddr_t)Reply->SenseData,
3153 (caddr_t)&(ccb->csio.sense_data), size);
3158 * Return Reply so that it can be used for the next command
3159 * since we have no more need for it now
3161 sc->ha_Virt->FromFIFO = ReplyOffset;
/* CCBs with a CAM path complete via xpt_done(); others are waited on. */
3163 if (ccb->ccb_h.path) {
3164 xpt_done ((union ccb *)ccb);
3166 wakeup ((caddr_t)ccb);
3172 #undef QueueSize /* Grrrr */
3173 #undef SG_Size /* Grrrr */
3176 * Meant to be included at the bottom of asr.c !!!
3180 * Included here as hard coded. Done because other necessary include
3181 * files utilize C++ comment structures which make them a nuisance to
3182 * included here just to pick up these three typedefs.
3184 typedef U32 DPT_TAG_T;
3185 typedef U32 DPT_MSG_T;
3186 typedef U32 DPT_RTN_T;
3188 #undef SCSI_RESET /* Conflicts with "scsi/scsiconf.h" definition */
3189 #include "osd_unix.h"
3191 #define asr_unit(dev) minor(dev)
/*
 * ASR_get_sc: map a character-device minor number to its adapter softc by
 * walking the global Asr_softc list.  The list-advance statement is elided
 * from this listing; comparison is against the unit of the first SIM.
 */
3193 STATIC INLINE Asr_softc_t *
3197 int unit = asr_unit(dev);
3198 OUT Asr_softc_t * sc = Asr_softc;
3200 while (sc && sc->ha_sim[0] && (cam_sim_unit(sc->ha_sim[0]) != unit)) {
3206 STATIC u_int8_t ASR_ctlr_held;
3207 #if (!defined(UNREFERENCED_PARAMETER))
3208 # define UNREFERENCED_PARAMETER(x) (void)(x)
/*
 * asr_open: character-device open entry (several lines elided).  Requires
 * an existing softc for the unit; ASR_ctlr_held serializes openers, and
 * the visible branch gates access on PRIV_ROOT credentials.
 */
3212 asr_open(struct dev_open_args *ap)
3214 cdev_t dev = ap->a_head.a_dev;
/* No softc registered for this unit -> device does not exist. */
3217 if (ASR_get_sc (dev) == NULL) {
3221 if (ASR_ctlr_held) {
3223 } else if ((error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0)) == 0) {
/* asr_close: character-device close entry (body elided in this listing). */
3231 asr_close(struct dev_close_args *ap)
3238 /*-------------------------------------------------------------------------*/
3239 /* Function ASR_queue_i */
3240 /*-------------------------------------------------------------------------*/
3241 /* The Parameters Passed To This Function Are : */
3242 /* Asr_softc_t * : HBA miniport driver's adapter data storage. */
3243 /* PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command */
3244 /* I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure */
3246 /* This Function Will Take The User Request Packet And Convert It To An */
3247 /* I2O MSG And Send It Off To The Adapter. */
3249 /* Return : 0 For OK, Error Code Otherwise */
3250 /*-------------------------------------------------------------------------*/
/*
 * NOTE(review): elided listing — parts of ASR_queue_i() are missing
 * between the numbered rows.  Code kept verbatim; comments only.
 */
3253 IN Asr_softc_t * sc,
3254 INOUT PI2O_MESSAGE_FRAME Packet)
3256 union asr_ccb * ccb;
3257 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
3258 PI2O_MESSAGE_FRAME Message_Ptr;
3259 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply_Ptr;
3260 int MessageSizeInBytes;
3261 int ReplySizeInBytes;
3264 /* Scatter Gather buffer list */
3265 struct ioctlSgList_S {
3266 SLIST_ENTRY(ioctlSgList_S) link;
3268 I2O_FLAGS_COUNT FlagsCount;
/* Flexible storage tail: element is kmalloc'ed oversized to hold `len'. */
3269 char KernelSpace[sizeof(long)];
3271 /* Generates a `first' entry */
3272 SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;
/* Refuse new user commands while firmware reports a blink-LED code. */
3274 if (ASR_getBlinkLedCode(sc)) {
3275 debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
3276 ASR_getBlinkLedCode(sc));
3279 /* Copy in the message into a local allocation */
3280 Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (
3281 sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK);
3282 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3283 sizeof(I2O_MESSAGE_FRAME))) != 0) {
3284 kfree (Message_Ptr, M_TEMP);
3285 debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
3288 /* Acquire information to determine type of packet */
3289 MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
3290 /* The offset of the reply information within the user packet */
3291 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
3292 + MessageSizeInBytes);
3294 /* Check if the message is a synchronous initialization command */
3295 s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
3296 kfree (Message_Ptr, M_TEMP);
/* These functions are executed inline and return the status directly. */
3299 case I2O_EXEC_IOP_RESET:
3302 status = ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt);
3303 ReplySizeInBytes = sizeof(status);
3304 debug_usr_cmd_printf ("resetIOP done\n");
3305 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3309 case I2O_EXEC_STATUS_GET:
3310 { I2O_EXEC_STATUS_GET_REPLY status;
3312 if (ASR_getStatus (sc->ha_Virt, sc->ha_Fvirt, &status)
3313 == (PI2O_EXEC_STATUS_GET_REPLY)NULL) {
3314 debug_usr_cmd_printf ("getStatus failed\n");
3317 ReplySizeInBytes = sizeof(status);
3318 debug_usr_cmd_printf ("getStatus done\n");
3319 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3323 case I2O_EXEC_OUTBOUND_INIT:
3326 status = ASR_initOutBound(sc);
3327 ReplySizeInBytes = sizeof(status);
3328 debug_usr_cmd_printf ("intOutBound done\n");
3329 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3334 /* Determine if the message size is valid */
3335 if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
3336 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
3337 debug_usr_cmd_printf ("Packet size %d incorrect\n",
3338 MessageSizeInBytes);
/* Re-copy the full message now that its declared size is known. */
3342 Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (MessageSizeInBytes,
3344 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3345 MessageSizeInBytes)) != 0) {
3346 kfree (Message_Ptr, M_TEMP);
3347 debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
3348 MessageSizeInBytes, error);
3352 /* Check the size of the reply frame, and start constructing */
3354 Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
3355 sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK);
3356 if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
3357 sizeof(I2O_MESSAGE_FRAME))) != 0) {
3358 kfree (Reply_Ptr, M_TEMP);
3359 kfree (Message_Ptr, M_TEMP);
3360 debug_usr_cmd_printf (
3361 "Failed to copy in reply frame, errno=%d\n",
3365 ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
3366 &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
3367 kfree (Reply_Ptr, M_TEMP);
3368 if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
3369 kfree (Message_Ptr, M_TEMP);
3370 debug_usr_cmd_printf (
3371 "Failed to copy in reply frame[%d], errno=%d\n",
3372 ReplySizeInBytes, error);
/* Allocate a kernel reply frame at least one SCSI-error frame big. */
3376 Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
3377 ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
3379 : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
3381 (void)ASR_fillMessage ((char *)Reply_Ptr, ReplySizeInBytes);
/* Mirror the request's contexts into the reply we will hand back. */
3382 Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
3383 = Message_Ptr->InitiatorContext;
3384 Reply_Ptr->StdReplyFrame.TransactionContext
3385 = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
3386 I2O_MESSAGE_FRAME_setMsgFlags(
3387 &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3388 I2O_MESSAGE_FRAME_getMsgFlags(
3389 &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
3390 | I2O_MESSAGE_FLAGS_REPLY);
3392 /* Check if the message is a special case command */
3393 switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
3394 case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
3395 if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
3396 Message_Ptr) & 0xF0) >> 2)) {
3397 kfree (Message_Ptr, M_TEMP);
3398 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3399 &(Reply_Ptr->StdReplyFrame),
3400 (ASR_setSysTab(sc) != CAM_REQ_CMP));
3401 I2O_MESSAGE_FRAME_setMessageSize(
3402 &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3403 sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
3404 error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
3406 kfree (Reply_Ptr, M_TEMP);
3411 /* Deal in the general case */
3412 /* First allocate and optionally copy in each scatter gather element */
3413 SLIST_INIT(&sgList);
3414 if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
3415 PI2O_SGE_SIMPLE_ELEMENT sg;
3418 * since this code is reused in several systems, code
3419 * efficiency is greater by using a shift operation rather
3420 * than a divide by sizeof(u_int32_t).
3422 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3423 + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
/* Walk each SG element embedded at the end of the message frame. */
3425 while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
3426 + MessageSizeInBytes)) {
3430 if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3431 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
3435 len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
3436 debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
3437 sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3438 + ((I2O_MESSAGE_FRAME_getVersionOffset(
3439 Message_Ptr) & 0xF0) >> 2)),
3440 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);
/* Shadow each user buffer with a kernel bounce buffer on sgList. */
3442 elm = (struct ioctlSgList_S *)kmalloc (
3443 sizeof(*elm) - sizeof(elm->KernelSpace) + len,
3445 SLIST_INSERT_HEAD(&sgList, elm, link);
3446 elm->FlagsCount = sg->FlagsCount;
3447 elm->UserSpace = (caddr_t)
3448 (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
3449 v = elm->KernelSpace;
3450 /* Copy in outgoing data (DIR bit could be invalid) */
3451 if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
3456 * If the buffer is not contiguous, lets
3457 * break up the scatter/gather entries.
3460 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
3461 (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
3462 int next, base, span;
/* Rewrite the element to the bounce buffer's physical address. */
3465 next = base = KVTOPHYS(v);
3466 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
3469 /* How far can we go physically contiguously */
3470 while ((len > 0) && (base == next)) {
3473 next = trunc_page(base) + PAGE_SIZE;
3484 /* Construct the Flags */
3485 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
3488 int flags = I2O_FLAGS_COUNT_getFlags(
3489 &(elm->FlagsCount));
3490 /* Any remaining length? */
3493 ~(I2O_SGL_FLAGS_END_OF_BUFFER
3494 | I2O_SGL_FLAGS_LAST_ELEMENT);
3496 I2O_FLAGS_COUNT_setFlags(
3497 &(sg->FlagsCount), flags);
3500 debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
3501 sg - (PI2O_SGE_SIMPLE_ELEMENT)
3502 ((char *)Message_Ptr
3503 + ((I2O_MESSAGE_FRAME_getVersionOffset(
3504 Message_Ptr) & 0xF0) >> 2)),
3505 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
3512 * Incrementing requires resizing of the
3513 * packet, and moving up the existing SG
3517 MessageSizeInBytes += sizeof(*sg);
3518 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
3519 I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
3520 + (sizeof(*sg) / sizeof(U32)));
/* Grow the message frame and shift the SG tail up one element. */
3522 PI2O_MESSAGE_FRAME NewMessage_Ptr;
3525 = (PI2O_MESSAGE_FRAME)
3526 kmalloc (MessageSizeInBytes,
3528 span = ((caddr_t)sg)
3529 - (caddr_t)Message_Ptr;
3530 bcopy ((caddr_t)Message_Ptr,
3531 (caddr_t)NewMessage_Ptr, span);
3532 bcopy ((caddr_t)(sg-1),
3533 ((caddr_t)NewMessage_Ptr) + span,
3534 MessageSizeInBytes - span);
3535 kfree (Message_Ptr, M_TEMP);
3536 sg = (PI2O_SGE_SIMPLE_ELEMENT)
3537 (((caddr_t)NewMessage_Ptr) + span);
3538 Message_Ptr = NewMessage_Ptr;
3542 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3543 & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
/* Error unwind: release every bounce buffer plus both frames. */
3549 while ((elm = SLIST_FIRST(&sgList))
3551 SLIST_REMOVE_HEAD(&sgList, link);
3552 kfree (elm, M_TEMP);
3554 kfree (Reply_Ptr, M_TEMP);
3555 kfree (Message_Ptr, M_TEMP);
3560 debug_usr_cmd_printf ("Inbound: ");
3561 debug_usr_cmd_dump_message(Message_Ptr);
3563 /* Send the command */
3564 if ((ccb = asr_alloc_ccb (sc)) == NULL) {
3565 /* Free up in-kernel buffers */
3566 while ((elm = SLIST_FIRST(&sgList))
3568 SLIST_REMOVE_HEAD(&sgList, link);
3569 kfree (elm, M_TEMP);
3571 kfree (Reply_Ptr, M_TEMP);
3572 kfree (Message_Ptr, M_TEMP);
3577 * We do not need any (optional byteswapping) method access to
3578 * the Initiator context field.
3580 I2O_MESSAGE_FRAME_setInitiatorContext64(
3581 (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);
3583 (void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
3585 kfree (Message_Ptr, M_TEMP);
3588 * Wait for the board to report a finished instruction.
3591 while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
/* Blink-LED while waiting means the firmware wedged: reset the HBA. */
3592 if (ASR_getBlinkLedCode(sc)) {
3594 kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
3595 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
3596 ASR_getBlinkLedCode(sc));
3597 if (ASR_reset (sc) == ENXIO) {
3598 /* Command Cleanup */
3599 ASR_ccbRemove(sc, ccb);
3602 /* Free up in-kernel buffers */
3603 while ((elm = SLIST_FIRST(&sgList))
3605 SLIST_REMOVE_HEAD(&sgList, link);
3606 kfree (elm, M_TEMP);
3608 kfree (Reply_Ptr, M_TEMP);
3612 /* Check every second for BlinkLed */
3613 tsleep((caddr_t)ccb, 0, "asr", hz);
3617 debug_usr_cmd_printf ("Outbound: ");
3618 debug_usr_cmd_dump_message(Reply_Ptr);
3620 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3621 &(Reply_Ptr->StdReplyFrame),
3622 (ccb->ccb_h.status != CAM_REQ_CMP));
3624 if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3625 - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
3626 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
3627 ccb->csio.dxfer_len - ccb->csio.resid);
/* Append auto-sense data to the reply if the caller left room for it. */
3629 if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
3630 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3631 - I2O_SCSI_SENSE_DATA_SZ))) {
3632 int size = ReplySizeInBytes
3633 - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3634 - I2O_SCSI_SENSE_DATA_SZ;
3636 if (size > sizeof(ccb->csio.sense_data)) {
3637 size = sizeof(ccb->csio.sense_data);
3639 bcopy ((caddr_t)&(ccb->csio.sense_data), (caddr_t)Reply_Ptr->SenseData,
3641 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
3645 /* Free up in-kernel buffers */
3646 while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3647 /* Copy out as necessary */
3649 /* DIR bit considered `valid', error due to ignorance works */
3650 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
3651 & I2O_SGL_FLAGS_DIR) == 0)) {
3652 error = copyout ((caddr_t)(elm->KernelSpace),
3654 I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
3656 SLIST_REMOVE_HEAD(&sgList, link);
3657 kfree (elm, M_TEMP);
3660 /* Copy reply frame to user space */
3661 error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
3664 kfree (Reply_Ptr, M_TEMP);
3670 /*----------------------------------------------------------------------*/
3671 /* Function asr_ioctl */
3672 /*----------------------------------------------------------------------*/
3673 /* The parameters passed to this function are : */
3674 /* dev : Device number. */
3675 /* cmd : Ioctl Command */
3676 /* data : User Argument Passed In. */
3677 /* flag : Mode Parameter */
3678 /* proc : Process Parameter */
3680 /* This function is the user interface into this adapter driver */
3682 /* Return : zero if OK, error code if not */
3683 /*----------------------------------------------------------------------*/
3686 asr_ioctl(struct dev_ioctl_args *ap)
3688 cdev_t dev = ap->a_head.a_dev;
3689 caddr_t data = ap->a_data;
3692 Asr_softc_t * sc = ASR_get_sc (dev);
3698 # if (dsDescription_size != 50)
3699 case DPT_SIGNATURE + ((50 - dsDescription_size) << 16):
3701 if (ap->a_cmd & 0xFFFF0000) {
3702 (void)bcopy ((caddr_t)(&ASR_sig), data,
3706 /* Traditional version of the ioctl interface */
3707 case DPT_SIGNATURE & 0x0000FFFF:
3708 return (copyout ((caddr_t)(&ASR_sig), *((caddr_t *)data),
3709 sizeof(dpt_sig_S)));
3711 /* Traditional version of the ioctl interface */
3712 case DPT_CTRLINFO & 0x0000FFFF:
3713 case DPT_CTRLINFO: {
3716 u_int16_t drvrHBAnum;
3718 u_int16_t blinkState;
3720 u_int8_t pciDeviceNum;
3722 u_int16_t Interrupt;
3723 u_int32_t reserved1;
3724 u_int32_t reserved2;
3725 u_int32_t reserved3;
3728 bzero (&CtlrInfo, sizeof(CtlrInfo));
3729 CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t);
3730 CtlrInfo.drvrHBAnum = asr_unit(dev);
3731 CtlrInfo.baseAddr = (u_long)sc->ha_Base;
3732 i = ASR_getBlinkLedCode (sc);
3736 CtlrInfo.blinkState = i;
3737 CtlrInfo.pciBusNum = sc->ha_pciBusNum;
3738 CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum;
3739 #define FLG_OSD_PCI_VALID 0x0001
3740 #define FLG_OSD_DMA 0x0002
3741 #define FLG_OSD_I2O 0x0004
3742 CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
3743 CtlrInfo.Interrupt = sc->ha_irq;
3744 if (ap->a_cmd & 0xFFFF0000) {
3745 bcopy (&CtlrInfo, data, sizeof(CtlrInfo));
3747 error = copyout (&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo));
3751 /* Traditional version of the ioctl interface */
3752 case DPT_SYSINFO & 0x0000FFFF:
3756 /* Kernel Specific ptok `hack' */
3757 # define ptok(a) ((char *)(uintptr_t)(a) + KERNBASE)
3759 bzero (&Info, sizeof(Info));
3761 /* Appears I am the only person in the Kernel doing this */
3769 Info.drive0CMOS = j;
3776 Info.drive1CMOS = j;
3778 Info.numDrives = *((char *)ptok(0x475));
3780 Info.processorFamily = ASR_sig.dsProcessorFamily;
3781 Info.processorType = PROC_SEXIUM; break;
3782 Info.osType = OS_BSDI_UNIX;
3783 Info.osMajorVersion = osrelease[0] - '0';
3784 Info.osMinorVersion = osrelease[2] - '0';
3785 /* Info.osRevision = 0; */
3786 /* Info.osSubRevision = 0; */
3787 Info.busType = SI_PCI_BUS;
3788 Info.flags = SI_CMOS_Valid | SI_NumDrivesValid
3789 | SI_OSversionValid | SI_BusTypeValid | SI_NO_SmartROM;
3791 /* Go Out And Look For I2O SmartROM */
3792 for(j = 0xC8000; j < 0xE0000; j += 2048) {
3796 if (*((unsigned short *)cp) != 0xAA55) {
3799 j += (cp[2] * 512) - 2048;
3800 if ((*((u_long *)(cp + 6))
3801 != ('S' + (' ' * 256) + (' ' * 65536L)))
3802 || (*((u_long *)(cp + 10))
3803 != ('I' + ('2' * 256) + ('0' * 65536L)))) {
3807 for (k = 0; k < 64; ++k) {
3808 if (*((unsigned short *)cp)
3809 == (' ' + ('v' * 256))) {
3814 Info.smartROMMajorVersion
3815 = *((unsigned char *)(cp += 4)) - '0';
3816 Info.smartROMMinorVersion
3817 = *((unsigned char *)(cp += 2));
3818 Info.smartROMRevision
3819 = *((unsigned char *)(++cp));
3820 Info.flags |= SI_SmartROMverValid;
3821 Info.flags &= ~SI_NO_SmartROM;
3825 /* Get The Conventional Memory Size From CMOS */
3831 Info.conventionalMemSize = j;
3833 /* Get The Extended Memory Found At Power On From CMOS */
3839 Info.extendedMemSize = j;
3840 Info.flags |= SI_MemorySizeValid;
3842 # if (defined(THIS_IS_BROKEN))
3843 /* If There Is 1 or 2 Drives Found, Set Up Drive Parameters */
3844 if (Info.numDrives > 0) {
3846 * Get The Pointer From Int 41 For The First
3849 j = ((unsigned)(*((unsigned short *)ptok(0x104+2))) << 4)
3850 + (unsigned)(*((unsigned short *)ptok(0x104+0)));
3852 * It appears that SmartROM's Int41/Int46 pointers
3853 * use memory that gets stepped on by the kernel
3854 * loading. We no longer have access to this
3855 * geometry information but try anyways (!?)
3857 Info.drives[0].cylinders = *((unsigned char *)ptok(j));
3859 Info.drives[0].cylinders += ((int)*((unsigned char *)
3862 Info.drives[0].heads = *((unsigned char *)ptok(j));
3864 Info.drives[0].sectors = *((unsigned char *)ptok(j));
3865 Info.flags |= SI_DriveParamsValid;
3866 if ((Info.drives[0].cylinders == 0)
3867 || (Info.drives[0].heads == 0)
3868 || (Info.drives[0].sectors == 0)) {
3869 Info.flags &= ~SI_DriveParamsValid;
3871 if (Info.numDrives > 1) {
3873 * Get The Pointer From Int 46 For The
3874 * Second Drive Parameters
3876 j = ((unsigned)(*((unsigned short *)ptok(0x118+2))) << 4)
3877 + (unsigned)(*((unsigned short *)ptok(0x118+0)));
3878 Info.drives[1].cylinders = *((unsigned char *)
3881 Info.drives[1].cylinders += ((int)
3882 *((unsigned char *)ptok(j))) << 8;
3884 Info.drives[1].heads = *((unsigned char *)
3887 Info.drives[1].sectors = *((unsigned char *)
3889 if ((Info.drives[1].cylinders == 0)
3890 || (Info.drives[1].heads == 0)
3891 || (Info.drives[1].sectors == 0)) {
3892 Info.flags &= ~SI_DriveParamsValid;
3897 /* Copy Out The Info Structure To The User */
3898 if (ap->a_cmd & 0xFFFF0000) {
3899 bcopy (&Info, data, sizeof(Info));
3901 error = copyout (&Info, *(caddr_t *)data, sizeof(Info));
3905 /* Get The BlinkLED State */
3907 i = ASR_getBlinkLedCode (sc);
3911 if (ap->a_cmd & 0xFFFF0000) {
3912 bcopy ((caddr_t)(&i), data, sizeof(i));
3914 error = copyout (&i, *(caddr_t *)data, sizeof(i));
3918 /* Send an I2O command */
3920 return (ASR_queue_i (sc, *((PI2O_MESSAGE_FRAME *)data)));
3922 /* Reset and re-initialize the adapter */
3924 return (ASR_reset (sc));
3926 /* Rescan the LCT table and resynchronize the information */
3928 return (ASR_rescan (sc));