1 /* $FreeBSD: src/sys/dev/asr/asr.c,v 1.3.2.2 2001/08/23 05:21:29 scottl Exp $ */
2 /* $DragonFly: src/sys/dev/raid/asr/asr.c,v 1.27 2006/10/25 20:56:01 dillon Exp $ */
4 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
5 * Copyright (c) 2000-2001 Adaptec Corporation
8 * TERMS AND CONDITIONS OF USE
10 * Redistribution and use in source form, with or without modification, are
11 * permitted provided that redistributions of source code must retain the
12 * above copyright notice, this list of conditions and the following disclaimer.
14 * This software is provided `as is' by Adaptec and any express or implied
15 * warranties, including, but not limited to, the implied warranties of
16 * merchantability and fitness for a particular purpose, are disclaimed. In no
17 * event shall Adaptec be liable for any direct, indirect, incidental, special,
18 * exemplary or consequential damages (including, but not limited to,
19 * procurement of substitute goods or services; loss of use, data, or profits;
20 * or business interruptions) however caused and on any theory of liability,
21 * whether in contract, strict liability, or tort (including negligence or
22 * otherwise) arising in any way out of the use of this driver software, even
23 * if advised of the possibility of such damage.
25 * SCSI I2O host adapter driver
27 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
28 * - The 2000S and 2005S do not initialize on some machines,
29 * increased timeout to 255ms from 50ms for the StatusGet
31 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
32 * - I knew this one was too good to be true. The error return
33 * on ioctl commands needs to be compared to CAM_REQ_CMP, not
34 * to the bit masked status.
35 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
36 * - The 2005S that was supported is affectionately called the
37 * Conjoined BAR Firmware. In order to support RAID-5 in a
38 * 16MB low-cost configuration, Firmware was forced to go
39 * to a Split BAR Firmware. This requires a separate IOP and
40 * Messaging base address.
41 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
42 * - Handle support for 2005S Zero Channel RAID solution.
43 * - System locked up if the Adapter locked up. Do not try
44 * to send other commands if the resetIOP command fails. The
45 * fail outstanding command discovery loop was flawed as the
46 * removal of the command from the list prevented discovering
48 * - Comment changes to clarify driver.
49 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
50 * - We do not use the AC_FOUND_DEV event because of I2O.
52 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
53 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
54 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0
55 * mode as this is confused with competitor adapters in run
57 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove
58 * to prevent operating system panic.
59 * - moved default major number to 154 from 97.
60 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
61 * - The controller is not actually an ASR (Adaptec SCSI RAID)
62 * series that is visible, it's more of an internal code name.
63 * remove any visible references within reason for now.
64 * - bus_ptr->LUN was not correctly zeroed when initially
65 * allocated causing a possible panic of the operating system
67 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
68 * - Code always fails for ASR_getTid affecting performance.
69 * - initiated a set of changes that resulted from a formal
70 * code inspection by Mark_Salyzyn@adaptec.com,
71 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
72 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
73 * Their findings were focussed on the LCT & TID handler, and
74 * all resulting changes were to improve code readability,
75 * consistency or have a positive effect on performance.
76 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
77 * - Passthrough returned an incorrect error.
78 * - Passthrough did not migrate the intrinsic scsi layer wakeup
79 * on command completion.
80 * - generate control device nodes using make_dev and delete_dev.
81 * - Performance affected by TID caching reallocing.
82 * - Made suggested changes by Justin_Gibbs@adaptec.com
83 * - use splcam instead of splbio.
84 * - use u_int8_t instead of u_char.
85 * - use u_int16_t instead of u_short.
86 * - use u_int32_t instead of u_long where appropriate.
87 * - use 64 bit context handler instead of 32 bit.
88 * - create_ccb should only allocate the worst case
89 * requirements for the driver since CAM may evolve
90 * making union ccb much larger than needed here.
91 * renamed create_ccb to asr_alloc_ccb.
92 * - go nutz justifying all debug prints as macros
93 * defined at the top and remove unsightly ifdefs.
94 * - INLINE STATIC viewed as confusing. Historically
95 * utilized to affect code performance and debug
96 * issues in OS, Compiler or OEM specific situations.
97 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
98 * - Ported from FreeBSD 2.2.X DPT I2O driver.
99 * changed struct scsi_xfer to union ccb/struct ccb_hdr
100 * changed variable name xs to ccb
101 * changed struct scsi_link to struct cam_path
102 * changed struct scsibus_data to struct cam_sim
103 * stopped using fordriver for holding on to the TID
104 * use proprietary packet creation instead of scsi_inquire
105 * CAM layer sends synchronize commands.
108 #define ASR_VERSION 1
109 #define ASR_REVISION '0'
110 #define ASR_SUBREVISION '8'
113 #define ASR_YEAR 2001 - 1980
116 * Debug macros to reduce the unsightly ifdefs
118 #if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
119 # define debug_asr_message(message) \
121 u_int32_t * pointer = (u_int32_t *)message; \
122 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);\
123 u_int32_t counter = 0; \
126 printf ("%08lx%c", (u_long)*(pointer++), \
127 (((++counter & 7) == 0) || (length == 0)) \
132 #endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
134 #if (defined(DEBUG_ASR))
135 /* Breaks on non-STDC-based compilers :-( */
136 # define debug_asr_printf(fmt,args...) printf(fmt, ##args)
137 # define debug_asr_dump_message(message) debug_asr_message(message)
138 # define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path);
139 /* Non-fatal version of the ASSERT macro */
140 # if (defined(__STDC__))
141 # define ASSERT(phrase) if(!(phrase))printf(#phrase " at line %d file %s\n",__LINE__,__FILE__)
143 # define ASSERT(phrase) if(!(phrase))printf("phrase" " at line %d file %s\n",__LINE__,__FILE__)
145 #else /* DEBUG_ASR */
146 # define debug_asr_printf(fmt,args...)
147 # define debug_asr_dump_message(message)
148 # define debug_asr_print_path(ccb)
150 #endif /* DEBUG_ASR */
153 * If DEBUG_ASR_CMD is defined:
154 * 0 - Display incoming SCSI commands
155 * 1 - add in a quick character before queueing.
156 * 2 - add in outgoing message frames.
158 #if (defined(DEBUG_ASR_CMD))
159 # define debug_asr_cmd_printf(fmt,args...) printf(fmt,##args)
160 # define debug_asr_dump_ccb(ccb) \
162 u_int8_t * cp = (unsigned char *)&(ccb->csio.cdb_io); \
163 int len = ccb->csio.cdb_len; \
166 debug_asr_cmd_printf (" %02x", *(cp++)); \
170 # if (DEBUG_ASR_CMD > 0)
171 # define debug_asr_cmd1_printf debug_asr_cmd_printf
173 # define debug_asr_cmd1_printf(fmt,args...)
175 # if (DEBUG_ASR_CMD > 1)
176 # define debug_asr_cmd2_printf debug_asr_cmd_printf
177 # define debug_asr_cmd2_dump_message(message) debug_asr_message(message)
179 # define debug_asr_cmd2_printf(fmt,args...)
180 # define debug_asr_cmd2_dump_message(message)
182 #else /* DEBUG_ASR_CMD */
183 # define debug_asr_cmd_printf(fmt,args...)
184 # define debug_asr_cmd_dump_ccb(ccb)
185 # define debug_asr_cmd1_printf(fmt,args...)
186 # define debug_asr_cmd2_printf(fmt,args...)
187 # define debug_asr_cmd2_dump_message(message)
188 #endif /* DEBUG_ASR_CMD */
190 #if (defined(DEBUG_ASR_USR_CMD))
191 # define debug_usr_cmd_printf(fmt,args...) printf(fmt,##args)
192 # define debug_usr_cmd_dump_message(message) debug_usr_message(message)
193 #else /* DEBUG_ASR_USR_CMD */
194 # define debug_usr_cmd_printf(fmt,args...)
195 # define debug_usr_cmd_dump_message(message)
196 #endif /* DEBUG_ASR_USR_CMD */
198 #define dsDescription_size 46 /* Snug as a bug in a rug */
201 static dpt_sig_S ASR_sig = {
202 { 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
203 PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
204 OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL,
206 0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
207 ASR_MONTH, ASR_DAY, ASR_YEAR,
208 /* 01234567890123456789012345678901234567890123456789 < 50 chars */
209 "Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
210 /* ^^^^^ asr_attach alters these to match OS */
213 #include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */
214 #include <sys/kernel.h>
215 #include <sys/systm.h>
216 #include <sys/malloc.h>
217 #include <sys/proc.h>
218 #include <sys/conf.h>
219 #include <sys/disklabel.h>
221 #include <sys/rman.h>
222 #include <sys/stat.h>
223 #include <sys/device.h>
224 #include <sys/thread2.h>
226 #include <bus/cam/cam.h>
227 #include <bus/cam/cam_ccb.h>
228 #include <bus/cam/cam_sim.h>
229 #include <bus/cam/cam_xpt_sim.h>
230 #include <bus/cam/cam_xpt_periph.h>
232 #include <bus/cam/scsi/scsi_all.h>
233 #include <bus/cam/scsi/scsi_message.h>
237 #include <machine/cputypes.h>
238 #include <machine/clock.h>
239 #include <machine/vmparam.h>
241 #include <bus/pci/pcivar.h>
242 #include <bus/pci/pcireg.h>
244 #define STATIC static
247 #if (defined(DEBUG_ASR) && (DEBUG_ASR > 0))
257 #define osdSwap4(x) ((u_long)ntohl((u_long)(x)))
258 #define KVTOPHYS(x) vtophys(x)
259 #include "dptalign.h"
261 #include "i2obscsi.h"
263 #include "i2oadptr.h"
264 #include "sys_info.h"
266 /* Configuration Definitions */
268 #define SG_SIZE 58 /* Scatter Gather list Size */
269 #define MAX_TARGET_ID 126 /* Maximum Target ID supported */
270 #define MAX_LUN 255 /* Maximum LUN Supported */
271 #define MAX_CHANNEL 7 /* Maximum Channel # Supported by driver */
272 #define MAX_INBOUND 2000 /* Max CCBs, Also Max Queue Size */
273 #define MAX_OUTBOUND 256 /* Maximum outbound frames/adapter */
274 #define MAX_INBOUND_SIZE 512 /* Maximum inbound frame size */
275 #define MAX_MAP 4194304L /* Maximum mapping size of IOP */
276 /* Also serves as the minimum map for */
277 /* the 2005S zero channel RAID product */
279 /**************************************************************************
280 ** ASR Host Adapter structure - One Structure For Each Host Adapter That **
281 ** Is Configured Into The System. The Structure Supplies Configuration **
282 ** Information, Status Info, Queue Info And An Active CCB List Pointer. **
283 ***************************************************************************/
285 /* I2O register set */
290 # define Mask_InterruptsDisabled 0x08
292 volatile U32 ToFIFO; /* In Bound FIFO */
293 volatile U32 FromFIFO; /* Out Bound FIFO */
297 * A MIX of performance and space considerations for TID lookups
299 typedef u_int16_t tid_t;
302 u_int32_t size; /* up to MAX_LUN */
307 u_int32_t size; /* up to MAX_TARGET */
312 * To ensure that we only allocate and use the worst case ccb here, lets
313 * make our own local ccb union. If asr_alloc_ccb is utilized for another
314 * ccb type, ensure that you add the additional structures into our local
315 * ccb union. To ensure strict type checking, we will utilize the local
316 * ccb definition wherever possible.
319 struct ccb_hdr ccb_h; /* For convenience */
320 struct ccb_scsiio csio;
321 struct ccb_setasync csa;
324 typedef struct Asr_softc {
326 void * ha_Base; /* base port for each board */
327 u_int8_t * volatile ha_blinkLED;
328 i2oRegs_t * ha_Virt; /* Base address of IOP */
329 U8 * ha_Fvirt; /* Base address of Frames */
330 I2O_IOP_ENTRY ha_SystemTable;
331 LIST_HEAD(,ccb_hdr) ha_ccb; /* ccbs in use */
332 struct cam_path * ha_path[MAX_CHANNEL+1];
333 struct cam_sim * ha_sim[MAX_CHANNEL+1];
334 struct resource * ha_mem_res;
335 struct resource * ha_mes_res;
336 struct resource * ha_irq_res;
338 PI2O_LCT ha_LCT; /* Complete list of devices */
339 # define le_type IdentityTag[0]
340 # define I2O_BSA 0x20
341 # define I2O_FCA 0x40
342 # define I2O_SCSI 0x00
343 # define I2O_PORT 0x80
344 # define I2O_UNKNOWN 0x7F
345 # define le_bus IdentityTag[1]
346 # define le_target IdentityTag[2]
347 # define le_lun IdentityTag[3]
348 target2lun_t * ha_targets[MAX_CHANNEL+1];
349 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
352 u_int8_t ha_in_reset;
353 # define HA_OPERATIONAL 0
354 # define HA_IN_RESET 1
355 # define HA_OFF_LINE 2
356 # define HA_OFF_LINE_RECOVERY 3
357 /* Configuration information */
358 /* The target id maximums we take */
359 u_int8_t ha_MaxBus; /* Maximum bus */
360 u_int8_t ha_MaxId; /* Maximum target ID */
361 u_int8_t ha_MaxLun; /* Maximum target LUN */
362 u_int8_t ha_SgSize; /* Max SG elements */
363 u_int8_t ha_pciBusNum;
364 u_int8_t ha_pciDeviceNum;
365 u_int8_t ha_adapter_target[MAX_CHANNEL+1];
366 u_int16_t ha_QueueSize; /* Max outstanding commands */
367 u_int16_t ha_Msgs_Count;
369 /* Links into other parents and HBAs */
370 struct Asr_softc * ha_next; /* HBA list */
373 STATIC Asr_softc_t * Asr_softc;
376 * Prototypes of the routines we have in this object.
379 /* Externally callable routines */
380 #define PROBE_ARGS IN device_t tag
381 #define PROBE_RET int
382 #define PROBE_SET() u_long id = (pci_get_device(tag)<<16)|pci_get_vendor(tag)
383 #define PROBE_RETURN(retval) if(retval){device_set_desc(tag,retval);return(0);}else{return(ENXIO);}
384 #define ATTACH_ARGS IN device_t tag
385 #define ATTACH_RET int
386 #define ATTACH_SET() int unit = device_get_unit(tag)
387 #define ATTACH_RETURN(retval) return(retval)
388 /* I2O HDM interface */
389 STATIC PROBE_RET asr_probe (PROBE_ARGS);
390 STATIC ATTACH_RET asr_attach (ATTACH_ARGS);
391 /* DOMINO placeholder */
392 STATIC PROBE_RET domino_probe (PROBE_ARGS);
393 STATIC ATTACH_RET domino_attach (ATTACH_ARGS);
394 /* MODE0 adapter placeholder */
395 STATIC PROBE_RET mode0_probe (PROBE_ARGS);
396 STATIC ATTACH_RET mode0_attach (ATTACH_ARGS);
398 STATIC Asr_softc_t * ASR_get_sc (cdev_t dev);
399 STATIC d_ioctl_t asr_ioctl;
400 STATIC d_open_t asr_open;
401 STATIC d_close_t asr_close;
402 STATIC int asr_intr (IN Asr_softc_t *sc);
403 STATIC void asr_timeout (INOUT void *arg);
404 STATIC int ASR_init (IN Asr_softc_t *sc);
405 STATIC INLINE int ASR_acquireLct (INOUT Asr_softc_t *sc);
406 STATIC INLINE int ASR_acquireHrt (INOUT Asr_softc_t *sc);
407 STATIC void asr_action (IN struct cam_sim *sim,
409 STATIC void asr_poll (IN struct cam_sim * sim);
412 * Here is the auto-probe structure used to nest our tests appropriately
413 * during the startup phase of the operating system.
415 STATIC device_method_t asr_methods[] = {
416 DEVMETHOD(device_probe, asr_probe),
417 DEVMETHOD(device_attach, asr_attach),
421 STATIC driver_t asr_driver = {
427 STATIC devclass_t asr_devclass;
429 DECLARE_DUMMY_MODULE(asr);
430 DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);
432 STATIC device_method_t domino_methods[] = {
433 DEVMETHOD(device_probe, domino_probe),
434 DEVMETHOD(device_attach, domino_attach),
438 STATIC driver_t domino_driver = {
444 STATIC devclass_t domino_devclass;
446 DRIVER_MODULE(domino, pci, domino_driver, domino_devclass, 0, 0);
448 STATIC device_method_t mode0_methods[] = {
449 DEVMETHOD(device_probe, mode0_probe),
450 DEVMETHOD(device_attach, mode0_attach),
454 STATIC driver_t mode0_driver = {
460 STATIC devclass_t mode0_devclass;
462 DRIVER_MODULE(mode0, pci, mode0_driver, mode0_devclass, 0, 0);
465 * devsw for asr hba driver
467 * only ioctl is used. the sd driver provides all other access.
469 #define CDEV_MAJOR 154 /* prefered default character major */
470 STATIC struct dev_ops asr_ops = {
471 { "asr", CDEV_MAJOR, 0 },
473 .d_close = asr_close,
474 .d_ioctl = asr_ioctl,
478 * Initialize the dynamic dev_ops hooks.
481 asr_drvinit (void * unused)
483 static int asr_devsw_installed = 0;
485 if (asr_devsw_installed) {
488 asr_devsw_installed++;
490 * Find a free spot (the report during driver load used by
491 * osd layer in engine to generate the controlling nodes).
493 * XXX this is garbage code, store a unit number in asr_ops
494 * and iterate through that instead?
496 while (asr_ops.head.maj < NUMCDEVSW &&
497 dev_ops_get(asr_ops.head.maj, -1) != NULL
501 if (asr_ops.head.maj >= NUMCDEVSW) {
502 asr_ops.head.maj = 0;
503 while (asr_ops.head.maj < CDEV_MAJOR &&
504 dev_ops_get(asr_ops.head.maj, -1) != NULL
513 dev_ops_add(&asr_ops, 0, 0);
516 /* Must initialize before CAM layer picks up our HBA driver */
517 SYSINIT(asrdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,asr_drvinit,NULL)
519 /* I2O support routines */
520 #define defAlignLong(STRUCT,NAME) char NAME[sizeof(STRUCT)]
521 #define getAlignLong(STRUCT,NAME) ((STRUCT *)(NAME))
524 * Fill message with default.
/*
 * ASR_fillMessage: initialize an I2O message frame held in the caller's
 * aligned buffer `Message' (see defAlignLong/getAlignLong above).
 * Zeroes `size' bytes, stamps the I2O version/offset, records the frame
 * size in 32-bit words (rounded up from bytes) and sets the initiator
 * address.  Returns the typed pointer into the caller's buffer.
 */
526 STATIC PI2O_MESSAGE_FRAME
531 OUT PI2O_MESSAGE_FRAME Message_Ptr;
533 Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message);
534 bzero ((void *)Message_Ptr, size);
535 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
536 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
/* Frame size is expressed in 32-bit words, rounded up */
537 (size + sizeof(U32) - 1) >> 2);
538 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
539 return (Message_Ptr);
540 } /* ASR_fillMessage */
542 #define EMPTY_QUEUE ((U32)-1L)
/*
 * ASR_getMessage: pop a free inbound message-frame offset from the
 * adapter's ToFIFO register.  Reading the FIFO register consumes an
 * entry; EMPTY_QUEUE ((U32)-1) means none was available.  When the
 * first read comes up empty, the FIFO is read one more time to give
 * the adapter a second chance before failing.
 */
548 OUT U32 MessageOffset;
550 if ((MessageOffset = virt->ToFIFO) == EMPTY_QUEUE) {
/* First read was empty; retry once before giving up */
551 MessageOffset = virt->ToFIFO;
553 return (MessageOffset);
554 } /* ASR_getMessage */
556 /* Issue a polled command */
/*
 * ASR_initiateCp: copy `Message' into a free inbound frame and post it
 * to the adapter with interrupts masked.  Used only by the synchronous
 * (polled) command paths; the caller is responsible for restoring the
 * interrupt mask and for handling the no-free-frame case.
 */
559 INOUT i2oRegs_t * virt,
561 IN PI2O_MESSAGE_FRAME Message)
568 * ASR_initiateCp is only used for synchronous commands and will
569 * be made more resilient to adapter delays since commands like
570 * resetIOP can cause the adapter to be deaf for a little time.
572 while (((MessageOffset = ASR_getMessage(virt)) == EMPTY_QUEUE)
576 if (MessageOffset != EMPTY_QUEUE) {
577 bcopy (Message, fvirt + MessageOffset,
/* Frame length is carried in 32-bit words; convert to bytes */
578 I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
580 * Disable the Interrupts
582 virt->Mask = (Mask = virt->Mask) | Mask_InterruptsDisabled;
/* Writing the offset back to ToFIFO hands the frame to the IOP */
583 virt->ToFIFO = MessageOffset;
586 } /* ASR_initiateCp */
/*
 * asr_resetIOP: issue an I2O ExecIopReset message and poll for the
 * status word to become non-zero.  The reply status U32 lives directly
 * after the message frame in the same aligned buffer and is handed to
 * the adapter by physical address (KVTOPHYS), so it must stay valid
 * for the duration of the poll.
 */
593 INOUT i2oRegs_t * virt,
596 struct resetMessage {
597 I2O_EXEC_IOP_RESET_MESSAGE M;
600 defAlignLong(struct resetMessage,Message);
601 PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
602 OUT U32 * volatile Reply_Ptr;
606 * Build up our copy of the Message.
608 Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(Message,
609 sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
610 I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
612 * Reset the Reply Status
/* Reply status word follows the message frame in the same buffer */
614 *(Reply_Ptr = (U32 *)((char *)Message_Ptr
615 + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0;
616 I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
617 KVTOPHYS((void *)Reply_Ptr));
619 * Send the Message out
621 if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
623 * Wait for a response (Poll), timeouts are dangerous if
624 * the card is truly responsive. We assume response in 2s.
626 u_int8_t Delay = 200;
/* Spin until the adapter writes a non-zero status or we give up */
628 while ((*Reply_Ptr == 0) && (--Delay != 0)) {
632 * Re-enable the interrupts.
638 ASSERT (Old != (U32)-1L);
643 * Get the current state of the adapter
/*
 * ASR_getStatus: issue an I2O ExecStatusGet message and poll the reply
 * buffer's SyncByte for completion.  The reply buffer is zeroed first
 * and handed to the adapter by physical address (KVTOPHYS), so it must
 * remain valid while polling.  Returns `buffer' on success, NULL on
 * timeout or if no inbound frame could be obtained.
 */
645 STATIC INLINE PI2O_EXEC_STATUS_GET_REPLY
647 INOUT i2oRegs_t * virt,
649 OUT PI2O_EXEC_STATUS_GET_REPLY buffer)
651 defAlignLong(I2O_EXEC_STATUS_GET_MESSAGE,Message);
652 PI2O_EXEC_STATUS_GET_MESSAGE Message_Ptr;
656 * Build up our copy of the Message.
658 Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(Message,
659 sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
660 I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
661 I2O_EXEC_STATUS_GET);
662 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
663 KVTOPHYS((void *)buffer));
664 /* This one is a Byte Count */
665 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
666 sizeof(I2O_EXEC_STATUS_GET_REPLY));
668 * Reset the Reply Status
670 bzero ((void *)buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
672 * Send the Message out
674 if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
676 * Wait for a response (Poll), timeouts are dangerous if
677 * the card is truly responsive. We assume response in 255ms.
/* Timeout raised from 50ms to 255ms (V1.08): 2000S/2005S can be slow */
679 u_int8_t Delay = 255;
/* SyncByte is written by the adapter when the reply is complete */
681 while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
683 buffer = (PI2O_EXEC_STATUS_GET_REPLY)NULL;
689 * Re-enable the interrupts.
694 return ((PI2O_EXEC_STATUS_GET_REPLY)NULL);
695 } /* ASR_getStatus */
698 * Check if the device is a SCSI I2O HBA, and add it to the list.
702 * Probe for ASR controller. If we find it, we will use it.
706 asr_probe(PROBE_ARGS)
709 if ((id == 0xA5011044) || (id == 0xA5111044)) {
710 PROBE_RETURN ("Adaptec Caching SCSI RAID");
716 * Probe/Attach for DOMINO chipset.
719 domino_probe(PROBE_ARGS)
722 if (id == 0x10121044) {
723 PROBE_RETURN ("Adaptec Caching Memory Controller");
729 domino_attach (ATTACH_ARGS)
732 } /* domino_attach */
735 * Probe/Attach for MODE0 adapters.
738 mode0_probe(PROBE_ARGS)
743 * If/When we can get a business case to commit to a
744 * Mode0 driver here, we can make all these tests more
745 * specific and robust. Mode0 adapters have their processors
746 turned off, thus the chips are in a raw state.
749 /* This is a PLX9054 */
750 if (id == 0x905410B5) {
751 PROBE_RETURN ("Adaptec Mode0 PM3757");
753 /* This is a PLX9080 */
754 if (id == 0x908010B5) {
755 PROBE_RETURN ("Adaptec Mode0 PM3754/PM3755");
757 /* This is a ZION 80303 */
758 if (id == 0x53098086) {
759 PROBE_RETURN ("Adaptec Mode0 3010S");
761 /* This is an i960RS */
762 if (id == 0x39628086) {
763 PROBE_RETURN ("Adaptec Mode0 2100S");
765 /* This is an i960RN */
766 if (id == 0x19648086) {
767 PROBE_RETURN ("Adaptec Mode0 PM2865/2400A/3200S/3400S");
769 #if 0 /* this would match any generic i960 -- mjs */
770 /* This is an i960RP (typically also on Motherboards) */
771 if (id == 0x19608086) {
772 PROBE_RETURN ("Adaptec Mode0 PM2554/PM1554/PM2654");
779 mode0_attach (ATTACH_ARGS)
784 STATIC INLINE union asr_ccb *
/*
 * asr_alloc_ccb: allocate and zero one local asr_ccb union, priming
 * the header with default priority, an unqueued index, and a back
 * pointer to the owning softc in spriv_ptr0.  Pair with asr_free_ccb().
 */
788 OUT union asr_ccb * new_ccb;
790 if ((new_ccb = (union asr_ccb *)kmalloc(sizeof(*new_ccb),
791 M_DEVBUF, M_WAITOK)) != (union asr_ccb *)NULL) {
792 bzero (new_ccb, sizeof(*new_ccb));
793 new_ccb->ccb_h.pinfo.priority = 1;
794 new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
/* Stash the owning adapter so completion code can find it */
795 new_ccb->ccb_h.spriv_ptr0 = sc;
798 } /* asr_alloc_ccb */
/* asr_free_ccb: release a ccb previously obtained from asr_alloc_ccb() */
802 IN union asr_ccb * free_ccb)
804 kfree(free_ccb, M_DEVBUF);
808 * Print inquiry data `carefully'
815 while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
816 printf ("%c", *(s++));
823 STATIC INLINE int ASR_queue (
825 IN PI2O_MESSAGE_FRAME Message);
827 * Send a message synchronously and without Interrupt to a ccb.
/*
 * ASR_queue_s: send `Message' on behalf of `ccb' and busy-wait (with
 * adapter interrupts masked) until the ccb leaves CAM_REQ_INPROG.
 * If the inbound queue is full the ccb is marked CAM_REQUEUE_REQ
 * instead of being sent.  Returns the final ccb_h.status.
 */
831 INOUT union asr_ccb * ccb,
832 IN PI2O_MESSAGE_FRAME Message)
835 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
838 * We do not need any (optional byteswapping) method access to
839 * the Initiator context field.
/* The ccb pointer itself serves as the 64-bit initiator context */
841 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
843 /* Prevent interrupt service */
845 sc->ha_Virt->Mask = (Mask = sc->ha_Virt->Mask)
846 | Mask_InterruptsDisabled;
848 if (ASR_queue (sc, Message) == EMPTY_QUEUE) {
849 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
850 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
854 * Wait for this board to report a finished instruction.
856 while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
860 /* Re-enable Interrupts */
861 sc->ha_Virt->Mask = Mask;
864 return (ccb->ccb_h.status);
868 * Send a message synchronously to a Asr_softc_t
/*
 * ASR_queue_c: convenience wrapper around ASR_queue_s that supplies a
 * scratch ccb.  Returns CAM_REQUEUE_REQ if no ccb could be allocated,
 * otherwise the status reported by ASR_queue_s.
 */
873 IN PI2O_MESSAGE_FRAME Message)
878 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
879 return (CAM_REQUEUE_REQ);
882 status = ASR_queue_s (ccb, Message);
890 * Add the specified ccb to the active queue
/*
 * ASR_ccbAdd: link the ccb onto the softc's active list (ha_ccb) and
 * arm the per-ccb timeout callout unless the caller requested an
 * infinite timeout.  CAM_TIME_DEFAULT is widened to six minutes.
 */
895 INOUT union asr_ccb * ccb)
898 LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
899 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
900 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
902 * RAID systems can take considerable time to
903 * complete some commands given the large cache
904 * flushes switching from write back to write thru.
906 ccb->ccb_h.timeout = 6 * 60 * 1000;
/* timeout is in milliseconds; convert to ticks for the callout */
908 callout_reset(&ccb->ccb_h.timeout_ch,
909 (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
915 * Remove the specified ccb from the active queue.
/*
 * ASR_ccbRemove: disarm the ccb's timeout callout and unlink it from
 * the softc's active list.  Inverse of ASR_ccbAdd.
 */
920 INOUT union asr_ccb * ccb)
923 callout_stop(&ccb->ccb_h.timeout_ch);
924 LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
926 } /* ASR_ccbRemove */
929 * Fail all the active commands, so they get re-issued by the operating
/*
 * ASR_failActiveCommands: drain the softc's active ccb list, marking
 * each command CAM_REQUEUE_REQ with zero data transferred so the upper
 * layers re-issue it.  Completed ccbs go back to CAM via xpt_done();
 * any synchronous waiter parked on the ccb is woken with wakeup().
 */
933 ASR_failActiveCommands (
936 struct ccb_hdr * ccb;
938 #if 0 /* Currently handled by callers, unnecessary paranoia currently */
939 /* Left in for historical perspective. */
940 defAlignLong(I2O_EXEC_LCT_NOTIFY_MESSAGE,Message);
941 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
943 /* Send a blind LCT command to wait for the enableSys to complete */
944 Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)ASR_fillMessage(Message,
945 sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT));
946 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
947 I2O_EXEC_LCT_NOTIFY);
948 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
949 I2O_CLASS_MATCH_ANYCLASS);
950 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
955 * We do not need to inform the CAM layer that we had a bus
956 * reset since we manage it on our own, this also prevents the
957 * SCSI_DELAY settling that would be required on other systems.
958 * The `SCSI_DELAY' has already been handled by the card via the
959 * acquisition of the LCT table while we are at CAM priority level.
960 * for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
961 * xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
/* Pop from the head until the active list is empty */
964 while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != (struct ccb_hdr *)NULL) {
965 ASR_ccbRemove (sc, (union asr_ccb *)ccb);
967 ccb->status &= ~CAM_STATUS_MASK;
968 ccb->status |= CAM_REQUEUE_REQ;
969 /* Nothing Transferred */
970 ((struct ccb_scsiio *)ccb)->resid
971 = ((struct ccb_scsiio *)ccb)->dxfer_len;
974 xpt_done ((union ccb *)ccb);
/* Wake any synchronous waiter (see ASR_queue_s poll loop) */
976 wakeup ((caddr_t)ccb);
980 } /* ASR_failActiveCommands */
983 * The following command causes the HBA to reset the specific bus
/*
 * ASR_resetBus: build one HbaBusReset message and fire it at every
 * I2O_PORT-class LCT entry on the requested bus.  The command is sent
 * asynchronously; no reply is awaited.
 */
990 defAlignLong(I2O_HBA_BUS_RESET_MESSAGE,Message);
991 I2O_HBA_BUS_RESET_MESSAGE * Message_Ptr;
992 PI2O_LCT_ENTRY Device;
994 Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(Message,
995 sizeof(I2O_HBA_BUS_RESET_MESSAGE));
996 I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
/* Walk the LCT; table size is expressed in 32-bit words */
998 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
999 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1001 if (((Device->le_type & I2O_PORT) != 0)
1002 && (Device->le_bus == bus)) {
1003 I2O_MESSAGE_FRAME_setTargetAddress(
1004 &Message_Ptr->StdMessageFrame,
1005 I2O_LCT_ENTRY_getLocalTID(Device));
1006 /* Asynchronous command, with no expectations */
1007 (void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1011 } /* ASR_resetBus */
/*
 * ASR_getBlinkLedCode: report the adapter's blink-LED fault code.
 * The code byte ha_blinkLED[0] is only meaningful when the signature
 * byte ha_blinkLED[1] reads 0xBC; in that case it is returned.
 */
1014 ASR_getBlinkLedCode (
1015 IN Asr_softc_t * sc)
1017 if ((sc != (Asr_softc_t *)NULL)
1018 && (sc->ha_blinkLED != (u_int8_t *)NULL)
1019 && (sc->ha_blinkLED[1] == 0xBC)) {
1020 return (sc->ha_blinkLED[0]);
1023 } /* ASR_getBlinkCode */
1026 * Determine the address of a TID lookup. Must be done at high priority
1027 * since the address can be changed by other threads of execution.
1029 * Returns NULL pointer if not indexable (but will attempt to generate
1030 * an index if `new_entry' flag is set to TRUE).
1032 * All addressable entries are to be guaranteed zero if never initialized.
1034 STATIC INLINE tid_t *
1036 INOUT Asr_softc_t * sc,
1042 target2lun_t * bus_ptr;
1043 lun2tid_t * target_ptr;
1047 * Validity checking of incoming parameters. More of a bound
1048 * expansion limit than an issue with the code dealing with the
1051 * sc must be valid before it gets here, so that check could be
1052 * dropped if speed a critical issue.
1054 if ((sc == (Asr_softc_t *)NULL)
1055 || (bus > MAX_CHANNEL)
1056 || (target > sc->ha_MaxId)
1057 || (lun > sc->ha_MaxLun)) {
1058 debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
1059 (u_long)sc, bus, target, lun);
1060 return ((tid_t *)NULL);
1063 * See if there is an associated bus list.
1065 * for performance, allocate in size of BUS_CHUNK chunks.
1066 * BUS_CHUNK must be a power of two. This is to reduce
1067 * fragmentation effects on the allocations.
1069 # define BUS_CHUNK 8
1070 new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
1071 if ((bus_ptr = sc->ha_targets[bus]) == (target2lun_t *)NULL) {
1073 * Allocate a new structure?
1074 * Since one element in structure, the +1
1075 * needed for size has been abstracted.
1077 if ((new_entry == FALSE)
1078 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)kmalloc (
1079 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
1081 == (target2lun_t *)NULL)) {
1082 debug_asr_printf("failed to allocate bus list\n");
1083 return ((tid_t *)NULL);
1085 bzero (bus_ptr, sizeof(*bus_ptr)
1086 + (sizeof(bus_ptr->LUN) * new_size));
1087 bus_ptr->size = new_size + 1;
1088 } else if (bus_ptr->size <= new_size) {
1089 target2lun_t * new_bus_ptr;
1092 * Reallocate a new structure?
1093 * Since one element in structure, the +1
1094 * needed for size has been abstracted.
1096 if ((new_entry == FALSE)
1097 || ((new_bus_ptr = (target2lun_t *)kmalloc (
1098 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
1100 == (target2lun_t *)NULL)) {
1101 debug_asr_printf("failed to reallocate bus list\n");
1102 return ((tid_t *)NULL);
1105 * Zero and copy the whole thing, safer, simpler coding
1106 * and not really performance critical at this point.
1108 bzero (new_bus_ptr, sizeof(*bus_ptr)
1109 + (sizeof(bus_ptr->LUN) * new_size));
1110 bcopy (bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
1111 + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
1112 sc->ha_targets[bus] = new_bus_ptr;
1113 kfree (bus_ptr, M_TEMP);
1114 bus_ptr = new_bus_ptr;
1115 bus_ptr->size = new_size + 1;
1118 * We now have the bus list, lets get to the target list.
1119 * Since most systems have only *one* lun, we do not allocate
1120 * in chunks as above, here we allow one, then in chunk sizes.
1121 * TARGET_CHUNK must be a power of two. This is to reduce
1122 * fragmentation effects on the allocations.
1124 # define TARGET_CHUNK 8
1125 if ((new_size = lun) != 0) {
1126 new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
1128 if ((target_ptr = bus_ptr->LUN[target]) == (lun2tid_t *)NULL) {
1130 * Allocate a new structure?
1131 * Since one element in structure, the +1
1132 * needed for size has been abstracted.
1134 if ((new_entry == FALSE)
1135 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)kmalloc (
1136 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1138 == (lun2tid_t *)NULL)) {
1139 debug_asr_printf("failed to allocate target list\n");
1140 return ((tid_t *)NULL);
1142 bzero (target_ptr, sizeof(*target_ptr)
1143 + (sizeof(target_ptr->TID) * new_size));
1144 target_ptr->size = new_size + 1;
1145 } else if (target_ptr->size <= new_size) {
1146 lun2tid_t * new_target_ptr;
1149 * Reallocate a new structure?
1150 * Since one element in structure, the +1
1151 * needed for size has been abstracted.
1153 if ((new_entry == FALSE)
1154 || ((new_target_ptr = (lun2tid_t *)kmalloc (
1155 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1157 == (lun2tid_t *)NULL)) {
1158 debug_asr_printf("failed to reallocate target list\n");
1159 return ((tid_t *)NULL);
1162 * Zero and copy the whole thing, safer, simpler coding
1163 * and not really performance critical at this point.
1165 bzero (new_target_ptr, sizeof(*target_ptr)
1166 + (sizeof(target_ptr->TID) * new_size));
1167 bcopy (target_ptr, new_target_ptr,
1169 + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
1170 bus_ptr->LUN[target] = new_target_ptr;
1171 kfree (target_ptr, M_TEMP);
1172 target_ptr = new_target_ptr;
1173 target_ptr->size = new_size + 1;
1176 * Now, acquire the TID address from the LUN indexed list.
1178 return (&(target_ptr->TID[lun]));
1179 } /* ASR_getTidAddress */
/*
 * ASR_getTid: read the cached (bus,target,lun) -> TID mapping without
 * creating a slot (FALSE is passed to ASR_getTidAddress); a slot holding
 * (tid_t)0 or (tid_t)-1 means no TID is known.
 * NOTE(review): this extract is missing interleaved source lines (the
 * function header/return/closing lines are absent); do not edit this span
 * without consulting the pristine asr.c.
 */
1182 * Get a pre-existing TID relationship.
1184 * If the TID was never set, return (tid_t)-1.
1186 * should use mutex rather than spl.
1190 IN Asr_softc_t * sc,
1199 if (((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, FALSE))
1201 /* (tid_t)0 or (tid_t)-1 indicate no TID */
1202 || (*tid_ptr == (tid_t)0)) {
/*
 * ASR_setTid: store a (bus,target,lun) -> TID mapping, allocating the
 * lookup tables on demand (TRUE is passed to ASR_getTidAddress).
 * NOTE(review): interleaved source lines are missing in this extract
 * (header, store of *tid_ptr, return path); verify against pristine asr.c.
 */
1212 * Set a TID relationship.
1214 * If the TID was not set, return (tid_t)-1.
1216 * should use mutex rather than spl.
1220 INOUT Asr_softc_t * sc,
1228 if (TID != (tid_t)-1) {
1233 if ((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, TRUE))
/*
 * ASR_rescan: re-acquire the LCT (and HRT), then walk every bus/target/lun,
 * compare the newly reported TID against the cached one, and post CAM async
 * events (AC_LOST_DEVICE / AC_INQ_CHANGED / AC_GETDEV_CHANGED) for devices
 * that appeared, vanished, or changed identity.
 * NOTE(review): this extract drops interleaved source lines (loop headers,
 * closing braces, the xpt_setup_ccb/xpt_free_path calls); consult the
 * pristine asr.c before modifying.
 */
1244 /*-------------------------------------------------------------------------*/
1245 /* Function ASR_rescan */
1246 /*-------------------------------------------------------------------------*/
1247 /* The Parameters Passed To This Function Are : */
1248 /* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1250 /* This Function Will rescan the adapter and resynchronize any data */
1252 /* Return : 0 For OK, Error Code Otherwise */
1253 /*-------------------------------------------------------------------------*/
1257 IN Asr_softc_t * sc)
1263 * Re-acquire the LCT table and synchronize us to the adapter.
1265 if ((error = ASR_acquireLct(sc)) == 0) {
1266 error = ASR_acquireHrt(sc);
/* Iterate busses from ha_MaxBus downward (see "while (--bus >= 0)" below). */
1273 bus = sc->ha_MaxBus;
1274 /* Reset all existing cached TID lookups */
1276 int target, event = 0;
1279 * Scan for all targets on this bus to see if they
1280 * got affected by the rescan.
1282 for (target = 0; target <= sc->ha_MaxId; ++target) {
1285 /* Stay away from the controller ID */
1286 if (target == sc->ha_adapter_target[bus]) {
1289 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
1290 PI2O_LCT_ENTRY Device;
1291 tid_t TID = (tid_t)-1;
1295 * See if the cached TID changed. Search for
1296 * the device in our new LCT.
1298 for (Device = sc->ha_LCT->LCTEntry;
1299 Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
1300 + I2O_LCT_getTableSize(sc->ha_LCT));
1302 if ((Device->le_type != I2O_UNKNOWN)
1303 && (Device->le_bus == bus)
1304 && (Device->le_target == target)
1305 && (Device->le_lun == lun)
1306 && (I2O_LCT_ENTRY_getUserTID(Device)
1308 TID = I2O_LCT_ENTRY_getLocalTID(
1314 * Indicate to the OS that the label needs
1315 * to be recalculated, or that the specific
1316 * open device is no longer valid (Merde)
1317 * because the cached TID changed.
1319 LastTID = ASR_getTid (sc, bus, target, lun);
1320 if (LastTID != TID) {
1321 struct cam_path * path;
/*
 * NOTE(review): events are accumulated when xpt_create_path returns
 * something other than CAM_REQ_CMP — the sense of this test looks
 * inverted; confirm against the pristine source before relying on it.
 */
1323 if (xpt_create_path(&path,
1325 cam_sim_path(sc->ha_sim[bus]),
1326 target, lun) != CAM_REQ_CMP) {
1327 if (TID == (tid_t)-1) {
1328 event |= AC_LOST_DEVICE;
1330 event |= AC_INQ_CHANGED
1331 | AC_GETDEV_CHANGED;
1334 if (TID == (tid_t)-1) {
1338 } else if (LastTID == (tid_t)-1) {
1339 struct ccb_getdev ccb;
1343 path, /*priority*/5);
1359 * We have the option of clearing the
1360 * cached TID for it to be rescanned, or to
1361 * set it now even if the device never got
1362 * accessed. We chose the later since we
1363 * currently do not use the condition that
1364 * the TID ever got cached.
1366 ASR_setTid (sc, bus, target, lun, TID);
1370 * The xpt layer can not handle multiple events at the
1373 if (event & AC_LOST_DEVICE) {
1374 xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
1376 if (event & AC_INQ_CHANGED) {
1377 xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
1379 if (event & AC_GETDEV_CHANGED) {
1380 xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
1382 } while (--bus >= 0);
/*
 * ASR_reset: hard-reset the IOP, re-run ASR_init, rescan, and fail any
 * commands that were active across the reset. Uses ha_in_reset as a small
 * state machine (HA_OPERATIONAL -> HA_IN_RESET, HA_OFF_LINE ->
 * HA_OFF_LINE_RECOVERY) to reject re-entrant resets.
 * NOTE(review): extract is missing interleaved lines (returns, braces,
 * the retry/else paths); consult pristine asr.c before editing.
 */
1386 /*-------------------------------------------------------------------------*/
1387 /* Function ASR_reset */
1388 /*-------------------------------------------------------------------------*/
1389 /* The Parameters Passed To This Function Are : */
1390 /* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1392 /* This Function Will reset the adapter and resynchronize any data */
1395 /*-------------------------------------------------------------------------*/
1399 IN Asr_softc_t * sc)
1404 if ((sc->ha_in_reset == HA_IN_RESET)
1405 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1410 * Promotes HA_OPERATIONAL to HA_IN_RESET,
1411 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1413 ++(sc->ha_in_reset);
1414 if (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0) {
1415 debug_asr_printf ("ASR_resetIOP failed\n");
1417 * We really need to take this card off-line, easier said
1418 * than make sense. Better to keep retrying for now since if a
1419 * UART cable is connected the blinkLEDs the adapter is now in
1420 * a hard state requiring action from the monitor commands to
1421 * the HBA to continue. For debugging waiting forever is a
1422 * good thing. In a production system, however, one may wish
1423 * to instead take the card off-line ...
1425 # if 0 && (defined(HA_OFF_LINE))
1427 * Take adapter off-line.
1429 printf ("asr%d: Taking adapter off-line\n",
1431 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1433 sc->ha_in_reset = HA_OFF_LINE;
/* Spin retrying the IOP reset forever (see rationale above). */
1438 while (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0);
1441 retVal = ASR_init (sc);
1444 debug_asr_printf ("ASR_init failed\n");
1445 sc->ha_in_reset = HA_OFF_LINE;
1448 if (ASR_rescan (sc) != 0) {
1449 debug_asr_printf ("ASR_rescan failed\n");
1451 ASR_failActiveCommands (sc);
1452 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
/* NOTE(review): user-visible typo "Brining" (should be "Bringing");
 * runtime string left untouched in this documentation-only pass. */
1453 printf ("asr%d: Brining adapter back on-line\n",
1455 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1458 sc->ha_in_reset = HA_OPERATIONAL;
/*
 * asr_timeout: callout fired when a queued CCB's timeout expires.
 * Escalation policy visible here: (1) if the adapter reports a blink-LED
 * code it is locked up -> full adapter reset (retried via callout_reset if
 * ASR_reset returns ENXIO); (2) on a second timeout of the same CCB
 * (CAM_CMD_TIMEOUT already latched) -> adapter reset; (3) otherwise mark
 * the CCB CAM_CMD_TIMEOUT, re-arm the timer, and try a SCSI bus reset.
 * NOTE(review): extract is missing interleaved lines (splx/crit sections,
 * returns); consult pristine asr.c before editing.
 */
1463 * Device timeout handler.
1469 union asr_ccb * ccb = (union asr_ccb *)arg;
1470 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1473 debug_asr_print_path(ccb);
1474 debug_asr_printf("timed out");
1477 * Check if the adapter has locked up?
1479 if ((s = ASR_getBlinkLedCode(sc)) != 0) {
1481 printf ("asr%d: Blink LED 0x%x resetting adapter\n",
1482 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
1483 if (ASR_reset (sc) == ENXIO) {
1484 /* Try again later */
1485 callout_reset(&ccb->ccb_h.timeout_ch,
1486 (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
1491 * Abort does not function on the ASR card!!! Walking away from
1492 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
1493 * our best bet, followed by a complete adapter reset if that fails.
1496 /* Check if we already timed out once to raise the issue */
1497 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
1498 debug_asr_printf (" AGAIN\nreinitializing adapter\n");
1499 if (ASR_reset (sc) == ENXIO) {
1500 callout_reset(&ccb->ccb_h.timeout_ch,
1501 (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
1506 debug_asr_printf ("\nresetting bus\n");
1507 /* If the BUS reset does not take, then an adapter reset is next! */
1508 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1509 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
1510 callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
1512 ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
1513 xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
/*
 * Presumably ASR_queue (the function-name line is missing from this
 * extract — confirm against pristine asr.c): post an I2O message to the
 * adapter asynchronously. Grabs a free inbound frame offset via
 * ASR_getMessage, bcopy's the prepared frame into adapter memory, links
 * the CCB onto the active list, and writes the offset to the ToFIFO
 * register. If no frame is available (EMPTY_QUEUE) and the adapter shows
 * a blink-LED code, attempts an adapter reset as a last resort.
 * Returns the message offset (EMPTY_QUEUE on failure).
 */
1518 * send a message asynchronously
1522 IN Asr_softc_t * sc,
1523 IN PI2O_MESSAGE_FRAME Message)
1525 OUT U32 MessageOffset;
1526 union asr_ccb * ccb;
1528 debug_asr_printf ("Host Command Dump:\n");
1529 debug_asr_dump_message (Message);
/* The CCB pointer was stashed in the 64-bit initiator context. */
1531 ccb = (union asr_ccb *)(long)
1532 I2O_MESSAGE_FRAME_getInitiatorContext64(Message);
1534 if ((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE) {
1535 bcopy (Message, sc->ha_Fvirt + MessageOffset,
1536 I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
1538 ASR_ccbAdd (sc, ccb);
1540 /* Post the command */
1541 sc->ha_Virt->ToFIFO = MessageOffset;
1543 if (ASR_getBlinkLedCode(sc)) {
1545 * Unlikely we can do anything if we can't grab a
1546 * message frame :-(, but lets give it a try.
1548 (void)ASR_reset (sc);
1551 return (MessageOffset);
/*
 * SG(SGL, Index, Flags, Buffer, Size): fill simple scatter/gather element
 * Index of list SGL — sets the byte count, ORs the supplied Flags with
 * I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT, and stores the physical address of
 * Buffer (NULL passes through as NULL).
 * NOTE(review): one continuation line of this macro (original line 1559,
 * carrying the Size argument of setCount) is missing from this extract;
 * restore from pristine asr.c before compiling.
 */
1555 /* Simple Scatter Gather elements */
1556 #define SG(SGL,Index,Flags,Buffer,Size) \
1557 I2O_FLAGS_COUNT_setCount( \
1558 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1560 I2O_FLAGS_COUNT_setFlags( \
1561 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1562 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags)); \
1563 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress( \
1564 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]), \
1565 (Buffer == NULL) ? NULL : KVTOPHYS(Buffer))
/*
 * ASR_getParams: issue an I2O UtilParamsGet for one parameter Group of the
 * device identified by TID, using a two-element SG list (operations
 * template out, result buffer in). Returns a pointer into the caller's
 * Buffer at the read-result payload, or NULL on failure.
 * The caller's Buffer must be allocated with defAlignLong (alignment).
 * NOTE(review): extract is missing interleaved lines (struct closers,
 * some setter arguments); consult pristine asr.c before editing.
 */
1568 * Retrieve Parameter Group.
1569 * Buffer must be allocated using defAlignLong macro.
1573 IN Asr_softc_t * sc,
1577 IN unsigned BufferSize)
1579 struct paramGetMessage {
1580 I2O_UTIL_PARAMS_GET_MESSAGE M;
1582 sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
1584 I2O_PARAM_OPERATIONS_LIST_HEADER Header;
1585 I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
1588 defAlignLong(struct paramGetMessage, Message);
1589 struct Operations * Operations_Ptr;
1590 I2O_UTIL_PARAMS_GET_MESSAGE * Message_Ptr;
1591 struct ParamBuffer {
1592 I2O_PARAM_RESULTS_LIST_HEADER Header;
1593 I2O_PARAM_READ_OPERATION_RESULT Read;
/* Build the message frame, then the operations list just past it. */
1597 Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(Message,
1598 sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1599 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1600 Operations_Ptr = (struct Operations *)((char *)Message_Ptr
1601 + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1602 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1603 bzero ((void *)Operations_Ptr, sizeof(struct Operations));
1604 I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
1605 &(Operations_Ptr->Header), 1);
1606 I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
1607 &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
/* 0xFFFF == "all fields" of the group. */
1608 I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
1609 &(Operations_Ptr->Template[0]), 0xFFFF);
1610 I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
1611 &(Operations_Ptr->Template[0]), Group);
1612 bzero ((void *)(Buffer_Ptr = getAlignLong(struct ParamBuffer, Buffer)),
1615 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1617 + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1618 / sizeof(U32)) << 4));
1619 I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
1621 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
1622 I2O_UTIL_PARAMS_GET);
1624 * Set up the buffers as scatter gather elements.
1626 SG(&(Message_Ptr->SGL), 0,
1627 I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
1628 Operations_Ptr, sizeof(struct Operations));
1629 SG(&(Message_Ptr->SGL), 1,
1630 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1631 Buffer_Ptr, BufferSize);
/* Success requires both queue completion and a non-zero result count. */
1633 if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
1634 && (Buffer_Ptr->Header.ResultCount)) {
1635 return ((void *)(Buffer_Ptr->Info));
1637 return ((void *)NULL);
1638 } /* ASR_getParams */
/*
 * ASR_acquireLct: fetch the adapter's Logical Configuration Table.
 * Two-pass protocol: first an ExecLctNotify with a deliberately undersized
 * buffer to learn the real table size, then a kmalloc of that size and a
 * second notify whose SG list is built page-by-page (kmalloc memory above
 * one page is not guaranteed contiguous). Afterwards each LCT entry is
 * classified (le_type) and its bus/target/lun resolved via ASR_getParams.
 * Returns 0 on success.
 * NOTE(review): extract is missing interleaved lines (ENOMEM returns,
 * loop headers, brace closers); consult pristine asr.c before editing.
 */
1641 * Acquire the LCT information.
1645 INOUT Asr_softc_t * sc)
1647 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
1648 PI2O_SGE_SIMPLE_ELEMENT sg;
1649 int MessageSizeInBytes;
1653 PI2O_LCT_ENTRY Entry;
1656 * sc value assumed valid
1658 MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE)
1659 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
1660 if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)kmalloc (
1661 MessageSizeInBytes, M_TEMP, M_WAITOK))
1662 == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
1665 (void)ASR_fillMessage((char *)Message_Ptr, MessageSizeInBytes);
1666 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1668 (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1669 / sizeof(U32)) << 4)));
1670 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1671 I2O_EXEC_LCT_NOTIFY);
1672 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
1673 I2O_CLASS_MATCH_ANYCLASS);
1675 * Call the LCT table to determine the number of device entries
1676 * to reserve space for.
1678 SG(&(Message_Ptr->SGL), 0,
1679 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
1682 * since this code is reused in several systems, code efficiency
1683 * is greater by using a shift operation rather than a divide by
1684 * sizeof(u_int32_t).
1686 I2O_LCT_setTableSize(&Table,
1687 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1688 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1690 * Determine the size of the LCT table.
1693 kfree (sc->ha_LCT, M_TEMP);
1696 * kmalloc only generates contiguous memory when less than a
1697 * page is expected. We must break the request up into an SG list ...
1699 if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
1700 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
1701 || (len > (128 * 1024))) { /* Arbitrary */
1702 kfree (Message_Ptr, M_TEMP);
1705 if ((sc->ha_LCT = (PI2O_LCT)kmalloc (len, M_TEMP, M_WAITOK))
1706 == (PI2O_LCT)NULL) {
1707 kfree (Message_Ptr, M_TEMP);
1711 * since this code is reused in several systems, code efficiency
1712 * is greater by using a shift operation rather than a divide by
1713 * sizeof(u_int32_t).
1715 I2O_LCT_setTableSize(sc->ha_LCT,
1716 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1718 * Convert the access to the LCT table into a SG list.
1720 sg = Message_Ptr->SGL.u.Simple;
1721 v = (caddr_t)(sc->ha_LCT);
1723 int next, base, span;
1726 next = base = KVTOPHYS(v);
1727 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1729 /* How far can we go contiguously */
1730 while ((len > 0) && (base == next)) {
1733 next = trunc_page(base) + PAGE_SIZE;
1744 /* Construct the Flags */
1745 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1747 int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
1749 rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
1750 | I2O_SGL_FLAGS_LAST_ELEMENT
1751 | I2O_SGL_FLAGS_END_OF_BUFFER);
1753 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
1761 * Incrementing requires resizing of the packet.
1764 MessageSizeInBytes += sizeof(*sg);
1765 I2O_MESSAGE_FRAME_setMessageSize(
1766 &(Message_Ptr->StdMessageFrame),
1767 I2O_MESSAGE_FRAME_getMessageSize(
1768 &(Message_Ptr->StdMessageFrame))
1769 + (sizeof(*sg) / sizeof(U32)));
1771 PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;
1773 if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
1774 kmalloc (MessageSizeInBytes, M_TEMP, M_WAITOK))
1775 == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
1776 kfree (sc->ha_LCT, M_TEMP);
1777 sc->ha_LCT = (PI2O_LCT)NULL;
1778 kfree (Message_Ptr, M_TEMP);
/* Grow the message: copy what we have built so far, re-point sg. */
1781 span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
1782 bcopy ((caddr_t)Message_Ptr,
1783 (caddr_t)NewMessage_Ptr, span);
1784 kfree (Message_Ptr, M_TEMP);
1785 sg = (PI2O_SGE_SIMPLE_ELEMENT)
1786 (((caddr_t)NewMessage_Ptr) + span);
1787 Message_Ptr = NewMessage_Ptr;
1792 retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1793 kfree (Message_Ptr, M_TEMP);
1794 if (retval != CAM_REQ_CMP) {
1798 /* If the LCT table grew, lets truncate accesses */
1799 if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
1800 I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
/* Classify every entry and resolve its bus/target/lun. */
1802 for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
1803 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1805 Entry->le_type = I2O_UNKNOWN;
1806 switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {
1808 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
1809 Entry->le_type = I2O_BSA;
1812 case I2O_CLASS_SCSI_PERIPHERAL:
1813 Entry->le_type = I2O_SCSI;
1816 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
1817 Entry->le_type = I2O_FCA;
1820 case I2O_CLASS_BUS_ADAPTER_PORT:
1821 Entry->le_type = I2O_PORT | I2O_SCSI;
1823 case I2O_CLASS_FIBRE_CHANNEL_PORT:
1824 if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
1825 I2O_CLASS_FIBRE_CHANNEL_PORT) {
1826 Entry->le_type = I2O_PORT | I2O_FCA;
1828 { struct ControllerInfo {
1829 I2O_PARAM_RESULTS_LIST_HEADER Header;
1830 I2O_PARAM_READ_OPERATION_RESULT Read;
1831 I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1833 defAlignLong(struct ControllerInfo, Buffer);
1834 PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1836 Entry->le_bus = 0xff;
1837 Entry->le_target = 0xff;
1838 Entry->le_lun = 0xff;
1840 if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
1842 I2O_LCT_ENTRY_getLocalTID(Entry),
1843 I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
1844 Buffer, sizeof(struct ControllerInfo)))
1845 == (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)NULL) {
1849 = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
1856 { struct DeviceInfo {
1857 I2O_PARAM_RESULTS_LIST_HEADER Header;
1858 I2O_PARAM_READ_OPERATION_RESULT Read;
1859 I2O_DPT_DEVICE_INFO_SCALAR Info;
1861 defAlignLong (struct DeviceInfo, Buffer);
1862 PI2O_DPT_DEVICE_INFO_SCALAR Info;
1864 Entry->le_bus = 0xff;
1865 Entry->le_target = 0xff;
1866 Entry->le_lun = 0xff;
1868 if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
1870 I2O_LCT_ENTRY_getLocalTID(Entry),
1871 I2O_DPT_DEVICE_INFO_GROUP_NO,
1872 Buffer, sizeof(struct DeviceInfo)))
1873 == (PI2O_DPT_DEVICE_INFO_SCALAR)NULL) {
1877 |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
1879 = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
/* Track the highest bus number seen, clamped to MAX_CHANNEL. */
1880 if ((Entry->le_bus > sc->ha_MaxBus)
1881 && (Entry->le_bus <= MAX_CHANNEL)) {
1882 sc->ha_MaxBus = Entry->le_bus;
1885 = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
1887 = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
1891 * A zero return value indicates success.
1894 } /* ASR_acquireLct */
/*
 * ASR_init_message: build a PRIVATE_SCSI_SCB_EXECUTE message frame for the
 * given CCB — resolve (or lazily cache) the device TID, copy the CDB, set
 * transfer-direction SCB flags, build the data SG list page-by-page, and
 * append a final SG element for the sense buffer (request sense is always
 * mapped). Returns Message_Ptr, or NULL when no TID exists for the device.
 * NOTE(review): extract is missing interleaved lines (loop/brace closers,
 * the len/v advance inside the contiguity scan); consult pristine asr.c
 * before editing.
 */
1897 * Initialize a message frame.
1898 * We assume that the CDB has already been set up, so all we do here is
1899 * generate the Scatter Gather list.
1901 STATIC INLINE PI2O_MESSAGE_FRAME
1903 IN union asr_ccb * ccb,
1904 OUT PI2O_MESSAGE_FRAME Message)
1906 int next, span, base, rw;
1907 OUT PI2O_MESSAGE_FRAME Message_Ptr;
1908 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1909 PI2O_SGE_SIMPLE_ELEMENT sg;
1911 vm_size_t size, len;
1914 /* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
1915 bzero (Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message),
1916 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT)));
1919 int target = ccb->ccb_h.target_id;
1920 int lun = ccb->ccb_h.target_lun;
1921 int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
/* TID cache miss: scan the LCT for an unclaimed (UserTID==0xFFF) match. */
1924 if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
1925 PI2O_LCT_ENTRY Device;
1928 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
1929 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1931 if ((Device->le_type != I2O_UNKNOWN)
1932 && (Device->le_bus == bus)
1933 && (Device->le_target == target)
1934 && (Device->le_lun == lun)
1935 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
1936 TID = I2O_LCT_ENTRY_getLocalTID(Device);
1937 ASR_setTid (sc, Device->le_bus,
1938 Device->le_target, Device->le_lun,
1944 if (TID == (tid_t)0) {
1945 return ((PI2O_MESSAGE_FRAME)NULL);
1947 I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
1948 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
1949 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
1951 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
1952 (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1953 / sizeof(U32)) << 4));
1954 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
1955 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1956 - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
1957 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
1958 I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
1959 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
1960 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
1961 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1962 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1963 I2O_SCB_FLAG_ENABLE_DISCONNECT
1964 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1965 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
1967 * We do not need any (optional byteswapping) method access to
1968 * the Initiator & Transaction context field.
1970 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
1972 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
1973 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
1977 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
1978 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
1979 bcopy (&(ccb->csio.cdb_io),
1980 ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB, ccb->csio.cdb_len);
1983 * Given a buffer describing a transfer, set up a scatter/gather map
1984 * in a ccb to map that SCSI transfer.
/* rw != 0 means host -> device (CAM_DIR_IN clear). */
1987 rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;
1989 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1990 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1991 (ccb->csio.dxfer_len)
1992 ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
1993 | I2O_SCB_FLAG_ENABLE_DISCONNECT
1994 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1995 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
1996 : (I2O_SCB_FLAG_XFER_FROM_DEVICE
1997 | I2O_SCB_FLAG_ENABLE_DISCONNECT
1998 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1999 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
2000 : (I2O_SCB_FLAG_ENABLE_DISCONNECT
2001 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2002 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2005 * Given a transfer described by a `data', fill in the SG list.
2007 sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];
2009 len = ccb->csio.dxfer_len;
2010 v = ccb->csio.data_ptr;
2011 ASSERT (ccb->csio.dxfer_len >= 0);
2012 MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
2013 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2014 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
2015 while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2016 Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
2018 next = base = KVTOPHYS(v);
2019 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
2021 /* How far can we go contiguously */
2022 while ((len > 0) && (base == next)) {
2023 next = trunc_page(base) + PAGE_SIZE;
2034 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
2036 rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
2038 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
2039 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
2041 MessageSize += sizeof(*sg) / sizeof(U32);
2043 /* We always do the request sense ... */
2044 if ((span = ccb->csio.sense_len) == 0) {
2045 span = sizeof(ccb->csio.sense_data);
2047 SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2048 &(ccb->csio.sense_data), span);
2049 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
2050 MessageSize + (sizeof(*sg) / sizeof(U32)));
2051 return (Message_Ptr);
2052 } /* ASR_init_message */
/*
 * ASR_initOutBound: initialize the adapter's outbound (reply) FIFO.
 * Sends an ExecOutboundInit message, polls a reply word until the IOP
 * acknowledges, (re)uses a contigmalloc'd array of reply frames
 * (sc->ha_Msgs), and primes the FromFIFO with the physical address of
 * each frame. Returns the final reply status (*Reply_Ptr).
 * NOTE(review): extract is missing interleaved lines (interrupt masking
 * around the poll, loop headers/closers); consult pristine asr.c before
 * editing. The poll loop shown ("while (*Reply_Ptr < ...)") busy-waits
 * with no visible timeout in this extract.
 */
2055 * Reset the adapter.
2059 INOUT Asr_softc_t * sc)
2061 struct initOutBoundMessage {
2062 I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
2065 defAlignLong(struct initOutBoundMessage,Message);
2066 PI2O_EXEC_OUTBOUND_INIT_MESSAGE Message_Ptr;
2067 OUT U32 * volatile Reply_Ptr;
2071 * Build up our copy of the Message.
2073 Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(Message,
2074 sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
2075 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2076 I2O_EXEC_OUTBOUND_INIT);
2077 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
2078 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
2079 sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
2081 * Reset the Reply Status
2083 *(Reply_Ptr = (U32 *)((char *)Message_Ptr
2084 + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
2085 SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
2088 * Send the Message out
2090 if ((Old = ASR_initiateCp (sc->ha_Virt, sc->ha_Fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
2094 * Wait for a response (Poll).
2096 while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
2098 * Re-enable the interrupts.
2100 sc->ha_Virt->Mask = Old;
2102 * Populate the outbound table.
2104 if (sc->ha_Msgs == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
2106 /* Allocate the reply frames */
2107 size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
2108 * sc->ha_Msgs_Count;
2111 * contigmalloc only works reliably at
2112 * initialization time.
2114 if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
2115 contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
2116 0xFFFFFFFFul, (u_long)sizeof(U32), 0ul))
2117 != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
2118 (void)bzero ((char *)sc->ha_Msgs, size);
2119 sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
2123 /* Initialize the outbound FIFO */
2124 if (sc->ha_Msgs != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL)
2125 for (size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
2127 sc->ha_Virt->FromFIFO = addr;
2128 addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
2130 return (*Reply_Ptr);
2133 } /* ASR_initOutBound */
/*
 * ASR_setSysTab: send ExecSysTabSet to the IOP. Counts all attached
 * adapters (walking the Asr_softc list), builds a header + one SG element
 * per adapter's ha_SystemTable, plus two terminating NULL SG elements
 * (memory and private space placeholders). Returns the queue status.
 * NOTE(review): extract is missing interleaved lines (kmalloc failure
 * returns, the per-adapter SG index/advance); consult pristine asr.c
 * before editing.
 */
2136 * Set the system table
2140 IN Asr_softc_t * sc)
2142 PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
2143 PI2O_SET_SYSTAB_HEADER SystemTable;
2145 PI2O_SGE_SIMPLE_ELEMENT sg;
2148 if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)kmalloc (
2149 sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK))
2150 == (PI2O_SET_SYSTAB_HEADER)NULL) {
2153 bzero (SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
2154 for (ha = Asr_softc; ha; ha = ha->ha_next) {
2155 ++SystemTable->NumberEntries;
2157 if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)kmalloc (
2158 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2159 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
2160 M_TEMP, M_WAITOK)) == (PI2O_EXEC_SYS_TAB_SET_MESSAGE)NULL) {
2161 kfree (SystemTable, M_TEMP);
2164 (void)ASR_fillMessage((char *)Message_Ptr,
2165 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2166 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
2167 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2169 (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2170 / sizeof(U32)) << 4)));
2171 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2172 I2O_EXEC_SYS_TAB_SET);
2174 * Call the LCT table to determine the number of device entries
2175 * to reserve space for.
2176 * since this code is reused in several systems, code efficiency
2177 * is greater by using a shift operation rather than a divide by
2178 * sizeof(u_int32_t).
2180 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
2181 + ((I2O_MESSAGE_FRAME_getVersionOffset(
2182 &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
2183 SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
2185 for (ha = Asr_softc; ha; ha = ha->ha_next) {
2188 ? (I2O_SGL_FLAGS_DIR)
2189 : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER),
2190 &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
/* Two empty SG elements terminate the SysTab set (mem + private space). */
2193 SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2194 SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
2195 | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2196 retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2197 kfree (Message_Ptr, M_TEMP);
2198 kfree (SystemTable, M_TEMP);
2200 } /* ASR_setSysTab */
/*
 * ASR_acquireHrt (name from the closing marker below): fetch the Hardware
 * Resource Table into a stack buffer, clamp the entry count to
 * MAX_CHANNEL+1, and for each HRT entry patch the matching LCT entries'
 * le_bus from the adapter ID, updating sc->ha_MaxBus as needed.
 * NOTE(review): extract is missing interleaved lines (function header,
 * Hrt struct declaration lines, error returns); consult pristine asr.c
 * before editing.
 */
2204 INOUT Asr_softc_t * sc)
2206 defAlignLong(I2O_EXEC_HRT_GET_MESSAGE,Message);
2207 I2O_EXEC_HRT_GET_MESSAGE * Message_Ptr;
2210 I2O_HRT_ENTRY Entry[MAX_CHANNEL];
2212 u_int8_t NumberOfEntries;
2213 PI2O_HRT_ENTRY Entry;
2215 bzero ((void *)&Hrt, sizeof (Hrt));
2216 Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(Message,
2217 sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2218 + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2219 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2221 + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2222 / sizeof(U32)) << 4)));
2223 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
2227 * Set up the buffers as scatter gather elements.
2229 SG(&(Message_Ptr->SGL), 0,
2230 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2232 if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
2235 if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
2236 > (MAX_CHANNEL + 1)) {
2237 NumberOfEntries = MAX_CHANNEL + 1;
2239 for (Entry = Hrt.Header.HRTEntry;
2240 NumberOfEntries != 0;
2241 ++Entry, --NumberOfEntries) {
2242 PI2O_LCT_ENTRY Device;
2244 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2245 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
2247 if (I2O_LCT_ENTRY_getLocalTID(Device)
2248 == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
2249 Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
2251 if ((Device->le_bus > sc->ha_MaxBus)
2252 && (Device->le_bus <= MAX_CHANNEL)) {
2253 sc->ha_MaxBus = Device->le_bus;
2259 } /* ASR_acquireHrt */
/*
 * ASR_enableSys: send ExecSysEnable to bring the IOP to OPERATIONAL.
 * Returns non-zero on failure (queue status != 0).
 */
2262 * Enable the adapter.
2266 IN Asr_softc_t * sc)
2268 defAlignLong(I2O_EXEC_SYS_ENABLE_MESSAGE,Message);
2269 PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr;
2271 Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(Message,
2272 sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2273 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2274 I2O_EXEC_SYS_ENABLE);
2275 return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2276 } /* ASR_enableSys */
/*
 * ASR_init: full bring-up sequence — outbound FIFO init, system table
 * set, then system enable. Short-circuits: returns non-zero (failure)
 * as soon as any stage fails.
 */
2279 * Perform the stages necessary to initialize the adapter
2283 IN Asr_softc_t * sc)
2285 return ((ASR_initOutBound(sc) == 0)
2286 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2287 || (ASR_enableSys(sc) != CAM_REQ_CMP));
/*
 * ASR_sync: issue a SCSI SYNCHRONIZE CACHE (opcode 0x35, 6-byte CDB) to
 * one (bus,target,lun), but only when the device has a cached TID and
 * there are outstanding commands on the adapter (i.e. the OS did not get
 * to flush them itself — see comment below). Fire-and-forget: the queue
 * status is discarded.
 * NOTE(review): extract is missing interleaved lines (function tail,
 * some setter continuation lines); consult pristine asr.c before editing.
 */
2291 * Send a Synchronize Cache command to the target device.
2295 IN Asr_softc_t * sc,
2303 * We will not synchronize the device when there are outstanding
2304 * commands issued by the OS (this is due to a locked up device,
2305 * as the OS normally would flush all outstanding commands before
2306 * issuing a shutdown or an adapter reset).
2308 if ((sc != (Asr_softc_t *)NULL)
2309 && (LIST_FIRST(&(sc->ha_ccb)) != (struct ccb_hdr *)NULL)
2310 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
2311 && (TID != (tid_t)0)) {
2312 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
2313 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
2316 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
2317 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2318 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2320 I2O_MESSAGE_FRAME_setVersionOffset(
2321 (PI2O_MESSAGE_FRAME)Message_Ptr,
2323 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2324 - sizeof(I2O_SG_ELEMENT))
2325 / sizeof(U32)) << 4));
2326 I2O_MESSAGE_FRAME_setMessageSize(
2327 (PI2O_MESSAGE_FRAME)Message_Ptr,
2328 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2329 - sizeof(I2O_SG_ELEMENT))
2331 I2O_MESSAGE_FRAME_setInitiatorAddress (
2332 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2333 I2O_MESSAGE_FRAME_setFunction(
2334 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2335 I2O_MESSAGE_FRAME_setTargetAddress(
2336 (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
2337 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2338 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2340 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
2341 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2342 I2O_SCB_FLAG_ENABLE_DISCONNECT
2343 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2344 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2345 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2346 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2347 DPT_ORGANIZATION_ID);
2348 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2349 Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
/* Pre-SCSI-3 style: LUN encoded in CDB byte 1, bits 5-7. */
2350 Message_Ptr->CDB[1] = (lun << 5);
2352 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2353 (I2O_SCB_FLAG_XFER_FROM_DEVICE
2354 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2355 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2356 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2358 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
/*
 * ASR_synchronize (name per caller in asr_hbareset below): ASR_sync every
 * (bus,target,lun) triple the adapter knows about — full cache flush
 * before a reset/shutdown.
 */
2365 IN Asr_softc_t * sc)
2367 int bus, target, lun;
2369 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2370 for (target = 0; target <= sc->ha_MaxId; ++target) {
2371 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2372 ASR_sync(sc,bus,target,lun);
/*
 * asr_hbareset: flush all device caches (ASR_synchronize) and then do a
 * full adapter reset, ignoring the reset's return value. As the original
 * comment notes, this resets *all* SCSI busses on the adapter.
 */
2379 * Reset the HBA, targets and BUS.
2380 * Currently this resets *all* the SCSI busses.
2384 IN Asr_softc_t * sc)
2386 ASR_synchronize (sc);
2387 (void)ASR_reset (sc);
2388 } /* asr_hbareset */
2391 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2392 * limit and a reduction in error checking (in the pre 4.0 case).
2397 IN Asr_softc_t * sc)
/*
 * Scan the standard PCI BARs for the first memory-mapped BAR; the I2O
 * spec mandates the IOP registers live behind the first memory BAR.
 */
2403 * I2O specification says we must find first *memory* mapped BAR
2405 for (rid = PCIR_MAPS;
2406 rid < (PCIR_MAPS + 4 * sizeof(u_int32_t));
2407 rid += sizeof(u_int32_t)) {
2408 p = pci_read_config(tag, rid, sizeof(p));
2416 if (rid >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
/*
 * Classic PCI BAR sizing idiom: save the BAR, write all-ones, read
 * back the address mask (low 4 attribute bits stripped), negate to
 * get the decoded size, then restore the original BAR value.
 */
2419 p = pci_read_config(tag, rid, sizeof(p));
2420 pci_write_config(tag, rid, -1, sizeof(p));
2421 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2422 pci_write_config(tag, rid, p, sizeof(p));
2427 * The 2005S Zero Channel RAID solution is not a perfect PCI
2428 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
2429 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
2430 * BAR0+2MB and sets its size to 2MB. The IOP registers are
2431 * accessible via BAR0, the messaging registers are accessible
2432 * via BAR1. If the subdevice code is 50 to 59 decimal.
2434 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s));
2435 if (s != 0xA5111044) {
2436 s = pci_read_config(tag, PCIR_SUBVEND_0, sizeof(s));
2437 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2438 && (ADPTDOMINATOR_SUB_ID_START <= s)
2439 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2440 l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
2444 sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2445 p, p + l, l, RF_ACTIVE);
2446 if (sc->ha_mem_res == (struct resource *)NULL) {
2449 sc->ha_Base = (void *)rman_get_start(sc->ha_mem_res);
2450 if (sc->ha_Base == (void *)NULL) {
2453 sc->ha_Virt = (i2oRegs_t *) rman_get_virtual(sc->ha_mem_res);
/*
 * Split BAR variant (device ID 0xA511/0x1044): the messaging unit is
 * behind the *next* BAR, so size and map it separately into ha_Fvirt.
 */
2454 if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
2455 if ((rid += sizeof(u_int32_t))
2456 >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2459 p = pci_read_config(tag, rid, sizeof(p));
2460 pci_write_config(tag, rid, -1, sizeof(p));
2461 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2462 pci_write_config(tag, rid, p, sizeof(p));
2467 sc->ha_mes_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2468 p, p + l, l, RF_ACTIVE);
2469 if (sc->ha_mes_res == (struct resource *)NULL) {
2472 if ((void *)rman_get_start(sc->ha_mes_res) == (void *)NULL) {
2475 sc->ha_Fvirt = (U8 *) rman_get_virtual(sc->ha_mes_res);
/* Conjoined BAR: messaging registers share the single mapping. */
2477 sc->ha_Fvirt = (U8 *)(sc->ha_Virt);
2480 } /* asr_pci_map_mem */
2483 * A simplified copy of the real pci_map_int with additional
2484 * registration requirements.
/*
 * Allocate a shareable IRQ resource, hook asr_intr as the handler
 * with the softc as its argument, and record the interrupt line from
 * PCI config space in ha_irq.
 */
2489 IN Asr_softc_t * sc)
2494 sc->ha_irq_res = bus_alloc_resource(tag, SYS_RES_IRQ, &rid,
2495 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE);
2496 if (sc->ha_irq_res == (struct resource *)NULL) {
2499 error = bus_setup_intr(tag, sc->ha_irq_res, 0,
2500 (driver_intr_t *)asr_intr, (void *)sc,
2501 &(sc->ha_intr), NULL);
2505 sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char));
2507 } /* asr_pci_map_int */
2510 * Attach the devices, and virtual devices to the driver list.
/*
 * asr_attach: full device attach path.  Sequence visible here:
 *   1. allocate/zero the softc and link it onto the global HA list;
 *   2. map BARs and enable memory/busmaster decoding;
 *   3. reset the IOP and fetch its EXEC_STATUS_GET reply;
 *   4. size the inbound/outbound queues and SG list from that reply;
 *   5. ASR_init + LCT/HRT acquisition, derive MaxId/MaxLun/initiator
 *      IDs from the logical configuration table;
 *   6. issue an adapter-interpreted INQUIRY to print the HBA model;
 *   7. register one CAM SIM + wildcard path per channel and create
 *      the /dev/rasr%d control node.
 * NOTE(review): interior lines are elided in this view (embedded line
 * numbers skip), so error-path braces and some declarations are not
 * visible here.
 */
2513 asr_attach (ATTACH_ARGS)
2516 struct scsi_inquiry_data * iq;
2519 sc = kmalloc(sizeof(*sc), M_DEVBUF, M_INTWAIT);
2520 if (Asr_softc == (Asr_softc_t *)NULL) {
2522 * Fixup the OS revision as saved in the dptsig for the
2523 * engine (dptioctl.h) to pick up.
2525 bcopy (osrelease, &ASR_sig.dsDescription[16], 5);
2526 printf ("asr%d: major=%d\n", unit, asr_ops.head.maj);
2529 * Initialize the software structure
2531 bzero (sc, sizeof(*sc));
2532 LIST_INIT(&(sc->ha_ccb));
2533 /* Link us into the HA list */
2537 for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next));
2541 PI2O_EXEC_STATUS_GET_REPLY status;
2545 * This is the real McCoy!
2547 if (!asr_pci_map_mem(tag, sc)) {
2548 printf ("asr%d: could not map memory\n", unit);
2549 ATTACH_RETURN(ENXIO);
2551 /* Enable if not formerly enabled */
2552 pci_write_config (tag, PCIR_COMMAND,
2553 pci_read_config (tag, PCIR_COMMAND, sizeof(char))
2554 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2555 /* Knowledge is power, responsibility is direct */
/*
 * Local mirror of the (normally private) pci_devinfo layout, used to
 * dig the bus/slot numbers out of the ivars.  Fragile by design --
 * must track the kernel's real struct.
 */
2557 struct pci_devinfo {
2558 STAILQ_ENTRY(pci_devinfo) pci_links;
2559 struct resource_list resources;
2561 } * dinfo = device_get_ivars(tag);
2562 sc->ha_pciBusNum = dinfo->cfg.bus;
2563 sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3)
2566 /* Check if the device is there? */
2567 if ((ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt) == 0)
2568 || ((status = (PI2O_EXEC_STATUS_GET_REPLY)kmalloc (
2569 sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK))
2570 == (PI2O_EXEC_STATUS_GET_REPLY)NULL)
2571 || (ASR_getStatus(sc->ha_Virt, sc->ha_Fvirt, status) == NULL)) {
2572 printf ("asr%d: could not initialize hardware\n", unit);
2573 ATTACH_RETURN(ENODEV); /* Get next, maybe better luck */
/* Seed the I2O system table entry from the adapter's status reply. */
2575 sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2576 sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2577 sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2578 sc->ha_SystemTable.IopState = status->IopState;
2579 sc->ha_SystemTable.MessengerType = status->MessengerType;
2580 sc->ha_SystemTable.InboundMessageFrameSize
2581 = status->InboundMFrameSize;
2582 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow
2583 = (U32)(sc->ha_Base) + (U32)(&(((i2oRegs_t *)NULL)->ToFIFO));
2585 if (!asr_pci_map_int(tag, (void *)sc)) {
2586 printf ("asr%d: could not map interrupt\n", unit);
2587 ATTACH_RETURN(ENXIO);
2590 /* Adjust the maximum inbound count */
2591 if (((sc->ha_QueueSize
2592 = I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status))
2594 || (sc->ha_QueueSize == 0)) {
2595 sc->ha_QueueSize = MAX_INBOUND;
2598 /* Adjust the maximum outbound count */
2599 if (((sc->ha_Msgs_Count
2600 = I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status))
2602 || (sc->ha_Msgs_Count == 0)) {
2603 sc->ha_Msgs_Count = MAX_OUTBOUND;
2605 if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2606 sc->ha_Msgs_Count = sc->ha_QueueSize;
2609 /* Adjust the maximum SG size to adapter */
2610 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(
2611 status) << 2)) > MAX_INBOUND_SIZE) {
2612 size = MAX_INBOUND_SIZE;
2614 kfree (status, M_TEMP);
2615 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2616 + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2620 * Only do a bus/HBA reset on the first time through. On this
2621 * first time through, we do not send a flush to the devices.
2623 if (ASR_init(sc) == 0) {
2625 I2O_PARAM_RESULTS_LIST_HEADER Header;
2626 I2O_PARAM_READ_OPERATION_RESULT Read;
2627 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2629 defAlignLong (struct BufferInfo, Buffer);
2630 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2631 # define FW_DEBUG_BLED_OFFSET 8
/*
 * Locate the firmware's blink-LED status byte via the DPT IOP
 * buffers parameter group; cached in ha_blinkLED for fault polling.
 */
2633 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2634 ASR_getParams(sc, 0,
2635 I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2636 Buffer, sizeof(struct BufferInfo)))
2637 != (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)NULL) {
2638 sc->ha_blinkLED = sc->ha_Fvirt
2639 + I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info)
2640 + FW_DEBUG_BLED_OFFSET;
2642 if (ASR_acquireLct(sc) == 0) {
2643 (void)ASR_acquireHrt(sc);
2646 printf ("asr%d: failed to initialize\n", unit);
2647 ATTACH_RETURN(ENXIO);
2650 * Add in additional probe responses for more channels. We
2651 * are reusing the variable `target' for a channel loop counter.
2652 * Done here because we need both the acquireLct and
2655 { PI2O_LCT_ENTRY Device;
2657 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2658 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
2660 if (Device->le_type == I2O_UNKNOWN) {
2663 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
2664 if (Device->le_target > sc->ha_MaxId) {
2665 sc->ha_MaxId = Device->le_target;
2667 if (Device->le_lun > sc->ha_MaxLun) {
2668 sc->ha_MaxLun = Device->le_lun;
2671 if (((Device->le_type & I2O_PORT) != 0)
2672 && (Device->le_bus <= MAX_CHANNEL)) {
2673 /* Do not increase MaxId for efficiency */
2674 sc->ha_adapter_target[Device->le_bus]
2675 = Device->le_target;
2682 * Print the HBA model number as inquired from the card.
2685 printf ("asr%d:", unit);
2687 if ((iq = (struct scsi_inquiry_data *)kmalloc (
2688 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK))
2689 != (struct scsi_inquiry_data *)NULL) {
2690 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
2691 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
2694 bzero (iq, sizeof(struct scsi_inquiry_data));
/*
 * Hand-build a private SCSI-execute frame carrying a 6-byte INQUIRY
 * CDB, marked "Interpret" so the adapter itself answers rather than
 * forwarding to a target device.
 */
2696 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
2697 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2698 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2700 I2O_MESSAGE_FRAME_setVersionOffset(
2701 (PI2O_MESSAGE_FRAME)Message_Ptr,
2703 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2704 - sizeof(I2O_SG_ELEMENT))
2705 / sizeof(U32)) << 4));
2706 I2O_MESSAGE_FRAME_setMessageSize(
2707 (PI2O_MESSAGE_FRAME)Message_Ptr,
2708 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2709 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT))
2711 I2O_MESSAGE_FRAME_setInitiatorAddress (
2712 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2713 I2O_MESSAGE_FRAME_setFunction(
2714 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2715 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2716 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2718 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2719 I2O_SCB_FLAG_ENABLE_DISCONNECT
2720 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2721 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2722 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
2723 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2724 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2725 DPT_ORGANIZATION_ID);
2726 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2727 Message_Ptr->CDB[0] = INQUIRY;
2728 Message_Ptr->CDB[4] = (unsigned char)sizeof(struct scsi_inquiry_data);
/* Allocation-length byte wrapped to 0 -> ask for the SCSI max, 255. */
2729 if (Message_Ptr->CDB[4] == 0) {
2730 Message_Ptr->CDB[4] = 255;
2733 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2734 (I2O_SCB_FLAG_XFER_FROM_DEVICE
2735 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2736 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2737 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2739 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2740 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2741 sizeof(struct scsi_inquiry_data));
2742 SG(&(Message_Ptr->SGL), 0,
2743 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2744 iq, sizeof(struct scsi_inquiry_data));
2745 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
/* Print vendor/product/revision only if non-blank in the reply. */
2747 if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
2749 ASR_prstring (iq->vendor, 8);
2752 if (iq->product[0] && (iq->product[0] != ' ')) {
2754 ASR_prstring (iq->product, 16);
2757 if (iq->revision[0] && (iq->revision[0] != ' ')) {
2758 printf (" FW Rev. ");
2759 ASR_prstring (iq->revision, 4);
2762 kfree ((caddr_t)iq, M_TEMP);
2767 printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
2768 (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
2771 * fill in the prototype cam_path.
2775 union asr_ccb * ccb;
2777 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
2778 printf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit);
2779 ATTACH_RETURN(ENOMEM);
/* One CAM SIM + bus registration + wildcard path per channel. */
2781 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2782 int QueueSize = sc->ha_QueueSize;
2784 if (QueueSize > MAX_INBOUND) {
2785 QueueSize = MAX_INBOUND;
2789 * Construct our first channel SIM entry
2791 sc->ha_sim[bus] = cam_sim_alloc(
2792 asr_action, asr_poll, "asr", sc,
2793 unit, 1, QueueSize, NULL);
2794 if (sc->ha_sim[bus] == NULL)
2797 if (xpt_bus_register(sc->ha_sim[bus], bus)
2799 cam_sim_free(sc->ha_sim[bus]);
2800 sc->ha_sim[bus] = NULL;
2804 if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
2805 cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
2806 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2808 cam_sim_path(sc->ha_sim[bus]));
2809 cam_sim_free(sc->ha_sim[bus]);
2810 sc->ha_sim[bus] = NULL;
2817 * Generate the device node information
2819 make_dev(&asr_ops, unit, 0, 0, S_IRWXU, "rasr%d", unit);
/*
 * CAM poll entry point: when interrupts are unavailable, drain
 * completions by invoking the interrupt handler directly with this
 * SIM's softc.
 */
2825 IN struct cam_sim *sim)
2827 asr_intr(cam_sim_softc(sim));
/*
 * asr_action: CAM SIM action routine -- dispatch on the CCB function
 * code.  XPT_SCSI_IO is translated into an I2O private SCSI-execute
 * frame and queued to the adapter; most other codes are answered
 * inline.  NOTE(review): interior lines are elided in this view
 * (embedded line numbers skip), so some braces/xpt_done calls are
 * not visible here.
 */
2832 IN struct cam_sim * sim,
2835 struct Asr_softc * sc;
2837 debug_asr_printf ("asr_action(%lx,%lx{%x})\n",
2838 (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code);
2840 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));
/* Stash the softc in the CCB's SIM-private pointer for completion. */
2842 ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);
2844 switch (ccb->ccb_h.func_code) {
2846 /* Common cases first */
2847 case XPT_SCSI_IO: /* Execute the requested I/O operation */
2850 char M[MAX_INBOUND_SIZE];
2852 defAlignLong(struct Message,Message);
2853 PI2O_MESSAGE_FRAME Message_Ptr;
2855 /* Reject incoming commands while we are resetting the card */
2856 if (sc->ha_in_reset != HA_OPERATIONAL) {
2857 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2858 if (sc->ha_in_reset >= HA_OFF_LINE) {
2859 /* HBA is now off-line */
2860 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2862 /* HBA currently resetting, try again later. */
2863 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2865 debug_asr_cmd_printf (" e\n");
2867 debug_asr_cmd_printf (" q\n");
/* Diagnostic: CCB arrived already completed -- should not happen. */
2870 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2872 "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
2873 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
2874 ccb->csio.cdb_io.cdb_bytes[0],
2876 ccb->ccb_h.target_id,
2877 ccb->ccb_h.target_lun);
2879 debug_asr_cmd_printf ("(%d,%d,%d,%d)",
2882 ccb->ccb_h.target_id,
2883 ccb->ccb_h.target_lun);
2884 debug_asr_cmd_dump_ccb(ccb);
/* NULL from ASR_init_message means no TID maps to this device. */
2886 if ((Message_Ptr = ASR_init_message ((union asr_ccb *)ccb,
2887 (PI2O_MESSAGE_FRAME)Message)) != (PI2O_MESSAGE_FRAME)NULL) {
2888 debug_asr_cmd2_printf ("TID=%x:\n",
2889 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
2890 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
2891 debug_asr_cmd2_dump_message(Message_Ptr);
2892 debug_asr_cmd1_printf (" q");
2894 if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
2895 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2896 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2897 debug_asr_cmd_printf (" E\n");
2900 debug_asr_cmd_printf (" Q\n");
2904 * We will get here if there is no valid TID for the device
2905 * referenced in the scsi command packet.
2907 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2908 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2909 debug_asr_cmd_printf (" B\n");
2914 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
2915 /* Reset HBA device ... */
2917 ccb->ccb_h.status = CAM_REQ_CMP;
2921 # if (defined(REPORT_LUNS))
2924 case XPT_ABORT: /* Abort the specified CCB */
2926 ccb->ccb_h.status = CAM_REQ_INVALID;
2930 case XPT_SET_TRAN_SETTINGS:
2932 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2936 case XPT_GET_TRAN_SETTINGS:
2937 /* Get default/user set transfer settings for the target */
2939 struct ccb_trans_settings *cts;
2943 target_mask = 0x01 << ccb->ccb_h.target_id;
/* Report fixed wide/40MHz/tagged capabilities for user settings. */
2944 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
2945 cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
2946 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2947 cts->sync_period = 6; /* 40MHz */
2948 cts->sync_offset = 15;
2950 cts->valid = CCB_TRANS_SYNC_RATE_VALID
2951 | CCB_TRANS_SYNC_OFFSET_VALID
2952 | CCB_TRANS_BUS_WIDTH_VALID
2953 | CCB_TRANS_DISC_VALID
2954 | CCB_TRANS_TQ_VALID;
2955 ccb->ccb_h.status = CAM_REQ_CMP;
2957 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
/* Synthesize a drive geometry from the volume size (standard CAM
 * heads/sectors heuristic tiers). */
2963 case XPT_CALC_GEOMETRY:
2965 struct ccb_calc_geometry *ccg;
2967 u_int32_t secs_per_cylinder;
2970 size_mb = ccg->volume_size
2971 / ((1024L * 1024L) / ccg->block_size);
2973 if (size_mb > 4096) {
2975 ccg->secs_per_track = 63;
2976 } else if (size_mb > 2048) {
2978 ccg->secs_per_track = 63;
2979 } else if (size_mb > 1024) {
2981 ccg->secs_per_track = 63;
2984 ccg->secs_per_track = 32;
2986 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2987 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2988 ccb->ccb_h.status = CAM_REQ_CMP;
2993 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
2994 ASR_resetBus (sc, cam_sim_bus(sim));
2995 ccb->ccb_h.status = CAM_REQ_CMP;
2999 case XPT_TERM_IO: /* Terminate the I/O process */
3001 ccb->ccb_h.status = CAM_REQ_INVALID;
3005 case XPT_PATH_INQ: /* Path routing inquiry */
3007 struct ccb_pathinq *cpi = &(ccb->cpi);
3009 cpi->version_num = 1; /* XXX??? */
3010 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3011 cpi->target_sprt = 0;
3012 /* Not necessary to reset bus, done by HDM initialization */
3013 cpi->hba_misc = PIM_NOBUSRESET;
3014 cpi->hba_eng_cnt = 0;
3015 cpi->max_target = sc->ha_MaxId;
3016 cpi->max_lun = sc->ha_MaxLun;
3017 cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
3018 cpi->bus_id = cam_sim_bus(sim);
3019 cpi->base_transfer_speed = 3300;
3020 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3021 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
3022 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3023 cpi->unit_number = cam_sim_unit(sim);
3024 cpi->ccb_h.status = CAM_REQ_CMP;
3029 ccb->ccb_h.status = CAM_REQ_INVALID;
3037 * Handle processing of current CCB as pointed to by the Status.
/*
 * Interrupt/completion handler body: drain reply frames from the
 * outbound (From) FIFO, recover the owning CCB from the 64-bit
 * initiator context, translate the I2O detailed status code to a CAM
 * status, copy back residual/sense data, then return the reply frame
 * to the FIFO and complete the CCB.  NOTE(review): interior lines are
 * elided in this view (embedded line numbers skip).
 */
3041 IN Asr_softc_t * sc)
3046 sc->ha_Virt->Status & Mask_InterruptsDisabled;
3048 union asr_ccb * ccb;
3050 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
/* FromFIFO is read twice: a second read confirms "really empty". */
3052 if (((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)
3053 && ((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)) {
/* Convert the bus/physical reply offset into a kernel virtual
 * pointer within the preallocated ha_Msgs reply pool. */
3056 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
3057 - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
3059 * We do not need any (optional byteswapping) method access to
3060 * the Initiator context field.
3062 ccb = (union asr_ccb *)(long)
3063 I2O_MESSAGE_FRAME_getInitiatorContext64(
3064 &(Reply->StdReplyFrame.StdMessageFrame));
/* Adapter flagged the original frame as failed: recover the
 * preserved MFA and recycle it with a NOP message. */
3065 if (I2O_MESSAGE_FRAME_getMsgFlags(
3066 &(Reply->StdReplyFrame.StdMessageFrame))
3067 & I2O_MESSAGE_FLAGS_FAIL) {
3068 defAlignLong(I2O_UTIL_NOP_MESSAGE,Message);
3069 PI2O_UTIL_NOP_MESSAGE Message_Ptr;
3072 MessageOffset = (u_long)
3073 I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
3074 (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
3076 * Get the Original Message Frame's address, and get
3077 * its Transaction Context into our space. (Currently
3078 * unused at original authorship, but better to be
3079 * safe than sorry). Straight copy means that we
3080 * need not concern ourselves with the (optional
3081 * byteswapping) method access.
3083 Reply->StdReplyFrame.TransactionContext
3084 = ((PI2O_SINGLE_REPLY_MESSAGE_FRAME)
3085 (sc->ha_Fvirt + MessageOffset))->TransactionContext;
3087 * For 64 bit machines, we need to reconstruct the
3090 ccb = (union asr_ccb *)(long)
3091 I2O_MESSAGE_FRAME_getInitiatorContext64(
3092 &(Reply->StdReplyFrame.StdMessageFrame));
3094 * Unique error code for command failure.
3096 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3097 &(Reply->StdReplyFrame), (u_int16_t)-2);
3099 * Modify the message frame to contain a NOP and
3100 * re-issue it to the controller.
3102 Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
3103 Message, sizeof(I2O_UTIL_NOP_MESSAGE));
3104 # if (I2O_UTIL_NOP != 0)
3105 I2O_MESSAGE_FRAME_setFunction (
3106 &(Message_Ptr->StdMessageFrame),
3110 * Copy the packet out to the Original Message
3112 bcopy ((caddr_t)Message_Ptr,
3113 sc->ha_Fvirt + MessageOffset,
3114 sizeof(I2O_UTIL_NOP_MESSAGE));
3118 sc->ha_Virt->ToFIFO = MessageOffset;
3122 * Asynchronous command with no return requirements,
3123 * and a generic handler for immunity against odd error
3124 * returns from the adapter.
3126 if (ccb == (union asr_ccb *)NULL) {
3128 * Return Reply so that it can be used for the
3131 sc->ha_Virt->FromFIFO = ReplyOffset;
3135 /* Welease Wadjah! (and stop timeouts) */
3136 ASR_ccbRemove (sc, ccb);
/* Map the I2O detailed status to the closest CAM status code. */
3139 I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
3140 &(Reply->StdReplyFrame))) {
3142 case I2O_SCSI_DSC_SUCCESS:
3143 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3144 ccb->ccb_h.status |= CAM_REQ_CMP;
3147 case I2O_SCSI_DSC_CHECK_CONDITION:
3148 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3149 ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID;
3152 case I2O_SCSI_DSC_BUSY:
3154 case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
3156 case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
3158 case I2O_SCSI_HBA_DSC_BUS_BUSY:
3159 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3160 ccb->ccb_h.status |= CAM_SCSI_BUSY;
3163 case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
3164 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3165 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3168 case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
3170 case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
3172 case I2O_SCSI_HBA_DSC_LUN_INVALID:
3174 case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
3175 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3176 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
3179 case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
3181 case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
3182 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3183 ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
/* Unknown status: ask CAM to requeue the request. */
3187 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3188 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Compute residual from the reply's transfer count. */
3191 if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
3193 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
3197 /* Sense data in reply packet */
3198 if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
3199 u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);
/* Clamp sense length to the CCB buffer, the I2O frame field,
 * and the caller-requested sense_len before copying. */
3202 if (size > sizeof(ccb->csio.sense_data)) {
3203 size = sizeof(ccb->csio.sense_data);
3205 if (size > I2O_SCSI_SENSE_DATA_SZ) {
3206 size = I2O_SCSI_SENSE_DATA_SZ;
3208 if ((ccb->csio.sense_len)
3209 && (size > ccb->csio.sense_len)) {
3210 size = ccb->csio.sense_len;
3212 bcopy ((caddr_t)Reply->SenseData,
3213 (caddr_t)&(ccb->csio.sense_data), size);
3218 * Return Reply so that it can be used for the next command
3219 * since we have no more need for it now
3221 sc->ha_Virt->FromFIFO = ReplyOffset;
/* CCBs without a CAM path are internal waiters -- wake them. */
3223 if (ccb->ccb_h.path) {
3224 xpt_done ((union ccb *)ccb);
3226 wakeup ((caddr_t)ccb);
3232 #undef QueueSize /* Grrrr */
3233 #undef SG_Size /* Grrrr */
3236 * Meant to be included at the bottom of asr.c !!!
3240 * Included here as hard coded. Done because other necessary include
3241 * files utilize C++ comment structures which make them a nuisance to
3242 * include here just to pick up these three typedefs.
3244 typedef U32 DPT_TAG_T;
3245 typedef U32 DPT_MSG_T;
3246 typedef U32 DPT_RTN_T;
3248 #undef SCSI_RESET /* Conflicts with "scsi/scsiconf.h" definition */
3249 #include "osd_unix.h"
/* Map a character-device to its controller unit number. */
3251 #define asr_unit(dev) minor(dev)
/*
 * ASR_get_sc: walk the global Asr_softc list looking for the softc
 * whose first SIM's CAM unit matches the device's unit number.
 */
3253 STATIC INLINE Asr_softc_t *
3257 int unit = asr_unit(dev);
3258 OUT Asr_softc_t * sc = Asr_softc;
3260 while (sc && sc->ha_sim[0] && (cam_sim_unit(sc->ha_sim[0]) != unit)) {
/* Single-open gate for the ioctl device (see asr_open/asr_close). */
3266 STATIC u_int8_t ASR_ctlr_held;
3267 #if (!defined(UNREFERENCED_PARAMETER))
3268 # define UNREFERENCED_PARAMETER(x) (void)(x)
/*
 * asr_open: control-device open.  Rejects unknown units (no matching
 * softc), enforces exclusive access via ASR_ctlr_held, and requires
 * superuser credentials (suser_cred).  NOTE(review): interior lines
 * are elided in this view; error codes set on the failure branches
 * are not visible here.
 */
3272 asr_open(struct dev_open_args *ap)
3274 cdev_t dev = ap->a_head.a_dev;
3277 if (ASR_get_sc (dev) == (Asr_softc_t *)NULL) {
3281 if (ASR_ctlr_held) {
3283 } else if ((error = suser_cred(ap->a_cred, 0)) == 0) {
3291 asr_close(struct dev_close_args *ap)
3298 /*-------------------------------------------------------------------------*/
3299 /* Function ASR_queue_i */
3300 /*-------------------------------------------------------------------------*/
3301 /* The Parameters Passed To This Function Are : */
3302 /* Asr_softc_t * : HBA miniport driver's adapter data storage. */
3303 /* PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command */
3304 /* I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure */
3306 /* This Function Will Take The User Request Packet And Convert It To An */
3307 /* I2O MSG And Send It Off To The Adapter. */
3309 /* Return : 0 For OK, Error Code Otherwise */
3310 /*-------------------------------------------------------------------------*/
3313 IN Asr_softc_t * sc,
3314 INOUT PI2O_MESSAGE_FRAME Packet)
3316 union asr_ccb * ccb;
3317 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
3318 PI2O_MESSAGE_FRAME Message_Ptr;
3319 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply_Ptr;
3320 int MessageSizeInBytes;
3321 int ReplySizeInBytes;
3324 /* Scatter Gather buffer list */
3325 struct ioctlSgList_S {
3326 SLIST_ENTRY(ioctlSgList_S) link;
3328 I2O_FLAGS_COUNT FlagsCount;
3329 char KernelSpace[sizeof(long)];
3331 /* Generates a `first' entry */
3332 SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;
3334 if (ASR_getBlinkLedCode(sc)) {
3335 debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
3336 ASR_getBlinkLedCode(sc));
3339 /* Copy in the message into a local allocation */
3340 if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (
3341 sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK))
3342 == (PI2O_MESSAGE_FRAME)NULL) {
3343 debug_usr_cmd_printf (
3344 "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3347 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3348 sizeof(I2O_MESSAGE_FRAME))) != 0) {
3349 kfree (Message_Ptr, M_TEMP);
3350 debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
3353 /* Acquire information to determine type of packet */
3354 MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
3355 /* The offset of the reply information within the user packet */
3356 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
3357 + MessageSizeInBytes);
3359 /* Check if the message is a synchronous initialization command */
3360 s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
3361 kfree (Message_Ptr, M_TEMP);
3364 case I2O_EXEC_IOP_RESET:
3367 status = ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt);
3368 ReplySizeInBytes = sizeof(status);
3369 debug_usr_cmd_printf ("resetIOP done\n");
3370 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3374 case I2O_EXEC_STATUS_GET:
3375 { I2O_EXEC_STATUS_GET_REPLY status;
3377 if (ASR_getStatus (sc->ha_Virt, sc->ha_Fvirt, &status)
3378 == (PI2O_EXEC_STATUS_GET_REPLY)NULL) {
3379 debug_usr_cmd_printf ("getStatus failed\n");
3382 ReplySizeInBytes = sizeof(status);
3383 debug_usr_cmd_printf ("getStatus done\n");
3384 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3388 case I2O_EXEC_OUTBOUND_INIT:
3391 status = ASR_initOutBound(sc);
3392 ReplySizeInBytes = sizeof(status);
3393 debug_usr_cmd_printf ("intOutBound done\n");
3394 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3399 /* Determine if the message size is valid */
3400 if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
3401 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
3402 debug_usr_cmd_printf ("Packet size %d incorrect\n",
3403 MessageSizeInBytes);
3407 if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (MessageSizeInBytes,
3408 M_TEMP, M_WAITOK)) == (PI2O_MESSAGE_FRAME)NULL) {
3409 debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3410 MessageSizeInBytes);
3413 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3414 MessageSizeInBytes)) != 0) {
3415 kfree (Message_Ptr, M_TEMP);
3416 debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
3417 MessageSizeInBytes, error);
3421 /* Check the size of the reply frame, and start constructing */
3423 if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
3424 sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK))
3425 == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
3426 kfree (Message_Ptr, M_TEMP);
3427 debug_usr_cmd_printf (
3428 "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3431 if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
3432 sizeof(I2O_MESSAGE_FRAME))) != 0) {
3433 kfree (Reply_Ptr, M_TEMP);
3434 kfree (Message_Ptr, M_TEMP);
3435 debug_usr_cmd_printf (
3436 "Failed to copy in reply frame, errno=%d\n",
3440 ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
3441 &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
3442 kfree (Reply_Ptr, M_TEMP);
3443 if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
3444 kfree (Message_Ptr, M_TEMP);
3445 debug_usr_cmd_printf (
3446 "Failed to copy in reply frame[%d], errno=%d\n",
3447 ReplySizeInBytes, error);
3451 if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
3452 ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
3454 : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
3455 M_TEMP, M_WAITOK)) == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
3456 kfree (Message_Ptr, M_TEMP);
3457 debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3461 (void)ASR_fillMessage ((char *)Reply_Ptr, ReplySizeInBytes);
3462 Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
3463 = Message_Ptr->InitiatorContext;
3464 Reply_Ptr->StdReplyFrame.TransactionContext
3465 = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
3466 I2O_MESSAGE_FRAME_setMsgFlags(
3467 &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3468 I2O_MESSAGE_FRAME_getMsgFlags(
3469 &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
3470 | I2O_MESSAGE_FLAGS_REPLY);
3472 /* Check if the message is a special case command */
3473 switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
3474 case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
3475 if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
3476 Message_Ptr) & 0xF0) >> 2)) {
3477 kfree (Message_Ptr, M_TEMP);
3478 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3479 &(Reply_Ptr->StdReplyFrame),
3480 (ASR_setSysTab(sc) != CAM_REQ_CMP));
3481 I2O_MESSAGE_FRAME_setMessageSize(
3482 &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3483 sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
3484 error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
3486 kfree (Reply_Ptr, M_TEMP);
3491 /* Deal in the general case */
3492 /* First allocate and optionally copy in each scatter gather element */
3493 SLIST_INIT(&sgList);
3494 if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
3495 PI2O_SGE_SIMPLE_ELEMENT sg;
3498 * since this code is reused in several systems, code
3499 * efficiency is greater by using a shift operation rather
3500 * than a divide by sizeof(u_int32_t).
3502 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3503 + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
3505 while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
3506 + MessageSizeInBytes)) {
3510 if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3511 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
3515 len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
3516 debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
3517 sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3518 + ((I2O_MESSAGE_FRAME_getVersionOffset(
3519 Message_Ptr) & 0xF0) >> 2)),
3520 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);
3522 if ((elm = (struct ioctlSgList_S *)kmalloc (
3523 sizeof(*elm) - sizeof(elm->KernelSpace) + len,
3525 == (struct ioctlSgList_S *)NULL) {
3526 debug_usr_cmd_printf (
3527 "Failed to allocate SG[%d]\n", len);
3531 SLIST_INSERT_HEAD(&sgList, elm, link);
3532 elm->FlagsCount = sg->FlagsCount;
3533 elm->UserSpace = (caddr_t)
3534 (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
3535 v = elm->KernelSpace;
3536 /* Copy in outgoing data (DIR bit could be invalid) */
3537 if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
3542 * If the buffer is not contiguous, lets
3543 * break up the scatter/gather entries.
3546 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
3547 (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
3548 int next, base, span;
3551 next = base = KVTOPHYS(v);
3552 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
3555 /* How far can we go physically contiguously */
3556 while ((len > 0) && (base == next)) {
3559 next = trunc_page(base) + PAGE_SIZE;
3570 /* Construct the Flags */
3571 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
3574 int flags = I2O_FLAGS_COUNT_getFlags(
3575 &(elm->FlagsCount));
3576 /* Any remaining length? */
3579 ~(I2O_SGL_FLAGS_END_OF_BUFFER
3580 | I2O_SGL_FLAGS_LAST_ELEMENT);
3582 I2O_FLAGS_COUNT_setFlags(
3583 &(sg->FlagsCount), flags);
3586 debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
3587 sg - (PI2O_SGE_SIMPLE_ELEMENT)
3588 ((char *)Message_Ptr
3589 + ((I2O_MESSAGE_FRAME_getVersionOffset(
3590 Message_Ptr) & 0xF0) >> 2)),
3591 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
3598 * Incrementing requires resizing of the
3599 * packet, and moving up the existing SG
3603 MessageSizeInBytes += sizeof(*sg);
3604 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
3605 I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
3606 + (sizeof(*sg) / sizeof(U32)));
3608 PI2O_MESSAGE_FRAME NewMessage_Ptr;
3611 = (PI2O_MESSAGE_FRAME)
3612 kmalloc (MessageSizeInBytes,
3614 == (PI2O_MESSAGE_FRAME)NULL) {
3615 debug_usr_cmd_printf (
3616 "Failed to acquire frame[%d] memory\n",
3617 MessageSizeInBytes);
3621 span = ((caddr_t)sg)
3622 - (caddr_t)Message_Ptr;
3623 bcopy ((caddr_t)Message_Ptr,
3624 (caddr_t)NewMessage_Ptr, span);
3625 bcopy ((caddr_t)(sg-1),
3626 ((caddr_t)NewMessage_Ptr) + span,
3627 MessageSizeInBytes - span);
3628 kfree (Message_Ptr, M_TEMP);
3629 sg = (PI2O_SGE_SIMPLE_ELEMENT)
3630 (((caddr_t)NewMessage_Ptr) + span);
3631 Message_Ptr = NewMessage_Ptr;
3635 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3636 & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
3642 while ((elm = SLIST_FIRST(&sgList))
3643 != (struct ioctlSgList_S *)NULL) {
3644 SLIST_REMOVE_HEAD(&sgList, link);
3645 kfree (elm, M_TEMP);
3647 kfree (Reply_Ptr, M_TEMP);
3648 kfree (Message_Ptr, M_TEMP);
3653 debug_usr_cmd_printf ("Inbound: ");
3654 debug_usr_cmd_dump_message(Message_Ptr);
3656 /* Send the command */
3657 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
3658 /* Free up in-kernel buffers */
3659 while ((elm = SLIST_FIRST(&sgList))
3660 != (struct ioctlSgList_S *)NULL) {
3661 SLIST_REMOVE_HEAD(&sgList, link);
3662 kfree (elm, M_TEMP);
3664 kfree (Reply_Ptr, M_TEMP);
3665 kfree (Message_Ptr, M_TEMP);
3670 * We do not need any (optional byteswapping) method access to
3671 * the Initiator context field.
3673 I2O_MESSAGE_FRAME_setInitiatorContext64(
3674 (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);
3676 (void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
3678 kfree (Message_Ptr, M_TEMP);
3681 * Wait for the board to report a finished instruction.
3684 while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
3685 if (ASR_getBlinkLedCode(sc)) {
3687 printf ("asr%d: Blink LED 0x%x resetting adapter\n",
3688 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
3689 ASR_getBlinkLedCode(sc));
3690 if (ASR_reset (sc) == ENXIO) {
3691 /* Command Cleanup */
3692 ASR_ccbRemove(sc, ccb);
3695 /* Free up in-kernel buffers */
3696 while ((elm = SLIST_FIRST(&sgList))
3697 != (struct ioctlSgList_S *)NULL) {
3698 SLIST_REMOVE_HEAD(&sgList, link);
3699 kfree (elm, M_TEMP);
3701 kfree (Reply_Ptr, M_TEMP);
3705 /* Check every second for BlinkLed */
3706 tsleep((caddr_t)ccb, 0, "asr", hz);
3710 debug_usr_cmd_printf ("Outbound: ");
3711 debug_usr_cmd_dump_message(Reply_Ptr);
3713 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3714 &(Reply_Ptr->StdReplyFrame),
3715 (ccb->ccb_h.status != CAM_REQ_CMP));
3717 if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3718 - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
3719 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
3720 ccb->csio.dxfer_len - ccb->csio.resid);
3722 if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
3723 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3724 - I2O_SCSI_SENSE_DATA_SZ))) {
3725 int size = ReplySizeInBytes
3726 - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3727 - I2O_SCSI_SENSE_DATA_SZ;
3729 if (size > sizeof(ccb->csio.sense_data)) {
3730 size = sizeof(ccb->csio.sense_data);
3732 bcopy ((caddr_t)&(ccb->csio.sense_data), (caddr_t)Reply_Ptr->SenseData,
3734 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
3738 /* Free up in-kernel buffers */
3739 while ((elm = SLIST_FIRST(&sgList)) != (struct ioctlSgList_S *)NULL) {
3740 /* Copy out as necessary */
3742 /* DIR bit considered `valid', error due to ignorance works */
3743 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
3744 & I2O_SGL_FLAGS_DIR) == 0)) {
3745 error = copyout ((caddr_t)(elm->KernelSpace),
3747 I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
3749 SLIST_REMOVE_HEAD(&sgList, link);
3750 kfree (elm, M_TEMP);
3753 /* Copy reply frame to user space */
3754 error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
3757 kfree (Reply_Ptr, M_TEMP);
3763 /*----------------------------------------------------------------------*/
3764 /* Function asr_ioctl */
3765 /*----------------------------------------------------------------------*/
/* The parameter passed to this function is : */
/* ap : dev_ioctl_args, carrying the device */
/* (a_head.a_dev), the ioctl command (a_cmd), */
/* the user argument passed in (a_data) and */
/* the mode/process parameters. */
3773 /* This function is the user interface into this adapter driver */
3775 /* Return : zero if OK, error code if not */
3776 /*----------------------------------------------------------------------*/
3779 asr_ioctl(struct dev_ioctl_args *ap)
3781 cdev_t dev = ap->a_head.a_dev;
3782 caddr_t data = ap->a_data;
3785 Asr_softc_t * sc = ASR_get_sc (dev);
3787 if (sc != (Asr_softc_t *)NULL)
3791 # if (dsDescription_size != 50)
3792 case DPT_SIGNATURE + ((50 - dsDescription_size) << 16):
3794 if (ap->a_cmd & 0xFFFF0000) {
3795 (void)bcopy ((caddr_t)(&ASR_sig), data,
3799 /* Traditional version of the ioctl interface */
3800 case DPT_SIGNATURE & 0x0000FFFF:
3801 return (copyout ((caddr_t)(&ASR_sig), *((caddr_t *)data),
3802 sizeof(dpt_sig_S)));
3804 /* Traditional version of the ioctl interface */
3805 case DPT_CTRLINFO & 0x0000FFFF:
3806 case DPT_CTRLINFO: {
3809 u_int16_t drvrHBAnum;
3811 u_int16_t blinkState;
3813 u_int8_t pciDeviceNum;
3815 u_int16_t Interrupt;
3816 u_int32_t reserved1;
3817 u_int32_t reserved2;
3818 u_int32_t reserved3;
3821 bzero (&CtlrInfo, sizeof(CtlrInfo));
3822 CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t);
3823 CtlrInfo.drvrHBAnum = asr_unit(dev);
3824 CtlrInfo.baseAddr = (u_long)sc->ha_Base;
3825 i = ASR_getBlinkLedCode (sc);
3829 CtlrInfo.blinkState = i;
3830 CtlrInfo.pciBusNum = sc->ha_pciBusNum;
3831 CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum;
3832 #define FLG_OSD_PCI_VALID 0x0001
3833 #define FLG_OSD_DMA 0x0002
3834 #define FLG_OSD_I2O 0x0004
3835 CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
3836 CtlrInfo.Interrupt = sc->ha_irq;
3837 if (ap->a_cmd & 0xFFFF0000) {
3838 bcopy (&CtlrInfo, data, sizeof(CtlrInfo));
3840 error = copyout (&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo));
3844 /* Traditional version of the ioctl interface */
3845 case DPT_SYSINFO & 0x0000FFFF:
3849 /* Kernel Specific ptok `hack' */
3850 # define ptok(a) ((char *)(a) + KERNBASE)
3852 bzero (&Info, sizeof(Info));
3854 /* Appears I am the only person in the Kernel doing this */
3862 Info.drive0CMOS = j;
3869 Info.drive1CMOS = j;
3871 Info.numDrives = *((char *)ptok(0x475));
3873 Info.processorFamily = ASR_sig.dsProcessorFamily;
3875 case CPU_386SX: case CPU_386:
3876 Info.processorType = PROC_386; break;
3877 case CPU_486SX: case CPU_486:
3878 Info.processorType = PROC_486; break;
3880 Info.processorType = PROC_PENTIUM; break;
3882 Info.processorType = PROC_SEXIUM; break;
3884 Info.osType = OS_BSDI_UNIX;
3885 Info.osMajorVersion = osrelease[0] - '0';
3886 Info.osMinorVersion = osrelease[2] - '0';
3887 /* Info.osRevision = 0; */
3888 /* Info.osSubRevision = 0; */
3889 Info.busType = SI_PCI_BUS;
3890 Info.flags = SI_CMOS_Valid | SI_NumDrivesValid
3891 | SI_OSversionValid | SI_BusTypeValid | SI_NO_SmartROM;
3893 /* Go Out And Look For I2O SmartROM */
3894 for(j = 0xC8000; j < 0xE0000; j += 2048) {
3898 if (*((unsigned short *)cp) != 0xAA55) {
3901 j += (cp[2] * 512) - 2048;
3902 if ((*((u_long *)(cp + 6))
3903 != ('S' + (' ' * 256) + (' ' * 65536L)))
3904 || (*((u_long *)(cp + 10))
3905 != ('I' + ('2' * 256) + ('0' * 65536L)))) {
3909 for (k = 0; k < 64; ++k) {
3910 if (*((unsigned short *)cp)
3911 == (' ' + ('v' * 256))) {
3916 Info.smartROMMajorVersion
3917 = *((unsigned char *)(cp += 4)) - '0';
3918 Info.smartROMMinorVersion
3919 = *((unsigned char *)(cp += 2));
3920 Info.smartROMRevision
3921 = *((unsigned char *)(++cp));
3922 Info.flags |= SI_SmartROMverValid;
3923 Info.flags &= ~SI_NO_SmartROM;
3927 /* Get The Conventional Memory Size From CMOS */
3933 Info.conventionalMemSize = j;
3935 /* Get The Extended Memory Found At Power On From CMOS */
3941 Info.extendedMemSize = j;
3942 Info.flags |= SI_MemorySizeValid;
3944 # if (defined(THIS_IS_BROKEN))
3945 /* If There Is 1 or 2 Drives Found, Set Up Drive Parameters */
3946 if (Info.numDrives > 0) {
3948 * Get The Pointer From Int 41 For The First
3951 j = ((unsigned)(*((unsigned short *)ptok(0x104+2))) << 4)
3952 + (unsigned)(*((unsigned short *)ptok(0x104+0)));
3954 * It appears that SmartROM's Int41/Int46 pointers
3955 * use memory that gets stepped on by the kernel
3956 * loading. We no longer have access to this
3957 * geometry information but try anyways (!?)
3959 Info.drives[0].cylinders = *((unsigned char *)ptok(j));
3961 Info.drives[0].cylinders += ((int)*((unsigned char *)
3964 Info.drives[0].heads = *((unsigned char *)ptok(j));
3966 Info.drives[0].sectors = *((unsigned char *)ptok(j));
3967 Info.flags |= SI_DriveParamsValid;
3968 if ((Info.drives[0].cylinders == 0)
3969 || (Info.drives[0].heads == 0)
3970 || (Info.drives[0].sectors == 0)) {
3971 Info.flags &= ~SI_DriveParamsValid;
3973 if (Info.numDrives > 1) {
3975 * Get The Pointer From Int 46 For The
3976 * Second Drive Parameters
3978 j = ((unsigned)(*((unsigned short *)ptok(0x118+2))) << 4)
3979 + (unsigned)(*((unsigned short *)ptok(0x118+0)));
3980 Info.drives[1].cylinders = *((unsigned char *)
3983 Info.drives[1].cylinders += ((int)
3984 *((unsigned char *)ptok(j))) << 8;
3986 Info.drives[1].heads = *((unsigned char *)
3989 Info.drives[1].sectors = *((unsigned char *)
3991 if ((Info.drives[1].cylinders == 0)
3992 || (Info.drives[1].heads == 0)
3993 || (Info.drives[1].sectors == 0)) {
3994 Info.flags &= ~SI_DriveParamsValid;
3999 /* Copy Out The Info Structure To The User */
4000 if (ap->a_cmd & 0xFFFF0000) {
4001 bcopy (&Info, data, sizeof(Info));
4003 error = copyout (&Info, *(caddr_t *)data, sizeof(Info));
4007 /* Get The BlinkLED State */
4009 i = ASR_getBlinkLedCode (sc);
4013 if (ap->a_cmd & 0xFFFF0000) {
4014 bcopy ((caddr_t)(&i), data, sizeof(i));
4016 error = copyout (&i, *(caddr_t *)data, sizeof(i));
4020 /* Send an I2O command */
4022 return (ASR_queue_i (sc, *((PI2O_MESSAGE_FRAME *)data)));
4024 /* Reset and re-initialize the adapter */
4026 return (ASR_reset (sc));
4028 /* Rescan the LCT table and resynchronize the information */
4030 return (ASR_rescan (sc));