Create 'k' versions of the kernel malloc API.
[dragonfly.git] / sys / dev / raid / asr / asr.c
CommitLineData
984263bc 1/* $FreeBSD: src/sys/dev/asr/asr.c,v 1.3.2.2 2001/08/23 05:21:29 scottl Exp $ */
fef8985e 2/* $DragonFly: src/sys/dev/raid/asr/asr.c,v 1.23 2006/07/28 02:17:37 dillon Exp $ */
984263bc
MD
3/*
4 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
5 * Copyright (c) 2000-2001 Adaptec Corporation
6 * All rights reserved.
7 *
8 * TERMS AND CONDITIONS OF USE
9 *
10 * Redistribution and use in source form, with or without modification, are
11 * permitted provided that redistributions of source code must retain the
12 * above copyright notice, this list of conditions and the following disclaimer.
13 *
14 * This software is provided `as is' by Adaptec and any express or implied
15 * warranties, including, but not limited to, the implied warranties of
16 * merchantability and fitness for a particular purpose, are disclaimed. In no
17 * event shall Adaptec be liable for any direct, indirect, incidental, special,
18 * exemplary or consequential damages (including, but not limited to,
19 * procurement of substitute goods or services; loss of use, data, or profits;
20 * or business interruptions) however caused and on any theory of liability,
21 * whether in contract, strict liability, or tort (including negligence or
22 * otherwise) arising in any way out of the use of this driver software, even
23 * if advised of the possibility of such damage.
24 *
25 * SCSI I2O host adapter driver
26 *
27 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
28 * - The 2000S and 2005S do not initialize on some machines,
29 * increased timeout to 255ms from 50ms for the StatusGet
30 * command.
31 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
32 * - I knew this one was too good to be true. The error return
33 * on ioctl commands needs to be compared to CAM_REQ_CMP, not
34 * to the bit masked status.
35 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
36 * - The 2005S that was supported is affectionately called the
37 * Conjoined BAR Firmware. In order to support RAID-5 in a
38 * 16MB low-cost configuration, Firmware was forced to go
39 * to a Split BAR Firmware. This requires a separate IOP and
40 * Messaging base address.
41 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
42 * - Handle support for 2005S Zero Channel RAID solution.
43 * - System locked up if the Adapter locked up. Do not try
44 * to send other commands if the resetIOP command fails. The
45 * fail outstanding command discovery loop was flawed as the
46 * removal of the command from the list prevented discovering
47 * all the commands.
48 * - Comment changes to clarify driver.
49 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
50 * - We do not use the AC_FOUND_DEV event because of I2O.
51 * Removed asr_async.
52 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
53 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
54 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0
55 * mode as this is confused with competitor adapters in run
56 * mode.
57 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove
58 * to prevent operating system panic.
59 * - moved default major number to 154 from 97.
60 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
61 * - The controller is not actually an ASR (Adaptec SCSI RAID)
62 * series that is visible, it's more of an internal code name.
63 * remove any visible references within reason for now.
64 * - bus_ptr->LUN was not correctly zeroed when initially
65 * allocated causing a possible panic of the operating system
66 * during boot.
67 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
68 * - Code always fails for ASR_getTid affecting performance.
69 * - initiated a set of changes that resulted from a formal
70 * code inspection by Mark_Salyzyn@adaptec.com,
71 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
72 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
73 * Their findings were focussed on the LCT & TID handler, and
74 * all resulting changes were to improve code readability,
75 * consistency or have a positive effect on performance.
76 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
77 * - Passthrough returned an incorrect error.
78 * - Passthrough did not migrate the intrinsic scsi layer wakeup
79 * on command completion.
80 * - generate control device nodes using make_dev and delete_dev.
81 * - Performance affected by TID caching reallocing.
82 * - Made suggested changes by Justin_Gibbs@adaptec.com
83 * - use splcam instead of splbio.
984263bc
MD
84 * - use u_int8_t instead of u_char.
85 * - use u_int16_t instead of u_short.
86 * - use u_int32_t instead of u_long where appropriate.
87 * - use 64 bit context handler instead of 32 bit.
88 * - create_ccb should only allocate the worst case
89 * requirements for the driver since CAM may evolve
90 * making union ccb much larger than needed here.
91 * renamed create_ccb to asr_alloc_ccb.
92 * - go nutz justifying all debug prints as macros
93 * defined at the top and remove unsightly ifdefs.
94 * - INLINE STATIC viewed as confusing. Historically
95 * utilized to affect code performance and debug
96 * issues in OS, Compiler or OEM specific situations.
97 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
98 * - Ported from FreeBSD 2.2.X DPT I2O driver.
99 * changed struct scsi_xfer to union ccb/struct ccb_hdr
100 * changed variable name xs to ccb
101 * changed struct scsi_link to struct cam_path
102 * changed struct scsibus_data to struct cam_sim
103 * stopped using fordriver for holding on to the TID
104 * use proprietary packet creation instead of scsi_inquire
105 * CAM layer sends synchronize commands.
106 */
107
108#define ASR_VERSION 1
109#define ASR_REVISION '0'
110#define ASR_SUBREVISION '8'
111#define ASR_MONTH 8
112#define ASR_DAY 21
113#define ASR_YEAR 2001 - 1980
114
115/*
116 * Debug macros to reduce the unsightly ifdefs
117 */
118#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
119# define debug_asr_message(message) \
120 { \
121 u_int32_t * pointer = (u_int32_t *)message; \
122 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);\
123 u_int32_t counter = 0; \
124 \
125 while (length--) { \
126 printf ("%08lx%c", (u_long)*(pointer++), \
127 (((++counter & 7) == 0) || (length == 0)) \
128 ? '\n' \
129 : ' '); \
130 } \
131 }
132#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
133
134#if (defined(DEBUG_ASR))
135 /* Breaks on none STDC based compilers :-( */
136# define debug_asr_printf(fmt,args...) printf(fmt, ##args)
137# define debug_asr_dump_message(message) debug_asr_message(message)
138# define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path);
139 /* None fatal version of the ASSERT macro */
140# if (defined(__STDC__))
141# define ASSERT(phrase) if(!(phrase))printf(#phrase " at line %d file %s\n",__LINE__,__FILE__)
142# else
143# define ASSERT(phrase) if(!(phrase))printf("phrase" " at line %d file %s\n",__LINE__,__FILE__)
144# endif
145#else /* DEBUG_ASR */
146# define debug_asr_printf(fmt,args...)
147# define debug_asr_dump_message(message)
148# define debug_asr_print_path(ccb)
149# define ASSERT(x)
150#endif /* DEBUG_ASR */
151
152/*
153 * If DEBUG_ASR_CMD is defined:
154 * 0 - Display incoming SCSI commands
155 * 1 - add in a quick character before queueing.
156 * 2 - add in outgoing message frames.
157 */
158#if (defined(DEBUG_ASR_CMD))
159# define debug_asr_cmd_printf(fmt,args...) printf(fmt,##args)
160# define debug_asr_dump_ccb(ccb) \
161 { \
162 u_int8_t * cp = (unsigned char *)&(ccb->csio.cdb_io); \
163 int len = ccb->csio.cdb_len; \
164 \
165 while (len) { \
166 debug_asr_cmd_printf (" %02x", *(cp++)); \
167 --len; \
168 } \
169 }
170# if (DEBUG_ASR_CMD > 0)
171# define debug_asr_cmd1_printf debug_asr_cmd_printf
172# else
173# define debug_asr_cmd1_printf(fmt,args...)
174# endif
175# if (DEBUG_ASR_CMD > 1)
176# define debug_asr_cmd2_printf debug_asr_cmd_printf
177# define debug_asr_cmd2_dump_message(message) debug_asr_message(message)
178# else
179# define debug_asr_cmd2_printf(fmt,args...)
180# define debug_asr_cmd2_dump_message(message)
181# endif
182#else /* DEBUG_ASR_CMD */
183# define debug_asr_cmd_printf(fmt,args...)
184# define debug_asr_cmd_dump_ccb(ccb)
185# define debug_asr_cmd1_printf(fmt,args...)
186# define debug_asr_cmd2_printf(fmt,args...)
187# define debug_asr_cmd2_dump_message(message)
188#endif /* DEBUG_ASR_CMD */
189
190#if (defined(DEBUG_ASR_USR_CMD))
191# define debug_usr_cmd_printf(fmt,args...) printf(fmt,##args)
192# define debug_usr_cmd_dump_message(message) debug_usr_message(message)
193#else /* DEBUG_ASR_USR_CMD */
194# define debug_usr_cmd_printf(fmt,args...)
195# define debug_usr_cmd_dump_message(message)
196#endif /* DEBUG_ASR_USR_CMD */
197
198#define dsDescription_size 46 /* Snug as a bug in a rug */
1f2de5d4 199#include "dptsig.h"
984263bc
MD
200
/*
 * DPT driver signature block advertised to the engine/management
 * tools; asr_attach patches the OS-version portion of the description
 * string at attach time (see note below).
 */
static dpt_sig_S ASR_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
	OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL,
	ADF_ALL_SC5,
	0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
	ASR_MONTH, ASR_DAY, ASR_YEAR,
/*	 01234567890123456789012345678901234567890123456789	< 50 chars */
	"Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
	/*		 ^^^^^ asr_attach alters these to match OS */
};
212
213#include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */
214#include <sys/kernel.h>
215#include <sys/systm.h>
216#include <sys/malloc.h>
217#include <sys/proc.h>
218#include <sys/conf.h>
219#include <sys/disklabel.h>
220#include <sys/bus.h>
221#include <machine/resource.h>
222#include <machine/bus.h>
223#include <sys/rman.h>
224#include <sys/stat.h>
f15db79e 225#include <sys/device.h>
7f2216bc 226#include <sys/thread2.h>
984263bc 227
1f2de5d4
MD
228#include <bus/cam/cam.h>
229#include <bus/cam/cam_ccb.h>
230#include <bus/cam/cam_sim.h>
231#include <bus/cam/cam_xpt_sim.h>
232#include <bus/cam/cam_xpt_periph.h>
984263bc 233
1f2de5d4
MD
234#include <bus/cam/scsi/scsi_all.h>
235#include <bus/cam/scsi/scsi_message.h>
984263bc
MD
236
237#include <vm/vm.h>
238#include <vm/pmap.h>
239#include <machine/cputypes.h>
240#include <machine/clock.h>
241#include <i386/include/vmparam.h>
242
1f2de5d4
MD
243#include <bus/pci/pcivar.h>
244#include <bus/pci/pcireg.h>
984263bc
MD
245
246#define STATIC static
247#define INLINE
248
249#if (defined(DEBUG_ASR) && (DEBUG_ASR > 0))
250# undef STATIC
251# define STATIC
252# undef INLINE
253# define INLINE
254#endif
255#define IN
256#define OUT
257#define INOUT
258
259#define osdSwap4(x) ((u_long)ntohl((u_long)(x)))
260#define KVTOPHYS(x) vtophys(x)
1f2de5d4
MD
261#include "dptalign.h"
262#include "i2oexec.h"
263#include "i2obscsi.h"
264#include "i2odpt.h"
265#include "i2oadptr.h"
1f2de5d4 266#include "sys_info.h"
984263bc
MD
267
268/* Configuration Definitions */
269
270#define SG_SIZE 58 /* Scatter Gather list Size */
271#define MAX_TARGET_ID 126 /* Maximum Target ID supported */
272#define MAX_LUN 255 /* Maximum LUN Supported */
273#define MAX_CHANNEL 7 /* Maximum Channel # Supported by driver */
274#define MAX_INBOUND 2000 /* Max CCBs, Also Max Queue Size */
275#define MAX_OUTBOUND 256 /* Maximum outbound frames/adapter */
276#define MAX_INBOUND_SIZE 512 /* Maximum inbound frame size */
277#define MAX_MAP 4194304L /* Maximum mapping size of IOP */
278 /* Also serves as the minimum map for */
279 /* the 2005S zero channel RAID product */
280
281/**************************************************************************
282** ASR Host Adapter structure - One Structure For Each Host Adapter That **
283** Is Configured Into The System. The Structure Supplies Configuration **
284** Information, Status Info, Queue Info And An Active CCB List Pointer. **
285***************************************************************************/
286
/* I2O register set (memory-mapped adapter register window) */
typedef struct {
	U8           Address[0x30];	/* leading reserved region */
	volatile U32 Status;
	volatile U32 Mask;		/* interrupt mask register */
#	define Mask_InterruptsDisabled 0x08
	U32          x[2];		/* pad up to the FIFO pair */
	volatile U32 ToFIFO;	/* In Bound FIFO */
	volatile U32 FromFIFO;	/* Out Bound FIFO */
} i2oRegs_t;
297
/*
 * A MIX of performance and space considerations for TID lookups:
 * a two-level (target, then LUN) sparse table of I2O TIDs.
 */
typedef u_int16_t tid_t;

typedef struct {
	u_int32_t	size;	/* up to MAX_LUN */
	tid_t		TID[1];	/* variable-length tail, one TID per LUN */
} lun2tid_t;

typedef struct {
	u_int32_t	size;	/* up to MAX_TARGET */
	lun2tid_t *	LUN[1];	/* variable-length tail, one map per target */
} target2lun_t;
312
/*
 * To ensure that we only allocate and use the worst case ccb here, lets
 * make our own local ccb union. If asr_alloc_ccb is utilized for another
 * ccb type, ensure that you add the additional structures into our local
 * ccb union. To ensure strict type checking, we will utilize the local
 * ccb definition wherever possible.
 */
union asr_ccb {
	struct ccb_hdr		ccb_h;	/* For convenience */
	struct ccb_scsiio	csio;	/* SCSI I/O requests */
	struct ccb_setasync	csa;	/* async-callback registration */
};
325
/*
 * Per-adapter software state, one instance per configured HBA.  Holds
 * the register mappings, the I2O logical configuration table (LCT),
 * per-channel CAM paths/SIMs, TID lookup caches and the list of ccbs
 * currently outstanding on the hardware.
 */
typedef struct Asr_softc {
	u_int16_t		ha_irq;
	void *			ha_Base;	/* base port for each board */
	/* Blink-LED fault window; [1] == 0xBC flags a valid code in [0] */
	u_int8_t * volatile	ha_blinkLED;
	i2oRegs_t *		ha_Virt;	/* Base address of IOP */
	U8 *			ha_Fvirt;	/* Base address of Frames */
	I2O_IOP_ENTRY		ha_SystemTable;
	LIST_HEAD(,ccb_hdr)	ha_ccb;		/* ccbs in use */
	struct cam_path *	ha_path[MAX_CHANNEL+1];
	struct cam_sim *	ha_sim[MAX_CHANNEL+1];
	struct resource *	ha_mem_res;
	struct resource *	ha_mes_res;
	struct resource *	ha_irq_res;
	void *			ha_intr;
	PI2O_LCT		ha_LCT;		/* Complete list of devices */
	/* Accessor aliases into each LCT entry's IdentityTag bytes */
#	define le_type	  IdentityTag[0]
#	define I2O_BSA	    0x20
#	define I2O_FCA	    0x40
#	define I2O_SCSI	    0x00
#	define I2O_PORT	    0x80
#	define I2O_UNKNOWN  0x7F
#	define le_bus	  IdentityTag[1]
#	define le_target  IdentityTag[2]
#	define le_lun	  IdentityTag[3]
	target2lun_t *		ha_targets[MAX_CHANNEL+1];
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
	u_long			ha_Msgs_Phys;

	u_int8_t		ha_in_reset;	/* adapter recovery state */
#	define HA_OPERATIONAL	    0
#	define HA_IN_RESET	    1
#	define HA_OFF_LINE	    2
#	define HA_OFF_LINE_RECOVERY 3
	/* Configuration information */
	/* The target id maximums we take */
	u_int8_t		ha_MaxBus;	/* Maximum bus */
	u_int8_t		ha_MaxId;	/* Maximum target ID */
	u_int8_t		ha_MaxLun;	/* Maximum target LUN */
	u_int8_t		ha_SgSize;	/* Max SG elements */
	u_int8_t		ha_pciBusNum;
	u_int8_t		ha_pciDeviceNum;
	u_int8_t		ha_adapter_target[MAX_CHANNEL+1];
	u_int16_t		ha_QueueSize;	/* Max outstanding commands */
	u_int16_t		ha_Msgs_Count;

	/* Links into other parents and HBAs */
	struct Asr_softc *	ha_next;	/* HBA list */
} Asr_softc_t;

/* Head of the linked list of all attached adapters */
STATIC Asr_softc_t * Asr_softc;
376
377/*
378 * Prototypes of the routines we have in this object.
379 */
380
381/* Externally callable routines */
984263bc
MD
382#define PROBE_ARGS IN device_t tag
383#define PROBE_RET int
384#define PROBE_SET() u_long id = (pci_get_device(tag)<<16)|pci_get_vendor(tag)
385#define PROBE_RETURN(retval) if(retval){device_set_desc(tag,retval);return(0);}else{return(ENXIO);}
386#define ATTACH_ARGS IN device_t tag
387#define ATTACH_RET int
388#define ATTACH_SET() int unit = device_get_unit(tag)
389#define ATTACH_RETURN(retval) return(retval)
984263bc 390/* I2O HDM interface */
5ca58d54
RG
391STATIC PROBE_RET asr_probe (PROBE_ARGS);
392STATIC ATTACH_RET asr_attach (ATTACH_ARGS);
984263bc 393/* DOMINO placeholder */
5ca58d54
RG
394STATIC PROBE_RET domino_probe (PROBE_ARGS);
395STATIC ATTACH_RET domino_attach (ATTACH_ARGS);
984263bc 396/* MODE0 adapter placeholder */
5ca58d54
RG
397STATIC PROBE_RET mode0_probe (PROBE_ARGS);
398STATIC ATTACH_RET mode0_attach (ATTACH_ARGS);
984263bc 399
fef8985e
MD
400STATIC Asr_softc_t * ASR_get_sc (dev_t dev);
401STATIC d_ioctl_t asr_ioctl;
402STATIC d_open_t asr_open;
403STATIC d_close_t asr_close;
404STATIC int asr_intr (IN Asr_softc_t *sc);
405STATIC void asr_timeout (INOUT void *arg);
406STATIC int ASR_init (IN Asr_softc_t *sc);
407STATIC INLINE int ASR_acquireLct (INOUT Asr_softc_t *sc);
408STATIC INLINE int ASR_acquireHrt (INOUT Asr_softc_t *sc);
409STATIC void asr_action (IN struct cam_sim *sim,
410 IN union ccb *ccb);
411STATIC void asr_poll (IN struct cam_sim * sim);
984263bc
MD
412
413/*
414 * Here is the auto-probe structure used to nest our tests appropriately
415 * during the startup phase of the operating system.
416 */
984263bc
MD
/*
 * newbus glue: method tables, driver and devclass declarations for the
 * asr HBA itself, plus the DOMINO and MODE0 placeholder drivers that
 * merely claim their PCI functions (their attach routines do nothing).
 */
STATIC device_method_t asr_methods[] = {
	DEVMETHOD(device_probe,	 asr_probe),
	DEVMETHOD(device_attach, asr_attach),
	{ 0, 0 }
};

STATIC driver_t asr_driver = {
	"asr",
	asr_methods,
	sizeof(Asr_softc_t)
};

STATIC devclass_t asr_devclass;

DECLARE_DUMMY_MODULE(asr);
DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);

STATIC device_method_t domino_methods[] = {
	DEVMETHOD(device_probe,	 domino_probe),
	DEVMETHOD(device_attach, domino_attach),
	{ 0, 0 }
};

STATIC driver_t domino_driver = {
	"domino",
	domino_methods,
	0			/* placeholder: no softc required */
};

STATIC devclass_t domino_devclass;

DRIVER_MODULE(domino, pci, domino_driver, domino_devclass, 0, 0);

STATIC device_method_t mode0_methods[] = {
	DEVMETHOD(device_probe,	 mode0_probe),
	DEVMETHOD(device_attach, mode0_attach),
	{ 0, 0 }
};

STATIC driver_t mode0_driver = {
	"mode0",
	mode0_methods,
	0			/* placeholder: no softc required */
};

STATIC devclass_t mode0_devclass;

DRIVER_MODULE(mode0, pci, mode0_driver, mode0_devclass, 0, 0);
984263bc
MD
465
/*
 * devsw for asr hba driver
 *
 * only ioctl is used. the sd driver provides all other access.
 */
#define CDEV_MAJOR 154	 /* prefered default character major */
/* Control-node entry points; asr_drvinit may relocate the major number */
STATIC struct dev_ops asr_ops = {
	{ "asr", CDEV_MAJOR, 0 },
	.d_open =	asr_open,
	.d_close =	asr_close,
	.d_ioctl =	asr_ioctl,
};
478
984263bc 479/*
fef8985e 480 * Initialize the dynamic dev_ops hooks.
984263bc
MD
481 */
/*
 * One-shot registration of the asr control-device entry points.
 * Scans the character-major space for a free slot starting at the
 * preferred CDEV_MAJOR (wrapping to 0 if the upper range is full)
 * before installing asr_ops.  Invoked from SYSINIT below, before the
 * CAM layer picks up the HBA driver.
 */
STATIC void
asr_drvinit (void * unused)
{
	static int asr_devsw_installed = 0;

	/* Idempotent: SYSINIT may not be the only caller */
	if (asr_devsw_installed) {
		return;
	}
	asr_devsw_installed++;
	/*
	 * Find a free spot (the report during driver load used by
	 * osd layer in engine to generate the controlling nodes).
	 *
	 * XXX this is garbage code, store a unit number in asr_ops
	 * and iterate through that instead?
	 */
	while (asr_ops.head.maj < NUMCDEVSW &&
	       dev_ops_get(asr_ops.head.maj, -1) != NULL
	) {
		++asr_ops.head.maj;
	}
	/* Upper range exhausted: retry from 0 up to the preferred major */
	if (asr_ops.head.maj >= NUMCDEVSW) {
		asr_ops.head.maj = 0;
		while (asr_ops.head.maj < CDEV_MAJOR &&
		       dev_ops_get(asr_ops.head.maj, -1) != NULL
		) {
			++asr_ops.head.maj;
		}
	}

	/*
	 * Come to papa
	 */
	dev_ops_add(&asr_ops, 0, 0);
} /* asr_drvinit */
517
518/* Must initialize before CAM layer picks up our HBA driver */
519SYSINIT(asrdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,asr_drvinit,NULL)
520
521/* I2O support routines */
522#define defAlignLong(STRUCT,NAME) char NAME[sizeof(STRUCT)]
523#define getAlignLong(STRUCT,NAME) ((STRUCT *)(NAME))
524
525/*
526 * Fill message with default.
527 */
528STATIC PI2O_MESSAGE_FRAME
529ASR_fillMessage (
530 IN char * Message,
531 IN u_int16_t size)
532{
533 OUT PI2O_MESSAGE_FRAME Message_Ptr;
534
535 Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message);
536 bzero ((void *)Message_Ptr, size);
537 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
538 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
539 (size + sizeof(U32) - 1) >> 2);
540 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
541 return (Message_Ptr);
542} /* ASR_fillMessage */
543
544#define EMPTY_QUEUE ((U32)-1L)
545
/*
 * Pop the next free inbound message frame offset from the adapter's
 * ToFIFO register.  Reading the register consumes an entry; if the
 * first read reports EMPTY_QUEUE the register is deliberately read a
 * second time — presumably the hardware can momentarily report empty
 * while replenishing its free list (TODO: confirm against the I2O
 * hardware spec).  Returns EMPTY_QUEUE if both reads fail.
 */
STATIC INLINE U32
ASR_getMessage(
	IN i2oRegs_t * virt)
{
	OUT U32	MessageOffset;

	/* Second read is intentional, not a redundant copy */
	if ((MessageOffset = virt->ToFIFO) == EMPTY_QUEUE) {
		MessageOffset = virt->ToFIFO;
	}
	return (MessageOffset);
} /* ASR_getMessage */
557
/*
 * Issue a polled command: acquire a free inbound frame (waiting up to
 * ~15s), copy the message into the adapter-resident frame, disable
 * adapter interrupts and post the frame.  Returns the previous
 * interrupt Mask value so the caller can restore it after polling for
 * completion, or (U32)-1 if no frame ever became available.
 */
STATIC U32
ASR_initiateCp (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt,
	IN PI2O_MESSAGE_FRAME Message)
{
	OUT U32	Mask = -1L;
	U32	MessageOffset;
	u_int	Delay = 1500;

	/*
	 * ASR_initiateCp is only used for synchronous commands and will
	 * be made more resiliant to adapter delays since commands like
	 * resetIOP can cause the adapter to be deaf for a little time.
	 */
	while (((MessageOffset = ASR_getMessage(virt)) == EMPTY_QUEUE)
	 && (--Delay != 0)) {
		DELAY (10000);
	}
	if (MessageOffset != EMPTY_QUEUE) {
		/* MessageSize is in U32 units; << 2 converts to bytes */
		bcopy (Message, fvirt + MessageOffset,
		  I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
		/*
		 * Disable the Interrupts
		 */
		virt->Mask = (Mask = virt->Mask) | Mask_InterruptsDisabled;
		/* Posting the offset hands the frame to the adapter */
		virt->ToFIFO = MessageOffset;
	}
	return (Mask);
} /* ASR_initiateCp */
589
/*
 * Reset the adapter via an ExecIopReset message.  The adapter reports
 * completion by DMAing a non-zero status into a U32 slot appended to
 * our local message copy.  Returns that status on success, or 0 when
 * no inbound frame could be obtained.
 */
STATIC U32
ASR_resetIOP (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt)
{
	/* Reset message plus a trailing U32 used as the reply-status slot */
	struct resetMessage {
		I2O_EXEC_IOP_RESET_MESSAGE M;
		U32			   R;
	};
	defAlignLong(struct resetMessage,Message);
	PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
	OUT U32 * volatile Reply_Ptr;	/* volatile: written by adapter DMA */
	U32 Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
	/*
	 * Reset the Reply Status (the word immediately after the message)
	 * and tell the adapter its physical address.
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0;
	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
	  KVTOPHYS((void *)Reply_Ptr));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 2s.
		 */
		u_int8_t Delay = 200;

		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
			DELAY (10000);
		}
		/*
		 * Re-enable the interrupts.
		 */
		virt->Mask = Old;
		ASSERT (*Reply_Ptr);
		return (*Reply_Ptr);
	}
	/* Could not post the message: leave mask untouched, report failure */
	ASSERT (Old != (U32)-1L);
	return (0);
} /* ASR_resetIOP */
643
/*
 * Get the curent state of the adapter: issue ExecStatusGet with the
 * caller's reply buffer as DMA target, then poll the buffer's
 * SyncByte until the adapter fills it in.  Returns `buffer' on
 * success, or NULL on timeout / when no inbound frame was available.
 */
STATIC INLINE PI2O_EXEC_STATUS_GET_REPLY
ASR_getStatus (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt,
	OUT PI2O_EXEC_STATUS_GET_REPLY buffer)
{
	defAlignLong(I2O_EXEC_STATUS_GET_MESSAGE,Message);
	PI2O_EXEC_STATUS_GET_MESSAGE Message_Ptr;
	U32 Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
	  I2O_EXEC_STATUS_GET);
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
	  KVTOPHYS((void *)buffer));
	/* This one is a Byte Count */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
	  sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Reset the Reply Status
	 */
	bzero ((void *)buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in
		 * 255ms (V1.08 raised this from 50ms because the 2000S
		 * and 2005S would not initialize on some machines).
		 */
		u_int8_t Delay = 255;

		/* Volatile access: SyncByte is written by adapter DMA */
		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
			if (--Delay == 0) {
				buffer = (PI2O_EXEC_STATUS_GET_REPLY)NULL;
				break;
			}
			DELAY (1000);
		}
		/*
		 * Re-enable the interrupts.
		 */
		virt->Mask = Old;
		return (buffer);
	}
	return ((PI2O_EXEC_STATUS_GET_REPLY)NULL);
} /* ASR_getStatus */
698
699/*
700 * Check if the device is a SCSI I2O HBA, and add it to the list.
701 */
702
703/*
704 * Probe for ASR controller. If we find it, we will use it.
705 * virtual adapters.
706 */
707STATIC PROBE_RET
708asr_probe(PROBE_ARGS)
709{
710 PROBE_SET();
711 if ((id == 0xA5011044) || (id == 0xA5111044)) {
712 PROBE_RETURN ("Adaptec Caching SCSI RAID");
713 }
714 PROBE_RETURN (NULL);
715} /* asr_probe */
716
717/*
718 * Probe/Attach for DOMINO chipset.
719 */
720STATIC PROBE_RET
721domino_probe(PROBE_ARGS)
722{
723 PROBE_SET();
724 if (id == 0x10121044) {
725 PROBE_RETURN ("Adaptec Caching Memory Controller");
726 }
727 PROBE_RETURN (NULL);
728} /* domino_probe */
729
/*
 * Attach for the DOMINO placeholder: claiming the PCI function is
 * all that is required, so succeed without further setup.
 */
STATIC ATTACH_RET
domino_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* domino_attach */
735
736/*
737 * Probe/Attach for MODE0 adapters.
738 */
739STATIC PROBE_RET
740mode0_probe(PROBE_ARGS)
741{
742 PROBE_SET();
743
744 /*
745 * If/When we can get a business case to commit to a
746 * Mode0 driver here, we can make all these tests more
747 * specific and robust. Mode0 adapters have their processors
748 * turned off, this the chips are in a raw state.
749 */
750
751 /* This is a PLX9054 */
752 if (id == 0x905410B5) {
753 PROBE_RETURN ("Adaptec Mode0 PM3757");
754 }
755 /* This is a PLX9080 */
756 if (id == 0x908010B5) {
757 PROBE_RETURN ("Adaptec Mode0 PM3754/PM3755");
758 }
759 /* This is a ZION 80303 */
760 if (id == 0x53098086) {
761 PROBE_RETURN ("Adaptec Mode0 3010S");
762 }
763 /* This is an i960RS */
764 if (id == 0x39628086) {
765 PROBE_RETURN ("Adaptec Mode0 2100S");
766 }
767 /* This is an i960RN */
768 if (id == 0x19648086) {
769 PROBE_RETURN ("Adaptec Mode0 PM2865/2400A/3200S/3400S");
770 }
771#if 0 /* this would match any generic i960 -- mjs */
772 /* This is an i960RP (typically also on Motherboards) */
773 if (id == 0x19608086) {
774 PROBE_RETURN ("Adaptec Mode0 PM2554/PM1554/PM2654");
775 }
776#endif
777 PROBE_RETURN (NULL);
778} /* mode0_probe */
779
/*
 * Attach for the MODE0 placeholder: claiming the PCI function is
 * all that is required, so succeed without further setup.
 */
STATIC ATTACH_RET
mode0_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* mode0_attach */
785
786STATIC INLINE union asr_ccb *
787asr_alloc_ccb (
788 IN Asr_softc_t * sc)
789{
790 OUT union asr_ccb * new_ccb;
791
792 if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb),
793 M_DEVBUF, M_WAITOK)) != (union asr_ccb *)NULL) {
794 bzero (new_ccb, sizeof(*new_ccb));
795 new_ccb->ccb_h.pinfo.priority = 1;
796 new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
797 new_ccb->ccb_h.spriv_ptr0 = sc;
798 }
799 return (new_ccb);
800} /* asr_alloc_ccb */
801
/* Release a ccb obtained from asr_alloc_ccb back to the kernel heap. */
STATIC INLINE void
asr_free_ccb (
	IN union asr_ccb * free_ccb)
{
	free(free_ccb, M_DEVBUF);
} /* asr_free_ccb */
808
809/*
810 * Print inquiry data `carefully'
811 */
812STATIC void
813ASR_prstring (
814 u_int8_t * s,
815 int len)
816{
817 while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
818 printf ("%c", *(s++));
819 }
820} /* ASR_prstring */
821
822/*
823 * Prototypes
824 */
5ca58d54 825STATIC INLINE int ASR_queue (
984263bc 826 IN Asr_softc_t * sc,
5ca58d54 827 IN PI2O_MESSAGE_FRAME Message);
984263bc
MD
/*
 * Send a message synchronously and without Interrupt to a ccb: tag
 * the message with the ccb as initiator context, disable adapter
 * interrupts inside a critical section, post the message and spin,
 * calling asr_intr() by hand until the ccb leaves CAM_REQ_INPROG.
 * Returns the ccb's final CAM status.
 */
STATIC int
ASR_queue_s (
	INOUT union asr_ccb * ccb,
	IN PI2O_MESSAGE_FRAME Message)
{
	U32		Mask;
	Asr_softc_t *	sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	/* Prevent interrupt service */
	crit_enter();
	sc->ha_Virt->Mask = (Mask = sc->ha_Virt->Mask)
	  | Mask_InterruptsDisabled;

	/* Posting failed: mark the ccb for requeue by the caller */
	if (ASR_queue (sc, Message) == EMPTY_QUEUE) {
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	}

	/*
	 * Wait for this board to report a finished instruction.
	 */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		(void)asr_intr (sc);
	}

	/* Re-enable Interrupts */
	sc->ha_Virt->Mask = Mask;
	crit_exit();

	return (ccb->ccb_h.status);
} /* ASR_queue_s */
868
869/*
870 * Send a message synchronously to a Asr_softc_t
871 */
872STATIC int
873ASR_queue_c (
874 IN Asr_softc_t * sc,
875 IN PI2O_MESSAGE_FRAME Message)
876{
877 union asr_ccb * ccb;
878 OUT int status;
879
880 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
881 return (CAM_REQUEUE_REQ);
882 }
883
884 status = ASR_queue_s (ccb, Message);
885
886 asr_free_ccb(ccb);
887
888 return (status);
889} /* ASR_queue_c */
890
/*
 * Add the specified ccb to the active queue and arm its timeout
 * callout.  Runs inside a critical section because the list is also
 * manipulated from interrupt/timeout context (see V1.04 changelog).
 */
STATIC INLINE void
ASR_ccbAdd (
	IN Asr_softc_t * sc,
	INOUT union asr_ccb * ccb)
{
	crit_enter();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flashes switching from write back to write thru.
			 */
			ccb->ccb_h.timeout = 6 * 60 * 1000;
		}
		/* timeout is in milliseconds; callout wants ticks */
		callout_reset(&ccb->ccb_h.timeout_ch,
		  (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
	}
	crit_exit();
} /* ASR_ccbAdd */
915
/*
 * Remove the specified ccb from the active queue, cancelling its
 * pending timeout first.  Critical section pairs with ASR_ccbAdd.
 */
STATIC INLINE void
ASR_ccbRemove (
	IN Asr_softc_t * sc,
	INOUT union asr_ccb * ccb)
{
	crit_enter();
	callout_stop(&ccb->ccb_h.timeout_ch);
	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
	crit_exit();
} /* ASR_ccbRemove */
929
/*
 * Fail all the active commands, so they get re-issued by the operating
 * system.  Each ccb is unlinked from the active list, marked
 * CAM_REQUEUE_REQ with zero data transferred, then either completed
 * back to CAM or — for internal pathless commands — its sleeping
 * waiter is woken.
 */
STATIC INLINE void
ASR_failActiveCommands (
	IN Asr_softc_t * sc)
{
	struct ccb_hdr * ccb;

#if 0 /* Currently handled by callers, unnecessary paranoia currently */
	/* Left in for historical perspective. */
	defAlignLong(I2O_EXEC_LCT_NOTIFY_MESSAGE,Message);
	PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;

	/* Send a blind LCT command to wait for the enableSys to complete */
	Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	  I2O_CLASS_MATCH_ANYCLASS);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
#endif

	crit_enter();
	/*
	 * We do not need to inform the CAM layer that we had a bus
	 * reset since we manage it on our own, this also prevents the
	 * SCSI_DELAY settling that would be required on other systems.
	 * The `SCSI_DELAY' has already been handled by the card via the
	 * acquisition of the LCT table while we are at CAM priority level.
	 *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
	 *  }
	 */
	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != (struct ccb_hdr *)NULL) {
		ASR_ccbRemove (sc, (union asr_ccb *)ccb);

		ccb->status &= ~CAM_STATUS_MASK;
		ccb->status |= CAM_REQUEUE_REQ;
		/* Nothing Transfered */
		((struct ccb_scsiio *)ccb)->resid
		  = ((struct ccb_scsiio *)ccb)->dxfer_len;

		if (ccb->path) {
			xpt_done ((union ccb *)ccb);
		} else {
			/* Internal command with no CAM path: wake sleeper */
			wakeup ((caddr_t)ccb);
		}
	}
	crit_exit();
} /* ASR_failActiveCommands */
983
/*
 * The following command causes the HBA to reset the specific bus:
 * locate the bus-port entry for `bus' in the LCT and fire an
 * asynchronous I2O_HBA_BUS_RESET at its TID.  Silently does nothing
 * if no matching port entry exists.
 */
STATIC INLINE void
ASR_resetBus(
	IN Asr_softc_t * sc,
	IN int bus)
{
	defAlignLong(I2O_HBA_BUS_RESET_MESSAGE,Message);
	I2O_HBA_BUS_RESET_MESSAGE * Message_Ptr;
	PI2O_LCT_ENTRY Device;

	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
	  I2O_HBA_BUS_RESET);
	/* Table size is in U32 units, hence the (U32 *) end-pointer math */
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Device) {
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus == bus)) {
			I2O_MESSAGE_FRAME_setTargetAddress(
			  &Message_Ptr->StdMessageFrame,
			  I2O_LCT_ENTRY_getLocalTID(Device));
			/* Asynchronous command, with no expectations */
			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
			break;
		}
	}
} /* ASR_resetBus */
1014
1015STATIC INLINE int
1016ASR_getBlinkLedCode (
1017 IN Asr_softc_t * sc)
1018{
1019 if ((sc != (Asr_softc_t *)NULL)
1020 && (sc->ha_blinkLED != (u_int8_t *)NULL)
1021 && (sc->ha_blinkLED[1] == 0xBC)) {
1022 return (sc->ha_blinkLED[0]);
1023 }
1024 return (0);
1025} /* ASR_getBlinkCode */
1026
/*
 * Determine the address of a TID lookup. Must be done at high priority
 * since the address can be changed by other threads of execution.
 *
 * Returns NULL pointer if not indexable (but will attempt to generate
 * an index if `new_entry' flag is set to TRUE).
 *
 * All addressable entries are guaranteed to be zero if never initialized.
 */
STATIC INLINE tid_t *
ASR_getTidAddress(
	INOUT Asr_softc_t * sc,
	IN int		    bus,
	IN int		    target,
	IN int		    lun,
	IN int		    new_entry)
{
	target2lun_t	* bus_ptr;	/* per-bus table: target -> lun list */
	lun2tid_t	* target_ptr;	/* per-target table: lun -> TID */
	unsigned	  new_size;	/* rounded-up index capacity */

	/*
	 * Validity checking of incoming parameters. More of a bound
	 * expansion limit than an issue with the code dealing with the
	 * values.
	 *
	 * sc must be valid before it gets here, so that check could be
	 * dropped if speed a critical issue.
	 */
	if ((sc == (Asr_softc_t *)NULL)
	 || (bus > MAX_CHANNEL)
	 || (target > sc->ha_MaxId)
	 || (lun > sc->ha_MaxLun)) {
		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
		  (u_long)sc, bus, target, lun);
		return ((tid_t *)NULL);
	}
	/*
	 * See if there is an associated bus list.
	 *
	 * for performance, allocate in size of BUS_CHUNK chunks.
	 * BUS_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
#	define BUS_CHUNK 8
	new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
	if ((bus_ptr = sc->ha_targets[bus]) == (target2lun_t *)NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 *
		 * NOTE(review): malloc is called with M_WAITOK yet the
		 * result is still checked against NULL — harmless
		 * belt-and-braces; the defensive NULL branch is retained.
		 */
		if ((new_entry == FALSE)
		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK))
		  == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to allocate bus list\n");
			return ((tid_t *)NULL);
		}
		bzero (bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * new_size));
		/* size records capacity+1 (one element is in the struct) */
		bus_ptr->size = new_size + 1;
	} else if (bus_ptr->size <= new_size) {
		target2lun_t * new_bus_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK))
		  == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to reallocate bus list\n");
			return ((tid_t *)NULL);
		}
		/*
		 * Zero and copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bzero (new_bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * new_size));
		bcopy (bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
		sc->ha_targets[bus] = new_bus_ptr;
		free (bus_ptr, M_TEMP);
		bus_ptr = new_bus_ptr;
		bus_ptr->size = new_size + 1;
	}
	/*
	 * We now have the bus list, lets get to the target list.
	 * Since most systems have only *one* lun, we do not allocate
	 * in chunks as above, here we allow one, then in chunk sizes.
	 * TARGET_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
#	define TARGET_CHUNK 8
	if ((new_size = lun) != 0) {
		/* lun > 0: round capacity up to a TARGET_CHUNK multiple */
		new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
	}
	if ((target_ptr = bus_ptr->LUN[target]) == (lun2tid_t *)NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK))
		  == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to allocate target list\n");
			return ((tid_t *)NULL);
		}
		bzero (target_ptr, sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * new_size));
		target_ptr->size = new_size + 1;
	} else if (target_ptr->size <= new_size) {
		lun2tid_t * new_target_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK))
		  == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to reallocate target list\n");
			return ((tid_t *)NULL);
		}
		/*
		 * Zero and copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bzero (new_target_ptr, sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * new_size));
		bcopy (target_ptr, new_target_ptr,
		  sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
		bus_ptr->LUN[target] = new_target_ptr;
		free (target_ptr, M_TEMP);
		target_ptr = new_target_ptr;
		target_ptr->size = new_size + 1;
	}
	/*
	 * Now, acquire the TID address from the LUN indexed list.
	 */
	return (&(target_ptr->TID[lun]));
} /* ASR_getTidAddress */
1182
1183/*
1184 * Get a pre-existing TID relationship.
1185 *
1186 * If the TID was never set, return (tid_t)-1.
1187 *
1188 * should use mutex rather than spl.
1189 */
1190STATIC INLINE tid_t
1191ASR_getTid (
1192 IN Asr_softc_t * sc,
1193 IN int bus,
1194 IN int target,
1195 IN int lun)
1196{
1197 tid_t * tid_ptr;
984263bc
MD
1198 OUT tid_t retval;
1199
7f2216bc 1200 crit_enter();
984263bc
MD
1201 if (((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, FALSE))
1202 == (tid_t *)NULL)
1203 /* (tid_t)0 or (tid_t)-1 indicate no TID */
1204 || (*tid_ptr == (tid_t)0)) {
7f2216bc 1205 crit_exit();
984263bc
MD
1206 return ((tid_t)-1);
1207 }
1208 retval = *tid_ptr;
7f2216bc 1209 crit_exit();
984263bc
MD
1210 return (retval);
1211} /* ASR_getTid */
1212
1213/*
1214 * Set a TID relationship.
1215 *
1216 * If the TID was not set, return (tid_t)-1.
1217 *
1218 * should use mutex rather than spl.
1219 */
1220STATIC INLINE tid_t
1221ASR_setTid (
1222 INOUT Asr_softc_t * sc,
1223 IN int bus,
1224 IN int target,
1225 IN int lun,
1226 INOUT tid_t TID)
1227{
1228 tid_t * tid_ptr;
984263bc
MD
1229
1230 if (TID != (tid_t)-1) {
1231 if (TID == 0) {
1232 return ((tid_t)-1);
1233 }
7f2216bc 1234 crit_enter();
984263bc
MD
1235 if ((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, TRUE))
1236 == (tid_t *)NULL) {
7f2216bc 1237 crit_exit();
984263bc
MD
1238 return ((tid_t)-1);
1239 }
1240 *tid_ptr = TID;
7f2216bc 1241 crit_exit();
984263bc
MD
1242 }
1243 return (TID);
1244} /* ASR_setTid */
1245
1246/*-------------------------------------------------------------------------*/
1247/* Function ASR_rescan */
1248/*-------------------------------------------------------------------------*/
1249/* The Parameters Passed To This Function Are : */
1250/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1251/* */
1252/* This Function Will rescan the adapter and resynchronize any data */
1253/* */
1254/* Return : 0 For OK, Error Code Otherwise */
1255/*-------------------------------------------------------------------------*/
1256
/*
 * Rescan the adapter: re-acquire the LCT and HRT tables, then walk every
 * (bus,target,lun) to detect devices that appeared, vanished, or changed
 * TID, notifying the CAM layer accordingly and refreshing the cached TID
 * tables.
 *
 * Returns 0 on success, or the error from ASR_acquireLct/ASR_acquireHrt.
 */
STATIC INLINE int
ASR_rescan(
	IN Asr_softc_t * sc)
{
	int	bus;
	OUT int	error;

	/*
	 * Re-acquire the LCT table and synchronize us to the adapter.
	 */
	if ((error = ASR_acquireLct(sc)) == 0) {
		error = ASR_acquireHrt(sc);
	}

	if (error != 0) {
		return error;
	}

	bus = sc->ha_MaxBus;
	/* Reset all existing cached TID lookups */
	do {
		int target, event = 0;

		/*
		 * Scan for all targets on this bus to see if they
		 * got affected by the rescan.
		 */
		for (target = 0; target <= sc->ha_MaxId; ++target) {
			int lun;

			/* Stay away from the controller ID */
			if (target == sc->ha_adapter_target[bus]) {
				continue;
			}
			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
				PI2O_LCT_ENTRY Device;
				tid_t TID = (tid_t)-1;
				tid_t LastTID;

				/*
				 * See if the cached TID changed. Search for
				 * the device in our new LCT.
				 * UserTID == 0xFFF selects entries not
				 * claimed by another user (per the LCT
				 * matching used elsewhere in this driver).
				 */
				for (Device = sc->ha_LCT->LCTEntry;
				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
				   + I2O_LCT_getTableSize(sc->ha_LCT));
				  ++Device) {
					if ((Device->le_type != I2O_UNKNOWN)
					 && (Device->le_bus == bus)
					 && (Device->le_target == target)
					 && (Device->le_lun == lun)
					 && (I2O_LCT_ENTRY_getUserTID(Device)
					  == 0xFFF)) {
						TID = I2O_LCT_ENTRY_getLocalTID(
						  Device);
						break;
					}
				}
				/*
				 * Indicate to the OS that the label needs
				 * to be recalculated, or that the specific
				 * open device is no longer valid (Merde)
				 * because the cached TID changed.
				 */
				LastTID = ASR_getTid (sc, bus, target, lun);
				if (LastTID != TID) {
					struct cam_path * path;

					/*
					 * If a per-device path cannot be
					 * built, fall back to accumulating
					 * bus-wide events (sent below).
					 * NOTE(review): on the success
					 * branch the created path is never
					 * freed — looks like a cam_path
					 * leak; confirm against
					 * xpt_free_path usage elsewhere.
					 */
					if (xpt_create_path(&path,
					  /*periph*/NULL,
					  cam_sim_path(sc->ha_sim[bus]),
					  target, lun) != CAM_REQ_CMP) {
						if (TID == (tid_t)-1) {
							event |= AC_LOST_DEVICE;
						} else {
							event |= AC_INQ_CHANGED
							       | AC_GETDEV_CHANGED;
						}
					} else {
						if (TID == (tid_t)-1) {
							/* device vanished */
							xpt_async(
							  AC_LOST_DEVICE,
							  path, NULL);
						} else if (LastTID == (tid_t)-1) {
							/* new device found */
							struct ccb_getdev ccb;

							xpt_setup_ccb(
							  &(ccb.ccb_h),
							  path, /*priority*/5);
							xpt_async(
							  AC_FOUND_DEVICE,
							  path,
							  &ccb);
						} else {
							/* TID moved: force
							 * re-inquiry */
							xpt_async(
							  AC_INQ_CHANGED,
							  path, NULL);
							xpt_async(
							  AC_GETDEV_CHANGED,
							  path, NULL);
						}
					}
				}
				/*
				 * We have the option of clearing the
				 * cached TID for it to be rescanned, or to
				 * set it now even if the device never got
				 * accessed. We chose the later since we
				 * currently do not use the condition that
				 * the TID ever got cached.
				 */
				ASR_setTid (sc, bus, target, lun, TID);
			}
		}
		/*
		 * The xpt layer can not handle multiple events at the
		 * same call.
		 */
		if (event & AC_LOST_DEVICE) {
			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
		}
		if (event & AC_INQ_CHANGED) {
			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
		}
		if (event & AC_GETDEV_CHANGED) {
			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
		}
	} while (--bus >= 0);
	return (error);
} /* ASR_rescan */
1387
1388/*-------------------------------------------------------------------------*/
1389/* Function ASR_reset */
1390/*-------------------------------------------------------------------------*/
1391/* The Parameters Passed To This Function Are : */
1392/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1393/* */
1394/* This Function Will reset the adapter and resynchronize any data */
1395/* */
1396/* Return : None */
1397/*-------------------------------------------------------------------------*/
1398
1399STATIC INLINE int
1400ASR_reset(
1401 IN Asr_softc_t * sc)
1402{
7f2216bc 1403 int retVal;
984263bc 1404
7f2216bc 1405 crit_enter();
984263bc
MD
1406 if ((sc->ha_in_reset == HA_IN_RESET)
1407 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
7f2216bc 1408 crit_exit();
984263bc
MD
1409 return (EBUSY);
1410 }
1411 /*
1412 * Promotes HA_OPERATIONAL to HA_IN_RESET,
1413 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1414 */
1415 ++(sc->ha_in_reset);
1416 if (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0) {
1417 debug_asr_printf ("ASR_resetIOP failed\n");
1418 /*
1419 * We really need to take this card off-line, easier said
1420 * than make sense. Better to keep retrying for now since if a
1421 * UART cable is connected the blinkLEDs the adapter is now in
1422 * a hard state requiring action from the monitor commands to
1423 * the HBA to continue. For debugging waiting forever is a
1424 * good thing. In a production system, however, one may wish
1425 * to instead take the card off-line ...
1426 */
1427# if 0 && (defined(HA_OFF_LINE))
1428 /*
1429 * Take adapter off-line.
1430 */
1431 printf ("asr%d: Taking adapter off-line\n",
1432 sc->ha_path[0]
1433 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1434 : 0);
1435 sc->ha_in_reset = HA_OFF_LINE;
7f2216bc 1436 crit_exit();
984263bc
MD
1437 return (ENXIO);
1438# else
1439 /* Wait Forever */
1440 while (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0);
1441# endif
1442 }
1443 retVal = ASR_init (sc);
7f2216bc 1444 crit_exit();
984263bc
MD
1445 if (retVal != 0) {
1446 debug_asr_printf ("ASR_init failed\n");
1447 sc->ha_in_reset = HA_OFF_LINE;
1448 return (ENXIO);
1449 }
1450 if (ASR_rescan (sc) != 0) {
1451 debug_asr_printf ("ASR_rescan failed\n");
1452 }
1453 ASR_failActiveCommands (sc);
1454 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1455 printf ("asr%d: Brining adapter back on-line\n",
1456 sc->ha_path[0]
1457 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1458 : 0);
1459 }
1460 sc->ha_in_reset = HA_OPERATIONAL;
1461 return (0);
1462} /* ASR_reset */
1463
1464/*
1465 * Device timeout handler.
1466 */
/*
 * Device timeout handler (callout).
 *
 *	arg - the timed-out CCB; its spriv_ptr0 holds the adapter softc.
 *
 * Escalation policy: if the adapter reports a Blink LED fault, reset the
 * whole adapter immediately.  Otherwise reset the SCSI bus on the first
 * timeout (marking the CCB CAM_CMD_TIMEOUT and re-arming the callout);
 * on a second timeout of the same CCB, reset the whole adapter.  When
 * ASR_reset() returns ENXIO (busy/off-line) the callout is simply
 * re-armed to retry later.
 */
STATIC void
asr_timeout(
	INOUT void  * arg)
{
	union asr_ccb * ccb = (union asr_ccb *)arg;
	Asr_softc_t   * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	int		s;

	debug_asr_print_path(ccb);
	debug_asr_printf("timed out");

	/*
	 * Check if the adapter has locked up?
	 */
	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
		/* Reset Adapter */
		printf ("asr%d: Blink LED 0x%x resetting adapter\n",
		  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
		if (ASR_reset (sc) == ENXIO) {
			/* Try again later */
			callout_reset(&ccb->ccb_h.timeout_ch,
			    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
		}
		return;
	}
	/*
	 * Abort does not function on the ASR card!!! Walking away from
	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
	 * our best bet, followed by a complete adapter reset if that fails.
	 */
	crit_enter();
	/* Check if we already timed out once to raise the issue */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
		if (ASR_reset (sc) == ENXIO) {
			callout_reset(&ccb->ccb_h.timeout_ch,
			    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
		}
		crit_exit();
		return;
	}
	debug_asr_printf ("\nresetting bus\n");
	/* If the BUS reset does not take, then an adapter reset is next! */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
	/* Re-arm before issuing the reset so a stuck bus re-enters here */
	callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
	    asr_timeout, ccb);
	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
	xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
	crit_exit();
} /* asr_timeout */
1518
1519/*
1520 * send a message asynchronously
1521 */
/*
 * Send a message to the adapter asynchronously.
 *
 *	sc      - adapter softc.
 *	Message - fully-built I2O message frame; its InitiatorContext64
 *		  carries the owning CCB pointer (may be zero for
 *		  driver-internal commands).
 *
 * Returns the message-frame offset obtained from the inbound FIFO, or
 * EMPTY_QUEUE when no frame could be allocated.  On EMPTY_QUEUE, a
 * Blink LED fault triggers a full adapter reset as a last resort.
 */
STATIC INLINE int
ASR_queue(
	IN Asr_softc_t	  * sc,
	IN PI2O_MESSAGE_FRAME Message)
{
	OUT U32		  MessageOffset;
	union asr_ccb	* ccb;

	debug_asr_printf ("Host Command Dump:\n");
	debug_asr_dump_message (Message);

	/* Recover the CCB pointer stashed in the initiator context */
	ccb = (union asr_ccb *)(long)
	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);

	if ((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE) {
		/* MessageSize is in 32-bit words, hence the << 2 */
		bcopy (Message, sc->ha_Fvirt + MessageOffset,
		  I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
		/* Track the CCB *before* the hardware can complete it */
		if (ccb) {
			ASR_ccbAdd (sc, ccb);
		}
		/* Post the command */
		sc->ha_Virt->ToFIFO = MessageOffset;
	} else {
		if (ASR_getBlinkLedCode(sc)) {
			/*
			 * Unlikely we can do anything if we can't grab a
			 * message frame :-(, but lets give it a try.
			 */
			(void)ASR_reset (sc);
		}
	}
	return (MessageOffset);
} /* ASR_queue */
1555
1556
/*
 * Simple Scatter Gather elements.
 *
 * SG(SGL, Index, Flags, Buffer, Size) fills entry `Index' of the simple
 * SG list `SGL' with the physical address of `Buffer' and a byte count
 * of `Size'; `Flags' is OR-ed with I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT.
 * A NULL Buffer stores a NULL physical address (KVTOPHYS is skipped).
 * Note: Buffer is evaluated twice — do not pass expressions with side
 * effects.
 */
#define SG(SGL,Index,Flags,Buffer,Size)				   \
	I2O_FLAGS_COUNT_setCount(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  Size);						   \
	I2O_FLAGS_COUNT_setFlags(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	   \
	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		   \
	  (Buffer == NULL) ? NULL : KVTOPHYS(Buffer))
1568
1569/*
1570 * Retrieve Parameter Group.
1571 * Buffer must be allocated using defAlignLong macro.
1572 */
/*
 * Retrieve Parameter Group.
 * Buffer must be allocated using defAlignLong macro.
 *
 *	sc	   - adapter softc.
 *	TID	   - I2O target whose parameters are fetched.
 *	Group	   - parameter group number (FIELD_GET of all fields).
 *	Buffer	   - defAlignLong-allocated result area.
 *	BufferSize - size of Buffer in bytes.
 *
 * Issues a synchronous UTIL_PARAMS_GET with a two-element SG list
 * (operations block out, result block in).  Returns a pointer to the
 * Info payload inside Buffer on success, or NULL when the command fails
 * or no results were returned.
 */
STATIC void *
ASR_getParams(
	IN Asr_softc_t	  * sc,
	IN tid_t	    TID,
	IN int		    Group,
	OUT void	  * Buffer,
	IN unsigned	    BufferSize)
{
	struct paramGetMessage {
		I2O_UTIL_PARAMS_GET_MESSAGE M;
		/* room for the two simple SG elements replacing SGL */
		char F[
		  sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
		struct Operations {
			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
		} O;
	};
	defAlignLong(struct paramGetMessage, Message);
	struct Operations		* Operations_Ptr;
	I2O_UTIL_PARAMS_GET_MESSAGE	* Message_Ptr;
	struct ParamBuffer {
		I2O_PARAM_RESULTS_LIST_HEADER	    Header;
		I2O_PARAM_READ_OPERATION_RESULT	    Read;
		char				    Info[1];
	} * Buffer_Ptr;

	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	    + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	/* Operations block lives just past the message + SG area */
	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
	  + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	bzero ((void *)Operations_Ptr, sizeof(struct Operations));
	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
	  &(Operations_Ptr->Header), 1);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
	  &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
	/* 0xFFFF field count == request every field in the group */
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
	  &(Operations_Ptr->Template[0]), 0xFFFF);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
	  &(Operations_Ptr->Template[0]), Group);
	bzero ((void *)(Buffer_Ptr = getAlignLong(struct ParamBuffer, Buffer)),
	  BufferSize);

	/* VersionOffset encodes the SGL offset in 32-bit words, << 4 */
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  I2O_VERSION_11
	  + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
	  TID);
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_UTIL_PARAMS_GET);
	/*
	 * Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Operations_Ptr, sizeof(struct Operations));
	SG(&(Message_Ptr->SGL), 1,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Buffer_Ptr, BufferSize);

	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
	 && (Buffer_Ptr->Header.ResultCount)) {
		return ((void *)(Buffer_Ptr->Info));
	}
	return ((void *)NULL);
} /* ASR_getParams */
1641
1642/*
1643 * Acquire the LCT information.
1644 */
/*
 * Acquire the LCT (Logical Configuration Table) from the adapter and
 * cache it in sc->ha_LCT, then classify every entry (le_type, le_bus,
 * le_target, le_lun) via parameter-group queries.
 *
 * Two-pass protocol: first a blind EXEC_LCT_NOTIFY with a one-entry
 * table to learn the real table size, then a second fetch into a freshly
 * allocated buffer described by a (possibly multi-element) SG list.
 *
 * Returns 0 on success, ENOMEM on allocation failure, EINVAL on an
 * implausible table size, ENODEV when the final fetch fails.
 */
STATIC INLINE int
ASR_acquireLct (
	INOUT Asr_softc_t	   * sc)
{
	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT		sg;
	int				MessageSizeInBytes;
	caddr_t				v;
	int				len;
	I2O_LCT				Table;	/* stack probe for size */
	PI2O_LCT_ENTRY			Entry;

	/*
	 * sc value assumed valid
	 */
	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
	/*
	 * NOTE(review): M_WAITOK malloc checked for NULL — defensive,
	 * retained as-is.
	 */
	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc (
	  MessageSizeInBytes, M_TEMP, M_WAITOK))
	  == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
		return (ENOMEM);
	}
	(void)ASR_fillMessage((char *)Message_Ptr, MessageSizeInBytes);
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	  I2O_CLASS_MATCH_ANYCLASS);
	/*
	 * Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
	  sizeof(I2O_LCT));
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(&Table,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	/*
	 * Determine the size of the LCT table.
	 */
	if (sc->ha_LCT) {
		free (sc->ha_LCT, M_TEMP);
	}
	/*
	 * malloc only generates contiguous memory when less than a
	 * page is expected. We must break the request up into an SG list ...
	 */
	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
	 || (len > (128 * 1024))) {	/* Arbitrary */
		free (Message_Ptr, M_TEMP);
		return (EINVAL);
	}
	if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK))
	  == (PI2O_LCT)NULL) {
		free (Message_Ptr, M_TEMP);
		return (ENOMEM);
	}
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(sc->ha_LCT,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/*
	 * Convert the access to the LCT table into a SG list: one simple
	 * element per physically-contiguous run of the buffer, growing the
	 * message frame as elements are added.
	 */
	sg = Message_Ptr->SGL.u.Simple;
	v = (caddr_t)(sc->ha_LCT);
	for (;;) {
		int next, base, span;

		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			int size;

			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		/* Construct the Flags */
		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		{
			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
			if (len <= 0) {
				/* final element: mark end of list/buffer */
				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
				    | I2O_SGL_FLAGS_LAST_ELEMENT
				    | I2O_SGL_FLAGS_END_OF_BUFFER);
			}
			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
		}

		if (len <= 0) {
			break;
		}

		/*
		 * Incrementing requires resizing of the packet: allocate
		 * a larger frame and copy everything built so far.
		 */
		++sg;
		MessageSizeInBytes += sizeof(*sg);
		I2O_MESSAGE_FRAME_setMessageSize(
		  &(Message_Ptr->StdMessageFrame),
		  I2O_MESSAGE_FRAME_getMessageSize(
		    &(Message_Ptr->StdMessageFrame))
		  + (sizeof(*sg) / sizeof(U32)));
		{
			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;

			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
			    malloc (MessageSizeInBytes, M_TEMP, M_WAITOK))
			    == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
				free (sc->ha_LCT, M_TEMP);
				sc->ha_LCT = (PI2O_LCT)NULL;
				free (Message_Ptr, M_TEMP);
				return (ENOMEM);
			}
			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
			bcopy ((caddr_t)Message_Ptr,
			  (caddr_t)NewMessage_Ptr, span);
			free (Message_Ptr, M_TEMP);
			sg = (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)NewMessage_Ptr) + span);
			Message_Ptr = NewMessage_Ptr;
		}
	}
	{	int retval;

		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
		free (Message_Ptr, M_TEMP);
		if (retval != CAM_REQ_CMP) {
			return (ENODEV);
		}
	}
	/* If the LCT table grew, lets truncate accesses */
	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
	}
	/*
	 * Classify each entry by its I2O class, then fill in bus/target/lun
	 * from the DPT device-info parameter group.
	 */
	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Entry) {
		Entry->le_type = I2O_UNKNOWN;
		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {

		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
			Entry->le_type = I2O_BSA;
			break;

		case I2O_CLASS_SCSI_PERIPHERAL:
			Entry->le_type = I2O_SCSI;
			break;

		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
			Entry->le_type = I2O_FCA;
			break;

		case I2O_CLASS_BUS_ADAPTER_PORT:
			Entry->le_type = I2O_PORT | I2O_SCSI;
			/* FALLTHRU */
		case I2O_CLASS_FIBRE_CHANNEL_PORT:
			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
				Entry->le_type = I2O_PORT | I2O_FCA;
			}
			/* Ports: initiator ID comes from the HBA scalar */
			{	struct ControllerInfo {
					I2O_PARAM_RESULTS_LIST_HEADER	    Header;
					I2O_PARAM_READ_OPERATION_RESULT	    Read;
					I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
				};
				defAlignLong(struct ControllerInfo, Buffer);
				PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;

				Entry->le_bus = 0xff;
				Entry->le_target = 0xff;
				Entry->le_lun = 0xff;

				if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
				  ASR_getParams(sc,
				    I2O_LCT_ENTRY_getLocalTID(Entry),
				    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
				    Buffer, sizeof(struct ControllerInfo)))
				  == (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)NULL) {
					continue;
				}
				Entry->le_target
				  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
				    Info);
				Entry->le_lun = 0;
			}	/* FALLTHRU */
		default:
			continue;
		}
		{	struct DeviceInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	Header;
				I2O_PARAM_READ_OPERATION_RESULT Read;
				I2O_DPT_DEVICE_INFO_SCALAR	Info;
			};
			defAlignLong (struct DeviceInfo, Buffer);
			PI2O_DPT_DEVICE_INFO_SCALAR	 Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_DPT_DEVICE_INFO_GROUP_NO,
			    Buffer, sizeof(struct DeviceInfo)))
			  == (PI2O_DPT_DEVICE_INFO_SCALAR)NULL) {
				continue;
			}
			Entry->le_type
			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
			Entry->le_bus
			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
			/* Track the highest bus number actually seen */
			if ((Entry->le_bus > sc->ha_MaxBus)
			 && (Entry->le_bus <= MAX_CHANNEL)) {
				sc->ha_MaxBus = Entry->le_bus;
			}
			Entry->le_target
			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
			Entry->le_lun
			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
		}
	}
	/*
	 * A zero return value indicates success.
	 */
	return (0);
} /* ASR_acquireLct */
1897
1898/*
1899 * Initialize a message frame.
1900 * We assume that the CDB has already been set up, so all we do here is
1901 * generate the Scatter Gather list.
1902 */
/*
 * Initialize a message frame.
 * We assume that the CDB has already been set up, so all we do here is
 * generate the Scatter Gather list.
 *
 *	ccb	- the CAM CCB describing the SCSI command and data buffer.
 *	Message - defAlignLong-allocated frame to build into.
 *
 * Resolves the (bus,target,lun) to an I2O TID (consulting, and if needed
 * refreshing, the TID cache from the LCT), fills in the private SCSI SCB
 * execute header, copies the CDB, and converts the data buffer into a
 * simple SG list terminated by a sense-data element.
 *
 * Returns the aligned frame pointer, or NULL when no TID exists for the
 * addressed device.
 */
STATIC INLINE PI2O_MESSAGE_FRAME
ASR_init_message(
	IN union asr_ccb      * ccb,
	OUT PI2O_MESSAGE_FRAME	Message)
{
	int			next, span, base, rw;
	OUT PI2O_MESSAGE_FRAME	Message_Ptr;
	Asr_softc_t	      * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	PI2O_SGE_SIMPLE_ELEMENT sg;
	caddr_t			v;
	vm_size_t		size, len;
	U32			MessageSize;

	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
	bzero (Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message),
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT)));

	{
		int	target = ccb->ccb_h.target_id;
		int	lun = ccb->ccb_h.target_lun;
		int	bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
		tid_t	TID;

		/* Cache miss: search the LCT and populate the TID cache */
		if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
			PI2O_LCT_ENTRY Device;

			TID = (tid_t)0;
			for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
			  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
			  ++Device) {
				if ((Device->le_type != I2O_UNKNOWN)
				 && (Device->le_bus == bus)
				 && (Device->le_target == target)
				 && (Device->le_lun == lun)
				 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
					TID = I2O_LCT_ENTRY_getLocalTID(Device);
					ASR_setTid (sc, Device->le_bus,
					  Device->le_target, Device->le_lun,
					  TID);
					break;
				}
			}
		}
		if (TID == (tid_t)0) {
			/* No such device known to the adapter */
			return ((PI2O_MESSAGE_FRAME)NULL);
		}
		I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
		  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
	}
	/* VersionOffset encodes the SGL offset in 32-bit words, << 4 */
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
	  (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	    I2O_SCB_FLAG_ENABLE_DISCONNECT
	  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator & Transaction context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
	/*
	 * copy the cdb over
	 *
	 * NOTE(review): cdb_io is copied unconditionally — this assumes
	 * an inline CDB (no CAM_CDB_POINTER handling visible here);
	 * confirm callers never pass pointer-form CDBs.
	 */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
	bcopy (&(ccb->csio.cdb_io),
	  ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB, ccb->csio.cdb_len);

	/*
	 * Given a buffer describing a transfer, set up a scatter/gather map
	 * in a ccb to map that SCSI transfer.
	 */

	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;

	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	  (ccb->csio.dxfer_len)
	    ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
	 	    : (I2O_SCB_FLAG_XFER_FROM_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
	    : (I2O_SCB_FLAG_ENABLE_DISCONNECT
	     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

	/*
	 * Given a transfer described by a `data', fill in the SG list.
	 * One simple element per physically-contiguous run, capped at
	 * SG_SIZE elements.
	 */
	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];

	len = ccb->csio.dxfer_len;
	v = ccb->csio.data_ptr;
	ASSERT (ccb->csio.dxfer_len >= 0);
	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		if (len == 0) {
			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
		}
		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
		++sg;
		MessageSize += sizeof(*sg) / sizeof(U32);
	}
	/* We always do the request sense ... */
	if ((span = ccb->csio.sense_len) == 0) {
		span = sizeof(ccb->csio.sense_data);
	}
	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &(ccb->csio.sense_data), span);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  MessageSize + (sizeof(*sg) / sizeof(U32)));
	return (Message_Ptr);
} /* ASR_init_message */
2055
/*
 * Initialize the adapter's outbound (reply) message FIFO.
 */
/*
 * Initialize the IOP's outbound (reply) FIFO: send an
 * I2O_EXEC_OUTBOUND_INIT message, poll for its completion status,
 * allocate the reply-frame pool on first use, and post every frame's
 * physical address to the adapter's FromFIFO.
 *
 * Returns the final reply status word on success (polled from the
 * frame), or 0 if the message could not be initiated.
 */
STATIC INLINE U32
ASR_initOutBound (
	INOUT Asr_softc_t * sc)
{
	struct initOutBoundMessage {
		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
		U32			       R; /* reply status word, polled below */
	};
	defAlignLong(struct initOutBoundMessage,Message);
	PI2O_EXEC_OUTBOUND_INIT_MESSAGE	 Message_Ptr;
	/* volatile: updated by the adapter via DMA while we spin on it */
	OUT U32			Old;
	U32 * volatile Reply_Ptr;
	U32			Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_OUTBOUND_INIT);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
	  sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
	/*
	 * Reset the Reply Status (the trailing U32 "R" of the struct above).
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
	  sizeof(U32));
	/*
	 * Send the Message out.  ASR_initiateCp returns the previous
	 * interrupt mask, or (U32)-1 on failure.
	 */
	if ((Old = ASR_initiateCp (sc->ha_Virt, sc->ha_Fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		u_long size, addr;

		/*
		 * Wait for a response (Poll).
		 * NOTE(review): busy-wait with no timeout — a wedged IOP
		 * hangs here forever.  TODO confirm whether a bound is safe.
		 */
		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
		/*
		 * Re-enable the interrupts.
		 */
		sc->ha_Virt->Mask = Old;
		/*
		 * Populate the outbound table.
		 */
		if (sc->ha_Msgs == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {

			/* Allocate the reply frames */
			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  * sc->ha_Msgs_Count;

			/*
			 * contigmalloc only works reliably at
			 * initialization time.
			 */
			if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
			    0xFFFFFFFFul, (u_long)sizeof(U32), 0ul))
			  != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
				(void)bzero ((char *)sc->ha_Msgs, size);
				/* Physical base used to post frames below */
				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
			}
		}

		/* Initialize the outbound FIFO: hand every frame to the IOP */
		if (sc->ha_Msgs != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL)
		for (size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
		  size; --size) {
			sc->ha_Virt->FromFIFO = addr;
			addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
		}
		return (*Reply_Ptr);
	}
	return (0);
} /* ASR_initOutBound */
2136
2137/*
2138 * Set the system table
2139 */
2140STATIC INLINE int
2141ASR_setSysTab(
2142 IN Asr_softc_t * sc)
2143{
2144 PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
2145 PI2O_SET_SYSTAB_HEADER SystemTable;
2146 Asr_softc_t * ha;
2147 PI2O_SGE_SIMPLE_ELEMENT sg;
2148 int retVal;
2149
2150 if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc (
2151 sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK))
2152 == (PI2O_SET_SYSTAB_HEADER)NULL) {
2153 return (ENOMEM);
2154 }
2155 bzero (SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
2156 for (ha = Asr_softc; ha; ha = ha->ha_next) {
2157 ++SystemTable->NumberEntries;
2158 }
2159 if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc (
2160 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2161 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
2162 M_TEMP, M_WAITOK)) == (PI2O_EXEC_SYS_TAB_SET_MESSAGE)NULL) {
2163 free (SystemTable, M_TEMP);
2164 return (ENOMEM);
2165 }
2166 (void)ASR_fillMessage((char *)Message_Ptr,
2167 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2168 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
2169 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2170 (I2O_VERSION_11 +
2171 (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2172 / sizeof(U32)) << 4)));
2173 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2174 I2O_EXEC_SYS_TAB_SET);
2175 /*
2176 * Call the LCT table to determine the number of device entries
2177 * to reserve space for.
2178 * since this code is reused in several systems, code efficiency
2179 * is greater by using a shift operation rather than a divide by
2180 * sizeof(u_int32_t).
2181 */
2182 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
2183 + ((I2O_MESSAGE_FRAME_getVersionOffset(
2184 &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
2185 SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
2186 ++sg;
2187 for (ha = Asr_softc; ha; ha = ha->ha_next) {
2188 SG(sg, 0,
2189 ((ha->ha_next)
2190 ? (I2O_SGL_FLAGS_DIR)
2191 : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
2192 &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
2193 ++sg;
2194 }
2195 SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2196 SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
2197 | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2198 retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2199 free (Message_Ptr, M_TEMP);
2200 free (SystemTable, M_TEMP);
2201 return (retVal);
2202} /* ASR_setSysTab */
2203
/*
 * Fetch the Hardware Resource Table (I2O ExecHrtGet) and use its
 * adapter IDs to fill in the bus number of every matching LCT entry,
 * tracking the highest bus seen in sc->ha_MaxBus.
 *
 * Returns 0 on success, ENODEV if the HRT query fails.
 */
STATIC INLINE int
ASR_acquireHrt (
	INOUT Asr_softc_t * sc)
{
	defAlignLong(I2O_EXEC_HRT_GET_MESSAGE,Message);
	I2O_EXEC_HRT_GET_MESSAGE * Message_Ptr;
	struct {
		I2O_HRT	      Header;
		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
	} Hrt;
	u_int8_t NumberOfEntries;
	PI2O_HRT_ENTRY Entry;

	bzero ((void *)&Hrt, sizeof (Hrt));
	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11
	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		   / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_HRT_GET);

	/*
	 * Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &Hrt, sizeof(Hrt));
	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
		return (ENODEV);
	}
	/* Clamp to the on-stack Entry[] capacity */
	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
	  > (MAX_CHANNEL + 1)) {
		NumberOfEntries = MAX_CHANNEL + 1;
	}
	for (Entry = Hrt.Header.HRTEntry;
	  NumberOfEntries != 0;
	  ++Entry, --NumberOfEntries) {
		PI2O_LCT_ENTRY Device;

		/* Walk the previously-acquired LCT looking for this TID */
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		  ++Device) {
			if (I2O_LCT_ENTRY_getLocalTID(Device)
			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
				/* Bus number lives in AdapterID bits 16+ */
				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
				  Entry) >> 16;
				if ((Device->le_bus > sc->ha_MaxBus)
				 && (Device->le_bus <= MAX_CHANNEL)) {
					sc->ha_MaxBus = Device->le_bus;
				}
			}
		}
	}
	return (0);
} /* ASR_acquireHrt */
2262
2263/*
2264 * Enable the adapter.
2265 */
2266STATIC INLINE int
2267ASR_enableSys (
2268 IN Asr_softc_t * sc)
2269{
2270 defAlignLong(I2O_EXEC_SYS_ENABLE_MESSAGE,Message);
2271 PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr;
2272
2273 Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(Message,
2274 sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2275 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2276 I2O_EXEC_SYS_ENABLE);
2277 return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2278} /* ASR_enableSys */
2279
2280/*
2281 * Perform the stages necessary to initialize the adapter
2282 */
2283STATIC int
2284ASR_init(
2285 IN Asr_softc_t * sc)
2286{
2287 return ((ASR_initOutBound(sc) == 0)
2288 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2289 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2290} /* ASR_init */
2291
2292/*
2293 * Send a Synchronize Cache command to the target device.
2294 */
/*
 * Send a SYNCHRONIZE CACHE command to a single target device,
 * identified by (bus, target, lun), via a private SCSI SCB execute
 * message.  The command is skipped entirely unless the softc is valid,
 * at least one CCB is outstanding, and a valid non-zero TID exists for
 * the device.
 */
STATIC INLINE void
ASR_sync (
	IN Asr_softc_t * sc,
	IN int		 bus,
	IN int		 target,
	IN int		 lun)
{
	tid_t TID;

	/*
	 * We will not synchronize the device when there are outstanding
	 * commands issued by the OS (this is due to a locked up device,
	 * as the OS normally would flush all outstanding commands before
	 * issuing a shutdown or an adapter reset).
	 */
	if ((sc != (Asr_softc_t *)NULL)
	 && (LIST_FIRST(&(sc->ha_ccb)) != (struct ccb_hdr *)NULL)
	 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
	 && (TID != (tid_t)0)) {
		defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE		Message_Ptr;

		bzero (Message_Ptr
		  = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
		  sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));

		/* Version/offset nibble encodes where the SG list starts */
		I2O_MESSAGE_FRAME_setVersionOffset(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  I2O_VERSION_11
		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4));
		I2O_MESSAGE_FRAME_setMessageSize(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32));
		I2O_MESSAGE_FRAME_setInitiatorAddress (
		  (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
		I2O_MESSAGE_FRAME_setFunction(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
		I2O_MESSAGE_FRAME_setTargetAddress(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  I2O_SCSI_SCB_EXEC);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		    I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  DPT_ORGANIZATION_ID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
		Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
		Message_Ptr->CDB[1] = (lun << 5);

		/*
		 * NOTE(review): SCBFlags is set a second time here and
		 * overrides the value set above, adding XFER_FROM_DEVICE.
		 * Presumably intentional (same pattern appears in
		 * asr_attach's inquiry); the first call looks redundant.
		 */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	}
}
2364
2365STATIC INLINE void
2366ASR_synchronize (
2367 IN Asr_softc_t * sc)
2368{
2369 int bus, target, lun;
2370
2371 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2372 for (target = 0; target <= sc->ha_MaxId; ++target) {
2373 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2374 ASR_sync(sc,bus,target,lun);
2375 }
2376 }
2377 }
2378}
2379
2380/*
2381 * Reset the HBA, targets and BUS.
2382 * Currently this resets *all* the SCSI busses.
2383 */
2384STATIC INLINE void
2385asr_hbareset(
2386 IN Asr_softc_t * sc)
2387{
2388 ASR_synchronize (sc);
2389 (void)ASR_reset (sc);
2390} /* asr_hbareset */
2391
2392/*
2393 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2394 * limit and a reduction in error checking (in the pre 4.0 case).
2395 */
/*
 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
 * limit and a reduction in error checking (in the pre 4.0 case).
 *
 * Finds the first memory-mapped BAR, sizes it with the classic
 * write-all-ones probe, maps it into sc->ha_Virt, and (for the split
 * BAR "Raptor Daptor", device/vendor 0xA5111044) also maps the next
 * BAR into sc->ha_Fvirt for the messaging registers.
 *
 * Returns 1 on success, 0 on any mapping failure.
 */
STATIC int
asr_pci_map_mem (
	IN device_t	 tag,
	IN Asr_softc_t * sc)
{
	int		rid;
	u_int32_t	p, l, s;

	/*
	 * I2O specification says we must find first *memory* mapped BAR
	 * (bit 0 of a BAR is set for I/O space, clear for memory).
	 */
	for (rid = PCIR_MAPS;
	  rid < (PCIR_MAPS + 4 * sizeof(u_int32_t));
	  rid += sizeof(u_int32_t)) {
		p = pci_read_config(tag, rid, sizeof(p));
		if ((p & 1) == 0) {
			break;
		}
	}
	/*
	 * Give up?  Fall back to BAR0.
	 */
	if (rid >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
		rid = PCIR_MAPS;
	}
	/*
	 * Standard BAR sizing probe: save the BAR, write all ones, read
	 * back the size mask, then restore the original value.
	 */
	p = pci_read_config(tag, rid, sizeof(p));
	pci_write_config(tag, rid, -1, sizeof(p));
	l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
	pci_write_config(tag, rid, p, sizeof(p));
	if (l > MAX_MAP) {
		l = MAX_MAP;
	}
	/*
	 * The 2005S Zero Channel RAID solution is not a perfect PCI
	 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
	 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
	 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
	 * accessible via BAR0, the messaging registers are accessible
	 * via BAR1. If the subdevice code is 50 to 59 decimal.
	 */
	s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s));
	if (s != 0xA5111044) {
		s = pci_read_config(tag, PCIR_SUBVEND_0, sizeof(s));
		if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
		 && (ADPTDOMINATOR_SUB_ID_START <= s)
		 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
			l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
		}
	}
	p &= ~15;	/* strip BAR type bits to get the base address */
	sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
	  p, p + l, l, RF_ACTIVE);
	if (sc->ha_mem_res == (struct resource *)NULL) {
		return (0);
	}
	sc->ha_Base = (void *)rman_get_start(sc->ha_mem_res);
	if (sc->ha_Base == (void *)NULL) {
		return (0);
	}
	sc->ha_Virt = (i2oRegs_t *) rman_get_virtual(sc->ha_mem_res);
	if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
		if ((rid += sizeof(u_int32_t))
		  >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
			return (0);
		}
		/* Size the second BAR the same way as the first */
		p = pci_read_config(tag, rid, sizeof(p));
		pci_write_config(tag, rid, -1, sizeof(p));
		l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
		pci_write_config(tag, rid, p, sizeof(p));
		if (l > MAX_MAP) {
			l = MAX_MAP;
		}
		p &= ~15;
		sc->ha_mes_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
		  p, p + l, l, RF_ACTIVE);
		if (sc->ha_mes_res == (struct resource *)NULL) {
			return (0);
		}
		if ((void *)rman_get_start(sc->ha_mes_res) == (void *)NULL) {
			return (0);
		}
		sc->ha_Fvirt = (U8 *) rman_get_virtual(sc->ha_mes_res);
	} else {
		/* Single BAR: messaging registers share the IOP mapping */
		sc->ha_Fvirt = (U8 *)(sc->ha_Virt);
	}
	return (1);
} /* asr_pci_map_mem */
2483
2484/*
2485 * A simplified copy of the real pci_map_int with additional
2486 * registration requirements.
2487 */
/*
 * A simplified copy of the real pci_map_int with additional
 * registration requirements.
 *
 * Allocates a shareable IRQ resource, hooks asr_intr as the interrupt
 * handler, and records the interrupt line from PCI config space.
 * Returns 1 on success, 0 on failure (resources are not released on
 * the failure paths; presumably freed at detach — TODO confirm).
 */
STATIC int
asr_pci_map_int (
	IN device_t	 tag,
	IN Asr_softc_t * sc)
{
	int rid = 0;
	int error;

	sc->ha_irq_res = bus_alloc_resource(tag, SYS_RES_IRQ, &rid,
	  0, ~0, 1, RF_ACTIVE | RF_SHAREABLE);
	if (sc->ha_irq_res == (struct resource *)NULL) {
		return (0);
	}
	error = bus_setup_intr(tag, sc->ha_irq_res, 0,
			       (driver_intr_t *)asr_intr, (void *)sc,
			       &(sc->ha_intr), NULL);
	if (error) {
		return (0);
	}
	sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char));
	return (1);
} /* asr_pci_map_int */
2510
2511/*
2512 * Attach the devices, and virtual devices to the driver list.
2513 */
2514STATIC ATTACH_RET
2515asr_attach (ATTACH_ARGS)
2516{
2517 Asr_softc_t * sc;
2518 struct scsi_inquiry_data * iq;
2519 ATTACH_SET();
2520
076ae0ab 2521 sc = malloc(sizeof(*sc), M_DEVBUF, M_INTWAIT);
984263bc
MD
2522 if (Asr_softc == (Asr_softc_t *)NULL) {
2523 /*
2524 * Fixup the OS revision as saved in the dptsig for the
2525 * engine (dptioctl.h) to pick up.
2526 */
2527 bcopy (osrelease, &ASR_sig.dsDescription[16], 5);
fef8985e 2528 printf ("asr%d: major=%d\n", unit, asr_ops.head.maj);
984263bc
MD
2529 }
2530 /*
2531 * Initialize the software structure
2532 */
2533 bzero (sc, sizeof(*sc));
2534 LIST_INIT(&(sc->ha_ccb));
984263bc
MD
2535 /* Link us into the HA list */
2536 {
2537 Asr_softc_t **ha;
2538
2539 for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next));
2540 *(ha) = sc;
2541 }
2542 {
2543 PI2O_EXEC_STATUS_GET_REPLY status;
2544 int size;
2545
2546 /*
2547 * This is the real McCoy!
2548 */
2549 if (!asr_pci_map_mem(tag, sc)) {
2550 printf ("asr%d: could not map memory\n", unit);
2551 ATTACH_RETURN(ENXIO);
2552 }
2553 /* Enable if not formerly enabled */
984263bc
MD
2554 pci_write_config (tag, PCIR_COMMAND,
2555 pci_read_config (tag, PCIR_COMMAND, sizeof(char))
2556 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2557 /* Knowledge is power, responsibility is direct */
2558 {
2559 struct pci_devinfo {
2560 STAILQ_ENTRY(pci_devinfo) pci_links;
2561 struct resource_list resources;
2562 pcicfgregs cfg;
2563 } * dinfo = device_get_ivars(tag);
2564 sc->ha_pciBusNum = dinfo->cfg.bus;
2565 sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3)
2566 | dinfo->cfg.func;
2567 }
984263bc
MD
2568 /* Check if the device is there? */
2569 if ((ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt) == 0)
2570 || ((status = (PI2O_EXEC_STATUS_GET_REPLY)malloc (
2571 sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK))
2572 == (PI2O_EXEC_STATUS_GET_REPLY)NULL)
2573 || (ASR_getStatus(sc->ha_Virt, sc->ha_Fvirt, status) == NULL)) {
2574 printf ("asr%d: could not initialize hardware\n", unit);
2575 ATTACH_RETURN(ENODEV); /* Get next, maybe better luck */
2576 }
2577 sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2578 sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2579 sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2580 sc->ha_SystemTable.IopState = status->IopState;
2581 sc->ha_SystemTable.MessengerType = status->MessengerType;
2582 sc->ha_SystemTable.InboundMessageFrameSize
2583 = status->InboundMFrameSize;
2584 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow
2585 = (U32)(sc->ha_Base) + (U32)(&(((i2oRegs_t *)NULL)->ToFIFO));
2586
2587 if (!asr_pci_map_int(tag, (void *)sc)) {
2588 printf ("asr%d: could not map interrupt\n", unit);
2589 ATTACH_RETURN(ENXIO);
2590 }
2591
2592 /* Adjust the maximim inbound count */
2593 if (((sc->ha_QueueSize
2594 = I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status))
2595 > MAX_INBOUND)
2596 || (sc->ha_QueueSize == 0)) {
2597 sc->ha_QueueSize = MAX_INBOUND;
2598 }
2599
2600 /* Adjust the maximum outbound count */
2601 if (((sc->ha_Msgs_Count
2602 = I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status))
2603 > MAX_OUTBOUND)
2604 || (sc->ha_Msgs_Count == 0)) {
2605 sc->ha_Msgs_Count = MAX_OUTBOUND;
2606 }
2607 if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2608 sc->ha_Msgs_Count = sc->ha_QueueSize;
2609 }
2610
2611 /* Adjust the maximum SG size to adapter */
2612 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(
2613 status) << 2)) > MAX_INBOUND_SIZE) {
2614 size = MAX_INBOUND_SIZE;
2615 }
2616 free (status, M_TEMP);
2617 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2618 + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2619 }
2620
2621 /*
2622 * Only do a bus/HBA reset on the first time through. On this
2623 * first time through, we do not send a flush to the devices.
2624 */
2625 if (ASR_init(sc) == 0) {
2626 struct BufferInfo {
2627 I2O_PARAM_RESULTS_LIST_HEADER Header;
2628 I2O_PARAM_READ_OPERATION_RESULT Read;
2629 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2630 };
2631 defAlignLong (struct BufferInfo, Buffer);
2632 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2633# define FW_DEBUG_BLED_OFFSET 8
2634
2635 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2636 ASR_getParams(sc, 0,
2637 I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2638 Buffer, sizeof(struct BufferInfo)))
2639 != (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)NULL) {
2640 sc->ha_blinkLED = sc->ha_Fvirt
2641 + I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info)
2642 + FW_DEBUG_BLED_OFFSET;
2643 }
2644 if (ASR_acquireLct(sc) == 0) {
2645 (void)ASR_acquireHrt(sc);
2646 }
2647 } else {
2648 printf ("asr%d: failed to initialize\n", unit);
2649 ATTACH_RETURN(ENXIO);
2650 }
2651 /*
2652 * Add in additional probe responses for more channels. We
2653 * are reusing the variable `target' for a channel loop counter.
2654 * Done here because of we need both the acquireLct and
2655 * acquireHrt data.
2656 */
2657 { PI2O_LCT_ENTRY Device;
2658
2659 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2660 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
2661 ++Device) {
2662 if (Device->le_type == I2O_UNKNOWN) {
2663 continue;
2664 }
2665 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
2666 if (Device->le_target > sc->ha_MaxId) {
2667 sc->ha_MaxId = Device->le_target;
2668 }
2669 if (Device->le_lun > sc->ha_MaxLun) {
2670 sc->ha_MaxLun = Device->le_lun;
2671 }
2672 }
2673 if (((Device->le_type & I2O_PORT) != 0)
2674 && (Device->le_bus <= MAX_CHANNEL)) {
2675 /* Do not increase MaxId for efficiency */
2676 sc->ha_adapter_target[Device->le_bus]
2677 = Device->le_target;
2678 }
2679 }
2680 }
2681
2682
2683 /*
2684 * Print the HBA model number as inquired from the card.
2685 */
2686
2687 printf ("asr%d:", unit);
2688
2689 if ((iq = (struct scsi_inquiry_data *)malloc (
2690 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK))
2691 != (struct scsi_inquiry_data *)NULL) {
2692 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
2693 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
2694 int posted = 0;
2695
2696 bzero (iq, sizeof(struct scsi_inquiry_data));
2697 bzero (Message_Ptr
2698 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
2699 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2700 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2701
2702 I2O_MESSAGE_FRAME_setVersionOffset(
2703 (PI2O_MESSAGE_FRAME)Message_Ptr,
2704 I2O_VERSION_11
2705 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2706 - sizeof(I2O_SG_ELEMENT))
2707 / sizeof(U32)) << 4));
2708 I2O_MESSAGE_FRAME_setMessageSize(
2709 (PI2O_MESSAGE_FRAME)Message_Ptr,
2710 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2711 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT))
2712 / sizeof(U32));
2713 I2O_MESSAGE_FRAME_setInitiatorAddress (
2714 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2715 I2O_MESSAGE_FRAME_setFunction(
2716 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2717 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2718 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2719 I2O_SCSI_SCB_EXEC);
2720 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2721 I2O_SCB_FLAG_ENABLE_DISCONNECT
2722 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2723 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2724 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
2725 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2726 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2727 DPT_ORGANIZATION_ID);
2728 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2729 Message_Ptr->CDB[0] = INQUIRY;
2730 Message_Ptr->CDB[4] = (unsigned char)sizeof(struct scsi_inquiry_data);
2731 if (Message_Ptr->CDB[4] == 0) {
2732 Message_Ptr->CDB[4] = 255;
2733 }
2734
2735 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2736 (I2O_SCB_FLAG_XFER_FROM_DEVICE
2737 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2738 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2739 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2740
2741 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2742 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2743 sizeof(struct scsi_inquiry_data));
2744 SG(&(Message_Ptr->SGL), 0,
2745 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2746 iq, sizeof(struct scsi_inquiry_data));
2747 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2748
2749 if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
2750 printf (" ");
2751 ASR_prstring (iq->vendor, 8);
2752 ++posted;
2753 }
2754 if (iq->product[0] && (iq->product[0] != ' ')) {
2755 printf (" ");
2756 ASR_prstring (iq->product, 16);
2757 ++posted;
2758 }
2759 if (iq->revision[0] && (iq->revision[0] != ' ')) {
2760 printf (" FW Rev. ");
2761 ASR_prstring (iq->revision, 4);
2762 ++posted;
2763 }
2764 free ((caddr_t)iq, M_TEMP);
2765 if (posted) {
2766 printf (",");
2767 }
2768 }
2769 printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
2770 (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
2771
2772 /*
2773 * fill in the prototype cam_path.
2774 */
2775 {
2776 int bus;
2777 union asr_ccb * ccb;
2778
2779 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
2780 printf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit);
2781 ATTACH_RETURN(ENOMEM);
2782 }
2783 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
984263bc
MD
2784 int QueueSize = sc->ha_QueueSize;
2785
2786 if (QueueSize > MAX_INBOUND) {
2787 QueueSize = MAX_INBOUND;
2788 }
2789
984263bc
MD
2790 /*
2791 * Construct our first channel SIM entry
2792 */
2793 sc->ha_sim[bus] = cam_sim_alloc(
2794 asr_action, asr_poll, "asr", sc,
521cf4d2
MD
2795 unit, 1, QueueSize, NULL);
2796 if (sc->ha_sim[bus] == NULL)
984263bc 2797 continue;
984263bc
MD
2798
2799 if (xpt_bus_register(sc->ha_sim[bus], bus)
2800 != CAM_SUCCESS) {
521cf4d2 2801 cam_sim_free(sc->ha_sim[bus]);
984263bc
MD
2802 sc->ha_sim[bus] = NULL;
2803 continue;
2804 }
2805
2806 if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
2807 cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
2808 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2809 xpt_bus_deregister(
2810 cam_sim_path(sc->ha_sim[bus]));
521cf4d2 2811 cam_sim_free(sc->ha_sim[bus]);
984263bc
MD
2812 sc->ha_sim[bus] = NULL;
2813 continue;
2814 }
2815 }
2816 asr_free_ccb (ccb);
2817 }
2818 /*
2819 * Generate the device node information
2820 */
fef8985e 2821 make_dev(&asr_ops, unit, 0, 0, S_IRWXU, "rasr%d", unit);
984263bc
MD
2822 ATTACH_RETURN(0);
2823} /* asr_attach */
2824
2825STATIC void
2826asr_poll(
2827 IN struct cam_sim *sim)
2828{
2829 asr_intr(cam_sim_softc(sim));
2830} /* asr_poll */
2831
/*
 * CAM action entry point: dispatch a CCB from the transport layer.
 * SCSI I/O is translated into an I2O private SCB-execute message and
 * queued to the IOP; most other function codes are completed inline
 * with static answers or CAM_REQ_INVALID.
 */
STATIC void
asr_action(
	IN struct cam_sim * sim,
	IN union ccb	  * ccb)
{
	struct Asr_softc  * sc;

	debug_asr_printf ("asr_action(%lx,%lx{%x})\n",
	  (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code);

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));

	/* Stash the softc in the CCB for the completion path */
	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {

	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct Message {
			char M[MAX_INBOUND_SIZE];
		};
		defAlignLong(struct Message,Message);
		PI2O_MESSAGE_FRAME   Message_Ptr;

		/* Reject incoming commands while we are resetting the card */
		if (sc->ha_in_reset != HA_OPERATIONAL) {
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			if (sc->ha_in_reset >= HA_OFF_LINE) {
				/* HBA is now off-line */
				ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
			} else {
				/* HBA currently resetting, try again later. */
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			}
			debug_asr_cmd_printf (" e\n");
			xpt_done(ccb);
			debug_asr_cmd_printf (" q\n");
			break;
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			printf(
			  "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ccb->csio.cdb_io.cdb_bytes[0],
			  cam_sim_bus(sim),
			  ccb->ccb_h.target_id,
			  ccb->ccb_h.target_lun);
		}
		debug_asr_cmd_printf ("(%d,%d,%d,%d)",
		  cam_sim_unit(sim),
		  cam_sim_bus(sim),
		  ccb->ccb_h.target_id,
		  ccb->ccb_h.target_lun);
		debug_asr_cmd_dump_ccb(ccb);

		/* Build the I2O message; NULL means no TID for the device */
		if ((Message_Ptr = ASR_init_message ((union asr_ccb *)ccb,
		  (PI2O_MESSAGE_FRAME)Message)) != (PI2O_MESSAGE_FRAME)NULL) {
			debug_asr_cmd2_printf ("TID=%x:\n",
			  PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
			debug_asr_cmd2_dump_message(Message_Ptr);
			debug_asr_cmd1_printf (" q");

			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
				/* Inbound FIFO full: ask CAM to retry */
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
				debug_asr_cmd_printf (" E\n");
				xpt_done(ccb);
			}
			debug_asr_cmd_printf (" Q\n");
			break;
		}
		/*
		 * We will get here if there is no valid TID for the device
		 * referenced in the scsi command packet.
		 */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
		debug_asr_cmd_printf (" B\n");
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		/* Reset HBA device ... */
		asr_hbareset (sc);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

#	if (defined(REPORT_LUNS))
	case REPORT_LUNS:
#	endif
	case XPT_ABORT:		/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* XXX Implement */
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct	ccb_trans_settings *cts;
		u_int	target_mask;

		cts = &(ccb->cts);
		target_mask = 0x01 << ccb->ccb_h.target_id;
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			/* Report fixed wide/tagged 40MHz settings */
			cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
			cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			cts->sync_period = 6; /* 40MHz */
			cts->sync_offset = 15;

			cts->valid = CCB_TRANS_SYNC_RATE_VALID
				   | CCB_TRANS_SYNC_OFFSET_VALID
				   | CCB_TRANS_BUS_WIDTH_VALID
				   | CCB_TRANS_DISC_VALID
				   | CCB_TRANS_TQ_VALID;
			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		}
		xpt_done(ccb);
		break;
	}

	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;

		ccg = &(ccb->ccg);
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);

		/* Standard extended-translation geometry tiers */
		if (size_mb > 4096) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else if (size_mb > 2048) {
			ccg->heads = 128;
			ccg->secs_per_track = 63;
		} else if (size_mb > 1024) {
			ccg->heads = 65;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_BUS:	/* Reset the specified SCSI bus */
		ASR_resetBus (sc, cam_sim_bus(sim));
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_TERM_IO:	/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:	/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &(ccb->cpi);

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		/* Not necessary to reset bus, done by HDM initialization */
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sc->ha_MaxId;
		cpi->max_lun = sc->ha_MaxLun;
		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
} /* asr_action */
3036
984263bc
MD
3037
3038/*
3039 * Handle processing of current CCB as pointed to by the Status.
3040 */
/*
 * Interrupt service routine.  Drains the adapter's outbound (reply) FIFO,
 * completing one CCB per reply frame.  Returns non-zero if at least one
 * reply was processed, zero if the FIFO was empty on entry.
 */
3041STATIC int
3042asr_intr (
3043 IN Asr_softc_t * sc)
3044{
3045 OUT int processed;
3046
984263bc
MD
 /* Loop for as long as the status register keeps the condition asserted. */
3047 for (processed = 0;
3048 sc->ha_Virt->Status & Mask_InterruptsDisabled;
3049 processed = 1) {
3050 union asr_ccb * ccb;
3051 U32 ReplyOffset;
3052 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
3053
 /*
  * Pop a reply MFA from the outbound FIFO.  The read is retried
  * once before concluding the queue is empty.
  */
3054 if (((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)
3055 && ((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)) {
3056 break;
3057 }
 /* Translate the bus-physical reply offset into a kernel pointer. */
3058 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
3059 - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
3060 /*
3061 * We do not need any (optional byteswapping) method access to
3062 * the Initiator context field.
3063 */
3064 ccb = (union asr_ccb *)(long)
3065 I2O_MESSAGE_FRAME_getInitiatorContext64(
3066 &(Reply->StdReplyFrame.StdMessageFrame));
 /*
  * Failure reply: the controller rejected the original message.
  * Recover the preserved MFA, tag the reply with a private error
  * code, and recycle the original frame as a NOP.
  */
3067 if (I2O_MESSAGE_FRAME_getMsgFlags(
3068 &(Reply->StdReplyFrame.StdMessageFrame))
3069 & I2O_MESSAGE_FLAGS_FAIL) {
3070 defAlignLong(I2O_UTIL_NOP_MESSAGE,Message);
3071 PI2O_UTIL_NOP_MESSAGE Message_Ptr;
3072 U32 MessageOffset;
3073
3074 MessageOffset = (u_long)
3075 I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
3076 (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
3077 /*
3078 * Get the Original Message Frame's address, and get
3079 * its Transaction Context into our space. (Currently
3080 * unused at original authorship, but better to be
3081 * safe than sorry). Straight copy means that we
3082 * need not concern ourselves with the (optional
3083 * byteswapping) method access.
3084 */
3085 Reply->StdReplyFrame.TransactionContext
3086 = ((PI2O_SINGLE_REPLY_MESSAGE_FRAME)
3087 (sc->ha_Fvirt + MessageOffset))->TransactionContext;
3088 /*
3089 * For 64 bit machines, we need to reconstruct the
3090 * 64 bit context.
3091 */
3092 ccb = (union asr_ccb *)(long)
3093 I2O_MESSAGE_FRAME_getInitiatorContext64(
3094 &(Reply->StdReplyFrame.StdMessageFrame));
3095 /*
3096 * Unique error code for command failure.
3097 */
3098 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3099 &(Reply->StdReplyFrame), (u_int16_t)-2);
3100 /*
3101 * Modify the message frame to contain a NOP and
3102 * re-issue it to the controller.
3103 */
3104 Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
3105 Message, sizeof(I2O_UTIL_NOP_MESSAGE));
3106# if (I2O_UTIL_NOP != 0)
3107 I2O_MESSAGE_FRAME_setFunction (
3108 &(Message_Ptr->StdMessageFrame),
3109 I2O_UTIL_NOP);
3110# endif
3111 /*
3112 * Copy the packet out to the Original Message
3113 */
3114 bcopy ((caddr_t)Message_Ptr,
3115 sc->ha_Fvirt + MessageOffset,
3116 sizeof(I2O_UTIL_NOP_MESSAGE));
3117 /*
3118 * Issue the NOP
3119 */
3120 sc->ha_Virt->ToFIFO = MessageOffset;
3121 }
3122
3123 /*
3124 * Asynchronous command with no return requirements,
3125 * and a generic handler for immunity against odd error
3126 * returns from the adapter.
3127 */
3128 if (ccb == (union asr_ccb *)NULL) {
3129 /*
3130 * Return Reply so that it can be used for the
3131 * next command
3132 */
3133 sc->ha_Virt->FromFIFO = ReplyOffset;
3134 continue;
3135 }
3136
3137 /* Welease Wadjah! (and stop timeouts) */
3138 ASR_ccbRemove (sc, ccb);
3139
 /* Map the I2O detailed status code onto a CAM completion status. */
3140 switch (
3141 I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
3142 &(Reply->StdReplyFrame))) {
3143
3144 case I2O_SCSI_DSC_SUCCESS:
3145 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3146 ccb->ccb_h.status |= CAM_REQ_CMP;
3147 break;
3148
3149 case I2O_SCSI_DSC_CHECK_CONDITION:
3150 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3151 ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID;
3152 break;
3153
3154 case I2O_SCSI_DSC_BUSY:
3155 /* FALLTHRU */
3156 case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
3157 /* FALLTHRU */
3158 case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
3159 /* FALLTHRU */
3160 case I2O_SCSI_HBA_DSC_BUS_BUSY:
3161 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3162 ccb->ccb_h.status |= CAM_SCSI_BUSY;
3163 break;
3164
3165 case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
3166 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3167 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3168 break;
3169
3170 case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
3171 /* FALLTHRU */
3172 case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
3173 /* FALLTHRU */
3174 case I2O_SCSI_HBA_DSC_LUN_INVALID:
3175 /* FALLTHRU */
3176 case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
3177 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3178 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
3179 break;
3180
3181 case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
3182 /* FALLTHRU */
3183 case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
3184 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3185 ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
3186 break;
3187
3188 default:
3189 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3190 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3191 break;
3192 }
 /* Residual = requested transfer length minus the reported count. */
3193 if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
3194 ccb->csio.resid -=
3195 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
3196 Reply);
3197 }
3198
984263bc
MD
3199 /* Sense data in reply packet */
3200 if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
3201 u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);
3202
 /*
  * Clamp the sense length to the smallest of the CCB's
  * sense buffer, the I2O frame's sense area, and the
  * caller-requested sense_len before copying.
  */
3203 if (size) {
3204 if (size > sizeof(ccb->csio.sense_data)) {
3205 size = sizeof(ccb->csio.sense_data);
3206 }
3207 if (size > I2O_SCSI_SENSE_DATA_SZ) {
3208 size = I2O_SCSI_SENSE_DATA_SZ;
3209 }
3210 if ((ccb->csio.sense_len)
3211 && (size > ccb->csio.sense_len)) {
3212 size = ccb->csio.sense_len;
3213 }
3214 bcopy ((caddr_t)Reply->SenseData,
3215 (caddr_t)&(ccb->csio.sense_data), size);
3216 }
3217 }
3218
3219 /*
3220 * Return Reply so that it can be used for the next command
3221 * since we have no more need for it now
3222 */
3223 sc->ha_Virt->FromFIFO = ReplyOffset;
3224
 /*
  * CAM-originated requests complete via xpt_done(); requests
  * without a path are synchronous waiters parked in tsleep().
  */
3225 if (ccb->ccb_h.path) {
3226 xpt_done ((union ccb *)ccb);
3227 } else {
3228 wakeup ((caddr_t)ccb);
3229 }
3230 }
984263bc
MD
3231 return (processed);
3232} /* asr_intr */
3233
3234#undef QueueSize /* Grrrr */
3235#undef SG_Size /* Grrrr */
3236
3237/*
3238 * Meant to be included at the bottom of asr.c !!!
3239 */
3240
3241/*
3242 * Included here as hard coded. Done because other necessary include
3243 * files utilize C++ comment structures which make them a nuisance to
3244 * include here just to pick up these three typedefs.
3245 */
3246typedef U32 DPT_TAG_T;
3247typedef U32 DPT_MSG_T;
3248typedef U32 DPT_RTN_T;
3249
3250#undef SCSI_RESET /* Conflicts with "scsi/scsiconf.h" definition */
1f2de5d4 3251#include "osd_unix.h"
984263bc
MD
3252
/* Map a character device to its adapter unit number. */
3253#define asr_unit(dev) minor(dev)
3254
/*
 * Look up the adapter softc for a character device.  Walks the global
 * Asr_softc list comparing each adapter's first CAM SIM unit against the
 * unit encoded in the device; returns NULL if no adapter matches.
 */
3255STATIC INLINE Asr_softc_t *
3256ASR_get_sc (
3257 IN dev_t dev)
3258{
3259 int unit = asr_unit(dev);
3260 OUT Asr_softc_t * sc = Asr_softc;
3261
 /* Linear scan of the singly-linked ha_next chain. */
3262 while (sc && sc->ha_sim[0] && (cam_sim_unit(sc->ha_sim[0]) != unit)) {
3263 sc = sc->ha_next;
3264 }
3265 return (sc);
3266} /* ASR_get_sc */
3267
/* Non-zero while the control device is open; enforces exclusive access. */
3268STATIC u_int8_t ASR_ctlr_held;
/* Silence unused-parameter warnings where the platform lacks this macro. */
3269#if (!defined(UNREFERENCED_PARAMETER))
3270# define UNREFERENCED_PARAMETER(x) (void)(x)
3271#endif
3272
/*
 * Open entry point for the control device.
 *
 * Returns ENODEV if the unit does not map to a registered adapter,
 * EBUSY if the device is already held open, or the suser_cred() result
 * (0 on success) after which the exclusive-open flag is taken.  The
 * check-and-set of ASR_ctlr_held is done inside a critical section.
 */
3273STATIC int
fef8985e 3274asr_open(struct dev_open_args *ap)
984263bc 3275{
fef8985e
MD
3276 dev_t dev = ap->a_head.a_dev;
3277 OUT int error;
984263bc
MD
3278
3279 if (ASR_get_sc (dev) == (Asr_softc_t *)NULL) {
3280 return (ENODEV);
3281 }
7f2216bc 3282 crit_enter();
984263bc
MD
3283 if (ASR_ctlr_held) {
3284 error = EBUSY;
fef8985e 3285 } else if ((error = suser_cred(ap->a_cred, 0)) == 0) {
984263bc
MD
3286 ++ASR_ctlr_held;
3287 }
7f2216bc 3288 crit_exit();
984263bc
MD
3289 return (error);
3290} /* asr_open */
3291
/*
 * Close entry point for the control device: unconditionally release the
 * exclusive-open flag taken by asr_open().  Always succeeds.
 */
3292STATIC int
fef8985e 3293asr_close(struct dev_close_args *ap)
984263bc 3294{
984263bc
MD
3295 ASR_ctlr_held = 0;
3296 return (0);
3297} /* asr_close */
3298
3299
3300/*-------------------------------------------------------------------------*/
3301/* Function ASR_queue_i */
3302/*-------------------------------------------------------------------------*/
3303/* The Parameters Passed To This Function Are : */
3304/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
3305/* PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command */
3306/* I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure */
3307/* */
3308/* This Function Will Take The User Request Packet And Convert It To An */
3309/* I2O MSG And Send It Off To The Adapter. */
3310/* */
3311/* Return : 0 For OK, Error Code Otherwise */
3312/*-------------------------------------------------------------------------*/
/*
 * Phases: (1) copy in the user's message header and dispatch synchronous
 * init commands (IOP reset, status get, outbound init) directly;
 * (2) copy in the full message and the caller's reply header; (3) shadow
 * each user scatter/gather element with a kernel buffer, splitting SG
 * entries on physical page discontinuities; (4) queue the message and
 * sleep until the CCB completes (resetting the adapter on BlinkLED);
 * (5) copy result data and the reply frame back out to user space.
 */
3313STATIC INLINE int
3314ASR_queue_i(
3315 IN Asr_softc_t * sc,
3316 INOUT PI2O_MESSAGE_FRAME Packet)
3317{
3318 union asr_ccb * ccb;
3319 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
3320 PI2O_MESSAGE_FRAME Message_Ptr;
3321 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply_Ptr;
3322 int MessageSizeInBytes;
3323 int ReplySizeInBytes;
3324 int error;
3325 int s;
3326 /* Scatter Gather buffer list */
3327 struct ioctlSgList_S {
3328 SLIST_ENTRY(ioctlSgList_S) link;
3329 caddr_t UserSpace;
3330 I2O_FLAGS_COUNT FlagsCount;
3331 char KernelSpace[sizeof(long)];
3332 } * elm;
3333 /* Generates a `first' entry */
3334 SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;
3335
 /* Refuse new work while the adapter reports a BlinkLED fault code. */
3336 if (ASR_getBlinkLedCode(sc)) {
3337 debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
3338 ASR_getBlinkLedCode(sc));
3339 return (EIO);
3340 }
3341 /* Copy in the message into a local allocation */
3342 if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (
3343 sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK))
3344 == (PI2O_MESSAGE_FRAME)NULL) {
3345 debug_usr_cmd_printf (
3346 "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3347 return (ENOMEM);
3348 }
3349 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3350 sizeof(I2O_MESSAGE_FRAME))) != 0) {
3351 free (Message_Ptr, M_TEMP);
3352 debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
3353 return (error);
3354 }
3355 /* Acquire information to determine type of packet */
3356 MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
3357 /* The offset of the reply information within the user packet */
3358 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
3359 + MessageSizeInBytes);
3360
3361 /* Check if the message is a synchronous initialization command */
3362 s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
3363 free (Message_Ptr, M_TEMP);
 /*
  * These three functions are executed inline rather than queued;
  * each copies its raw status word straight back to the user's
  * reply area and returns.
  */
3364 switch (s) {

3366 case I2O_EXEC_IOP_RESET:
3367 { U32 status;
3368
3369 status = ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt);
3370 ReplySizeInBytes = sizeof(status);
3371 debug_usr_cmd_printf ("resetIOP done\n");
3372 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3373 ReplySizeInBytes));
3374 }
3375
3376 case I2O_EXEC_STATUS_GET:
3377 { I2O_EXEC_STATUS_GET_REPLY status;
3378
3379 if (ASR_getStatus (sc->ha_Virt, sc->ha_Fvirt, &status)
3380 == (PI2O_EXEC_STATUS_GET_REPLY)NULL) {
3381 debug_usr_cmd_printf ("getStatus failed\n");
3382 return (ENXIO);
3383 }
3384 ReplySizeInBytes = sizeof(status);
3385 debug_usr_cmd_printf ("getStatus done\n");
3386 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3387 ReplySizeInBytes));
3388 }
3389
3390 case I2O_EXEC_OUTBOUND_INIT:
3391 { U32 status;
3392
3393 status = ASR_initOutBound(sc);
3394 ReplySizeInBytes = sizeof(status);
3395 debug_usr_cmd_printf ("intOutBound done\n");
3396 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3397 ReplySizeInBytes));
3398 }
3399 }
3400
3401 /* Determine if the message size is valid */
3402 if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
3403 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
3404 debug_usr_cmd_printf ("Packet size %d incorrect\n",
3405 MessageSizeInBytes);
3406 return (EINVAL);
3407 }
3408
 /* Copy in the complete message frame at its declared size. */
3409 if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (MessageSizeInBytes,
3410 M_TEMP, M_WAITOK)) == (PI2O_MESSAGE_FRAME)NULL) {
3411 debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3412 MessageSizeInBytes);
3413 return (ENOMEM);
3414 }
3415 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3416 MessageSizeInBytes)) != 0) {
3417 free (Message_Ptr, M_TEMP);
3418 debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
3419 MessageSizeInBytes, error);
3420 return (error);
3421 }
3422
3423 /* Check the size of the reply frame, and start constructing */
3424
 /* First read just the reply header to learn the caller's reply size. */
3425 if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
3426 sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK))
3427 == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
3428 free (Message_Ptr, M_TEMP);
3429 debug_usr_cmd_printf (
3430 "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3431 return (ENOMEM);
3432 }
3433 if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
3434 sizeof(I2O_MESSAGE_FRAME))) != 0) {
3435 free (Reply_Ptr, M_TEMP);
3436 free (Message_Ptr, M_TEMP);
3437 debug_usr_cmd_printf (
3438 "Failed to copy in reply frame, errno=%d\n",
3439 error);
3440 return (error);
3441 }
3442 ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
3443 &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
3444 free (Reply_Ptr, M_TEMP);
3445 if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
3446 free (Message_Ptr, M_TEMP);
3447 debug_usr_cmd_printf (
3448 "Failed to copy in reply frame[%d], errno=%d\n",
3449 ReplySizeInBytes, error);
3450 return (EINVAL);
3451 }
3452
 /* Re-allocate the reply at full size and pre-fill its header fields. */
3453 if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
3454 ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
3455 ? ReplySizeInBytes
3456 : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
3457 M_TEMP, M_WAITOK)) == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
3458 free (Message_Ptr, M_TEMP);
3459 debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3460 ReplySizeInBytes);
3461 return (ENOMEM);
3462 }
3463 (void)ASR_fillMessage ((char *)Reply_Ptr, ReplySizeInBytes);
3464 Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
3465 = Message_Ptr->InitiatorContext;
3466 Reply_Ptr->StdReplyFrame.TransactionContext
3467 = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
3468 I2O_MESSAGE_FRAME_setMsgFlags(
3469 &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3470 I2O_MESSAGE_FRAME_getMsgFlags(
3471 &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
3472 | I2O_MESSAGE_FLAGS_REPLY);
3473
3474 /* Check if the message is a special case command */
3475 switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
3476 case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
3477 if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
3478 Message_Ptr) & 0xF0) >> 2)) {
3479 free (Message_Ptr, M_TEMP);
3480 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3481 &(Reply_Ptr->StdReplyFrame),
3482 (ASR_setSysTab(sc) != CAM_REQ_CMP));
3483 I2O_MESSAGE_FRAME_setMessageSize(
3484 &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3485 sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
3486 error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
3487 ReplySizeInBytes);
3488 free (Reply_Ptr, M_TEMP);
3489 return (error);
3490 }
3491 }
3492
3493 /* Deal in the general case */
3494 /* First allocate and optionally copy in each scatter gather element */
3495 SLIST_INIT(&sgList);
3496 if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
3497 PI2O_SGE_SIMPLE_ELEMENT sg;
3498
3499 /*
3500 * since this code is reused in several systems, code
3501 * efficiency is greater by using a shift operation rather
3502 * than a divide by sizeof(u_int32_t).
3503 */
3504 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3505 + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
3506 >> 2));
 /* Walk every SG element declared in the user's message frame. */
3507 while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
3508 + MessageSizeInBytes)) {
3509 caddr_t v;
3510 int len;
3511
 /* Only simple-address SG elements are supported. */
3512 if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3513 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
3514 error = EINVAL;
3515 break;
3516 }
3517 len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
3518 debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
3519 sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3520 + ((I2O_MESSAGE_FRAME_getVersionOffset(
3521 Message_Ptr) & 0xF0) >> 2)),
3522 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);
3523
 /* Shadow the user buffer with a kernel-side copy. */
3524 if ((elm = (struct ioctlSgList_S *)malloc (
3525 sizeof(*elm) - sizeof(elm->KernelSpace) + len,
3526 M_TEMP, M_WAITOK))
3527 == (struct ioctlSgList_S *)NULL) {
3528 debug_usr_cmd_printf (
3529 "Failed to allocate SG[%d]\n", len);
3530 error = ENOMEM;
3531 break;
3532 }
3533 SLIST_INSERT_HEAD(&sgList, elm, link);
3534 elm->FlagsCount = sg->FlagsCount;
3535 elm->UserSpace = (caddr_t)
3536 (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
3537 v = elm->KernelSpace;
3538 /* Copy in outgoing data (DIR bit could be invalid) */
3539 if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
3540 != 0) {
3541 break;
3542 }
3543 /*
3544 * If the buffer is not contiguous, lets
3545 * break up the scatter/gather entries.
3546 */
3547 while ((len > 0)
3548 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
3549 (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
3550 int next, base, span;
3551
3552 span = 0;
3553 next = base = KVTOPHYS(v);
3554 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
3555 base);
3556
3557 /* How far can we go physically contiguously */
3558 while ((len > 0) && (base == next)) {
3559 int size;
3560
3561 next = trunc_page(base) + PAGE_SIZE;
3562 size = next - base;
3563 if (size > len) {
3564 size = len;
3565 }
3566 span += size;
3567 v += size;
3568 len -= size;
3569 base = KVTOPHYS(v);
3570 }
3571
3572 /* Construct the Flags */
3573 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
3574 span);
3575 {
3576 int flags = I2O_FLAGS_COUNT_getFlags(
3577 &(elm->FlagsCount));
3578 /* Any remaining length? */
3579 if (len > 0) {
3580 flags &=
3581 ~(I2O_SGL_FLAGS_END_OF_BUFFER
3582 | I2O_SGL_FLAGS_LAST_ELEMENT);
3583 }
3584 I2O_FLAGS_COUNT_setFlags(
3585 &(sg->FlagsCount), flags);
3586 }
3587
3588 debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
3589 sg - (PI2O_SGE_SIMPLE_ELEMENT)
3590 ((char *)Message_Ptr
3591 + ((I2O_MESSAGE_FRAME_getVersionOffset(
3592 Message_Ptr) & 0xF0) >> 2)),
3593 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
3594 span);
3595 if (len <= 0) {
3596 break;
3597 }
3598
3599 /*
3600 * Incrementing requires resizing of the
3601 * packet, and moving up the existing SG
3602 * elements.
3603 */
3604 ++sg;
3605 MessageSizeInBytes += sizeof(*sg);
3606 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
3607 I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
3608 + (sizeof(*sg) / sizeof(U32)));
 /* Grow the frame and duplicate the split SG entry. */
3609 {
3610 PI2O_MESSAGE_FRAME NewMessage_Ptr;
3611
3612 if ((NewMessage_Ptr
3613 = (PI2O_MESSAGE_FRAME)
3614 malloc (MessageSizeInBytes,
3615 M_TEMP, M_WAITOK))
3616 == (PI2O_MESSAGE_FRAME)NULL) {
3617 debug_usr_cmd_printf (
3618 "Failed to acquire frame[%d] memory\n",
3619 MessageSizeInBytes);
3620 error = ENOMEM;
3621 break;
3622 }
3623 span = ((caddr_t)sg)
3624 - (caddr_t)Message_Ptr;
3625 bcopy ((caddr_t)Message_Ptr,
3626 (caddr_t)NewMessage_Ptr, span);
3627 bcopy ((caddr_t)(sg-1),
3628 ((caddr_t)NewMessage_Ptr) + span,
3629 MessageSizeInBytes - span);
3630 free (Message_Ptr, M_TEMP);
3631 sg = (PI2O_SGE_SIMPLE_ELEMENT)
3632 (((caddr_t)NewMessage_Ptr) + span);
3633 Message_Ptr = NewMessage_Ptr;
3634 }
3635 }
3636 if ((error)
3637 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3638 & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
3639 break;
3640 }
3641 ++sg;
3642 }
 /* On failure, unwind every shadow buffer before returning. */
3643 if (error) {
3644 while ((elm = SLIST_FIRST(&sgList))
3645 != (struct ioctlSgList_S *)NULL) {
3646 SLIST_REMOVE_HEAD(&sgList, link);
3647 free (elm, M_TEMP);
3648 }
3649 free (Reply_Ptr, M_TEMP);
3650 free (Message_Ptr, M_TEMP);
3651 return (error);
3652 }
3653 }
3654
3655 debug_usr_cmd_printf ("Inbound: ");
3656 debug_usr_cmd_dump_message(Message_Ptr);
3657
3658 /* Send the command */
3659 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
3660 /* Free up in-kernel buffers */
3661 while ((elm = SLIST_FIRST(&sgList))
3662 != (struct ioctlSgList_S *)NULL) {
3663 SLIST_REMOVE_HEAD(&sgList, link);
3664 free (elm, M_TEMP);
3665 }
3666 free (Reply_Ptr, M_TEMP);
3667 free (Message_Ptr, M_TEMP);
3668 return (ENOMEM);
3669 }
3670
3671 /*
3672 * We do not need any (optional byteswapping) method access to
3673 * the Initiator context field.
3674 */
3675 I2O_MESSAGE_FRAME_setInitiatorContext64(
3676 (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);
3677
3678 (void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
3679
3680 free (Message_Ptr, M_TEMP);
3681
3682 /*
3683 * Wait for the board to report a finished instruction.
3684 */
7f2216bc 3685 crit_enter();
984263bc
MD
 /*
  * Poll once per second; the interrupt handler wakes this ccb when
  * the reply arrives.  A BlinkLED code while waiting forces an
  * adapter reset and an EIO return.
  */
3686 while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
3687 if (ASR_getBlinkLedCode(sc)) {
3688 /* Reset Adapter */
3689 printf ("asr%d: Blink LED 0x%x resetting adapter\n",
3690 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
3691 ASR_getBlinkLedCode(sc));
3692 if (ASR_reset (sc) == ENXIO) {
3693 /* Command Cleanup */
3694 ASR_ccbRemove(sc, ccb);
3695 }
7f2216bc 3696 crit_exit();
984263bc
MD
3697 /* Free up in-kernel buffers */
3698 while ((elm = SLIST_FIRST(&sgList))
3699 != (struct ioctlSgList_S *)NULL) {
3700 SLIST_REMOVE_HEAD(&sgList, link);
3701 free (elm, M_TEMP);
3702 }
3703 free (Reply_Ptr, M_TEMP);
3704 asr_free_ccb(ccb);
3705 return (EIO);
3706 }
3707 /* Check every second for BlinkLed */
377d4740 3708 tsleep((caddr_t)ccb, 0, "asr", hz);
984263bc 3709 }
7f2216bc 3710 crit_exit();
984263bc
MD
3711
3712 debug_usr_cmd_printf ("Outbound: ");
3713 debug_usr_cmd_dump_message(Reply_Ptr);
3714
 /* Detailed status: 0 on CAM_REQ_CMP, 1 on any other completion. */
3715 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3716 &(Reply_Ptr->StdReplyFrame),
3717 (ccb->ccb_h.status != CAM_REQ_CMP));
3718
3719 if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3720 - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
3721 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
3722 ccb->csio.dxfer_len - ccb->csio.resid);
3723 }
 /* If autosense fired and the caller left room, return sense data. */
3724 if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
3725 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3726 - I2O_SCSI_SENSE_DATA_SZ))) {
3727 int size = ReplySizeInBytes
3728 - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3729 - I2O_SCSI_SENSE_DATA_SZ;
3730
3731 if (size > sizeof(ccb->csio.sense_data)) {
3732 size = sizeof(ccb->csio.sense_data);
3733 }
3734 bcopy ((caddr_t)&(ccb->csio.sense_data), (caddr_t)Reply_Ptr->SenseData,
3735 size);
3736 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
3737 Reply_Ptr, size);
3738 }
3739
3740 /* Free up in-kernel buffers */
3741 while ((elm = SLIST_FIRST(&sgList)) != (struct ioctlSgList_S *)NULL) {
3742 /* Copy out as necessary */
3743 if ((error == 0)
3744 /* DIR bit considered `valid', error due to ignorance works */
3745 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
3746 & I2O_SGL_FLAGS_DIR) == 0)) {
3747 error = copyout ((caddr_t)(elm->KernelSpace),
3748 elm->UserSpace,
3749 I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
3750 }
3751 SLIST_REMOVE_HEAD(&sgList, link);
3752 free (elm, M_TEMP);
3753 }
3754 if (error == 0) {
3755 /* Copy reply frame to user space */
3756 error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
3757 ReplySizeInBytes);
3758 }
3759 free (Reply_Ptr, M_TEMP);
3760 asr_free_ccb(ccb);
3761
3762 return (error);
3763} /* ASR_queue_i */
3764
3765/*----------------------------------------------------------------------*/
3766/* Function asr_ioctl */
3767/*----------------------------------------------------------------------*/
3768/* The parameters passed to this function are : */
3769/* dev : Device number. */
3770/* cmd : Ioctl Command */
3771/* data : User Argument Passed In. */
3772/* flag : Mode Parameter */
3773/* proc : Process Parameter */
3774/* */
3775/* This function is the user interface into this adapter driver */
3776/* */
3777/* Return : zero if OK, error code if not */
3778/*----------------------------------------------------------------------*/
3779
3780STATIC int
fef8985e