Move the callout init below the softc allocation. *sigh*
[dragonfly.git] / sys / dev / raid / asr / asr.c
CommitLineData
984263bc 1/* $FreeBSD: src/sys/dev/asr/asr.c,v 1.3.2.2 2001/08/23 05:21:29 scottl Exp $ */
fc800962 2/* $DragonFly: src/sys/dev/raid/asr/asr.c,v 1.17 2004/08/23 16:13:03 joerg Exp $ */
984263bc
MD
3/*
4 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
5 * Copyright (c) 2000-2001 Adaptec Corporation
6 * All rights reserved.
7 *
8 * TERMS AND CONDITIONS OF USE
9 *
10 * Redistribution and use in source form, with or without modification, are
11 * permitted provided that redistributions of source code must retain the
12 * above copyright notice, this list of conditions and the following disclaimer.
13 *
14 * This software is provided `as is' by Adaptec and any express or implied
15 * warranties, including, but not limited to, the implied warranties of
16 * merchantability and fitness for a particular purpose, are disclaimed. In no
17 * event shall Adaptec be liable for any direct, indirect, incidental, special,
18 * exemplary or consequential damages (including, but not limited to,
19 * procurement of substitute goods or services; loss of use, data, or profits;
20 * or business interruptions) however caused and on any theory of liability,
21 * whether in contract, strict liability, or tort (including negligence or
22 * otherwise) arising in any way out of the use of this driver software, even
23 * if advised of the possibility of such damage.
24 *
25 * SCSI I2O host adapter driver
26 *
27 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
28 * - The 2000S and 2005S do not initialize on some machines,
29 * increased timeout to 255ms from 50ms for the StatusGet
30 * command.
31 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
32 * - I knew this one was too good to be true. The error return
33 * on ioctl commands needs to be compared to CAM_REQ_CMP, not
34 * to the bit masked status.
35 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
36 * - The 2005S that was supported is affectionately called the
37 * Conjoined BAR Firmware. In order to support RAID-5 in a
38 * 16MB low-cost configuration, Firmware was forced to go
39 * to a Split BAR Firmware. This requires a separate IOP and
40 * Messaging base address.
41 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
42 * - Handle support for 2005S Zero Channel RAID solution.
43 * - System locked up if the Adapter locked up. Do not try
44 * to send other commands if the resetIOP command fails. The
45 * fail outstanding command discovery loop was flawed as the
46 * removal of the command from the list prevented discovering
47 * all the commands.
48 * - Comment changes to clarify driver.
49 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
50 * - We do not use the AC_FOUND_DEV event because of I2O.
51 * Removed asr_async.
52 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
53 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
54 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0
55 * mode as this is confused with competitor adapters in run
56 * mode.
57 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove
58 * to prevent operating system panic.
59 * - moved default major number to 154 from 97.
60 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
61 * - The controller is not actually an ASR (Adaptec SCSI RAID)
62 * series that is visible, it's more of an internal code name.
63 * remove any visible references within reason for now.
64 * - bus_ptr->LUN was not correctly zeroed when initially
65 * allocated causing a possible panic of the operating system
66 * during boot.
67 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
68 * - Code always fails for ASR_getTid affecting performance.
69 * - initiated a set of changes that resulted from a formal
70 * code inspection by Mark_Salyzyn@adaptec.com,
71 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
72 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
73 * Their findings were focussed on the LCT & TID handler, and
74 * all resulting changes were to improve code readability,
75 * consistency or have a positive effect on performance.
76 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
77 * - Passthrough returned an incorrect error.
78 * - Passthrough did not migrate the intrinsic scsi layer wakeup
79 * on command completion.
80 * - generate control device nodes using make_dev and delete_dev.
81 * - Performance affected by TID caching reallocing.
82 * - Made suggested changes by Justin_Gibbs@adaptec.com
83 * - use splcam instead of splbio.
84 * - use cam_imask instead of bio_imask.
85 * - use u_int8_t instead of u_char.
86 * - use u_int16_t instead of u_short.
87 * - use u_int32_t instead of u_long where appropriate.
88 * - use 64 bit context handler instead of 32 bit.
89 * - create_ccb should only allocate the worst case
90 * requirements for the driver since CAM may evolve
91 * making union ccb much larger than needed here.
92 * renamed create_ccb to asr_alloc_ccb.
93 * - go nutz justifying all debug prints as macros
94 * defined at the top and remove unsightly ifdefs.
95 * - INLINE STATIC viewed as confusing. Historically
96 * utilized to affect code performance and debug
97 * issues in OS, Compiler or OEM specific situations.
98 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
99 * - Ported from FreeBSD 2.2.X DPT I2O driver.
100 * changed struct scsi_xfer to union ccb/struct ccb_hdr
101 * changed variable name xs to ccb
102 * changed struct scsi_link to struct cam_path
103 * changed struct scsibus_data to struct cam_sim
104 * stopped using fordriver for holding on to the TID
105 * use proprietary packet creation instead of scsi_inquire
106 * CAM layer sends synchronize commands.
107 */
108
109#define ASR_VERSION 1
110#define ASR_REVISION '0'
111#define ASR_SUBREVISION '8'
112#define ASR_MONTH 8
113#define ASR_DAY 21
114#define ASR_YEAR 2001 - 1980
115
116/*
117 * Debug macros to reduce the unsightly ifdefs
118 */
119#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
120# define debug_asr_message(message) \
121 { \
122 u_int32_t * pointer = (u_int32_t *)message; \
123 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);\
124 u_int32_t counter = 0; \
125 \
126 while (length--) { \
127 printf ("%08lx%c", (u_long)*(pointer++), \
128 (((++counter & 7) == 0) || (length == 0)) \
129 ? '\n' \
130 : ' '); \
131 } \
132 }
133#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
134
135#if (defined(DEBUG_ASR))
136 /* Breaks on none STDC based compilers :-( */
137# define debug_asr_printf(fmt,args...) printf(fmt, ##args)
138# define debug_asr_dump_message(message) debug_asr_message(message)
139# define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path);
140 /* None fatal version of the ASSERT macro */
141# if (defined(__STDC__))
142# define ASSERT(phrase) if(!(phrase))printf(#phrase " at line %d file %s\n",__LINE__,__FILE__)
143# else
144# define ASSERT(phrase) if(!(phrase))printf("phrase" " at line %d file %s\n",__LINE__,__FILE__)
145# endif
146#else /* DEBUG_ASR */
147# define debug_asr_printf(fmt,args...)
148# define debug_asr_dump_message(message)
149# define debug_asr_print_path(ccb)
150# define ASSERT(x)
151#endif /* DEBUG_ASR */
152
153/*
154 * If DEBUG_ASR_CMD is defined:
155 * 0 - Display incoming SCSI commands
156 * 1 - add in a quick character before queueing.
157 * 2 - add in outgoing message frames.
158 */
159#if (defined(DEBUG_ASR_CMD))
160# define debug_asr_cmd_printf(fmt,args...) printf(fmt,##args)
161# define debug_asr_dump_ccb(ccb) \
162 { \
163 u_int8_t * cp = (unsigned char *)&(ccb->csio.cdb_io); \
164 int len = ccb->csio.cdb_len; \
165 \
166 while (len) { \
167 debug_asr_cmd_printf (" %02x", *(cp++)); \
168 --len; \
169 } \
170 }
171# if (DEBUG_ASR_CMD > 0)
172# define debug_asr_cmd1_printf debug_asr_cmd_printf
173# else
174# define debug_asr_cmd1_printf(fmt,args...)
175# endif
176# if (DEBUG_ASR_CMD > 1)
177# define debug_asr_cmd2_printf debug_asr_cmd_printf
178# define debug_asr_cmd2_dump_message(message) debug_asr_message(message)
179# else
180# define debug_asr_cmd2_printf(fmt,args...)
181# define debug_asr_cmd2_dump_message(message)
182# endif
183#else /* DEBUG_ASR_CMD */
184# define debug_asr_cmd_printf(fmt,args...)
185# define debug_asr_cmd_dump_ccb(ccb)
186# define debug_asr_cmd1_printf(fmt,args...)
187# define debug_asr_cmd2_printf(fmt,args...)
188# define debug_asr_cmd2_dump_message(message)
189#endif /* DEBUG_ASR_CMD */
190
191#if (defined(DEBUG_ASR_USR_CMD))
192# define debug_usr_cmd_printf(fmt,args...) printf(fmt,##args)
193# define debug_usr_cmd_dump_message(message) debug_usr_message(message)
194#else /* DEBUG_ASR_USR_CMD */
195# define debug_usr_cmd_printf(fmt,args...)
196# define debug_usr_cmd_dump_message(message)
197#endif /* DEBUG_ASR_USR_CMD */
198
199#define dsDescription_size 46 /* Snug as a bug in a rug */
1f2de5d4 200#include "dptsig.h"
984263bc
MD
201
202static dpt_sig_S ASR_sig = {
203 { 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
204 PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
205 OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL,
206 ADF_ALL_SC5,
207 0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
208 ASR_MONTH, ASR_DAY, ASR_YEAR,
209/* 01234567890123456789012345678901234567890123456789 < 50 chars */
210 "Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
211 /* ^^^^^ asr_attach alters these to match OS */
212};
213
214#include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */
215#include <sys/kernel.h>
216#include <sys/systm.h>
217#include <sys/malloc.h>
218#include <sys/proc.h>
219#include <sys/conf.h>
220#include <sys/disklabel.h>
221#include <sys/bus.h>
222#include <machine/resource.h>
223#include <machine/bus.h>
224#include <sys/rman.h>
225#include <sys/stat.h>
f15db79e 226#include <sys/device.h>
984263bc 227
1f2de5d4
MD
228#include <bus/cam/cam.h>
229#include <bus/cam/cam_ccb.h>
230#include <bus/cam/cam_sim.h>
231#include <bus/cam/cam_xpt_sim.h>
232#include <bus/cam/cam_xpt_periph.h>
984263bc 233
1f2de5d4
MD
234#include <bus/cam/scsi/scsi_all.h>
235#include <bus/cam/scsi/scsi_message.h>
984263bc
MD
236
237#include <vm/vm.h>
238#include <vm/pmap.h>
239#include <machine/cputypes.h>
240#include <machine/clock.h>
241#include <i386/include/vmparam.h>
242
1f2de5d4
MD
243#include <bus/pci/pcivar.h>
244#include <bus/pci/pcireg.h>
984263bc
MD
245
246#define STATIC static
247#define INLINE
248
249#if (defined(DEBUG_ASR) && (DEBUG_ASR > 0))
250# undef STATIC
251# define STATIC
252# undef INLINE
253# define INLINE
254#endif
255#define IN
256#define OUT
257#define INOUT
258
259#define osdSwap4(x) ((u_long)ntohl((u_long)(x)))
260#define KVTOPHYS(x) vtophys(x)
1f2de5d4
MD
261#include "dptalign.h"
262#include "i2oexec.h"
263#include "i2obscsi.h"
264#include "i2odpt.h"
265#include "i2oadptr.h"
1f2de5d4 266#include "sys_info.h"
984263bc
MD
267
268/* Configuration Definitions */
269
270#define SG_SIZE 58 /* Scatter Gather list Size */
271#define MAX_TARGET_ID 126 /* Maximum Target ID supported */
272#define MAX_LUN 255 /* Maximum LUN Supported */
273#define MAX_CHANNEL 7 /* Maximum Channel # Supported by driver */
274#define MAX_INBOUND 2000 /* Max CCBs, Also Max Queue Size */
275#define MAX_OUTBOUND 256 /* Maximum outbound frames/adapter */
276#define MAX_INBOUND_SIZE 512 /* Maximum inbound frame size */
277#define MAX_MAP 4194304L /* Maximum mapping size of IOP */
278 /* Also serves as the minimum map for */
279 /* the 2005S zero channel RAID product */
280
281/**************************************************************************
282** ASR Host Adapter structure - One Structure For Each Host Adapter That **
283** Is Configured Into The System. The Structure Supplies Configuration **
284** Information, Status Info, Queue Info And An Active CCB List Pointer. **
285***************************************************************************/
286
287/* I2O register set */
288typedef struct {
289 U8 Address[0x30];
290 volatile U32 Status;
291 volatile U32 Mask;
292# define Mask_InterruptsDisabled 0x08
293 U32 x[2];
294 volatile U32 ToFIFO; /* In Bound FIFO */
295 volatile U32 FromFIFO; /* Out Bound FIFO */
296} i2oRegs_t;
297
298/*
299 * A MIX of performance and space considerations for TID lookups
300 */
301typedef u_int16_t tid_t;
302
303typedef struct {
304 u_int32_t size; /* up to MAX_LUN */
305 tid_t TID[1];
306} lun2tid_t;
307
308typedef struct {
309 u_int32_t size; /* up to MAX_TARGET */
310 lun2tid_t * LUN[1];
311} target2lun_t;
312
313/*
314 * To ensure that we only allocate and use the worst case ccb here, lets
315 * make our own local ccb union. If asr_alloc_ccb is utilized for another
316 * ccb type, ensure that you add the additional structures into our local
317 * ccb union. To ensure strict type checking, we will utilize the local
318 * ccb definition wherever possible.
319 */
320union asr_ccb {
321 struct ccb_hdr ccb_h; /* For convenience */
322 struct ccb_scsiio csio;
323 struct ccb_setasync csa;
324};
325
326typedef struct Asr_softc {
327 u_int16_t ha_irq;
328 void * ha_Base; /* base port for each board */
329 u_int8_t * volatile ha_blinkLED;
330 i2oRegs_t * ha_Virt; /* Base address of IOP */
331 U8 * ha_Fvirt; /* Base address of Frames */
332 I2O_IOP_ENTRY ha_SystemTable;
333 LIST_HEAD(,ccb_hdr) ha_ccb; /* ccbs in use */
334 struct cam_path * ha_path[MAX_CHANNEL+1];
335 struct cam_sim * ha_sim[MAX_CHANNEL+1];
984263bc
MD
336 struct resource * ha_mem_res;
337 struct resource * ha_mes_res;
338 struct resource * ha_irq_res;
339 void * ha_intr;
984263bc
MD
340 PI2O_LCT ha_LCT; /* Complete list of devices */
341# define le_type IdentityTag[0]
342# define I2O_BSA 0x20
343# define I2O_FCA 0x40
344# define I2O_SCSI 0x00
345# define I2O_PORT 0x80
346# define I2O_UNKNOWN 0x7F
347# define le_bus IdentityTag[1]
348# define le_target IdentityTag[2]
349# define le_lun IdentityTag[3]
350 target2lun_t * ha_targets[MAX_CHANNEL+1];
351 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
352 u_long ha_Msgs_Phys;
353
354 u_int8_t ha_in_reset;
355# define HA_OPERATIONAL 0
356# define HA_IN_RESET 1
357# define HA_OFF_LINE 2
358# define HA_OFF_LINE_RECOVERY 3
359 /* Configuration information */
360 /* The target id maximums we take */
361 u_int8_t ha_MaxBus; /* Maximum bus */
362 u_int8_t ha_MaxId; /* Maximum target ID */
363 u_int8_t ha_MaxLun; /* Maximum target LUN */
364 u_int8_t ha_SgSize; /* Max SG elements */
365 u_int8_t ha_pciBusNum;
366 u_int8_t ha_pciDeviceNum;
367 u_int8_t ha_adapter_target[MAX_CHANNEL+1];
368 u_int16_t ha_QueueSize; /* Max outstanding commands */
369 u_int16_t ha_Msgs_Count;
370
371 /* Links into other parents and HBAs */
372 struct Asr_softc * ha_next; /* HBA list */
984263bc
MD
373} Asr_softc_t;
374
375STATIC Asr_softc_t * Asr_softc;
376
377/*
378 * Prototypes of the routines we have in this object.
379 */
380
381/* Externally callable routines */
984263bc
MD
382#define PROBE_ARGS IN device_t tag
383#define PROBE_RET int
384#define PROBE_SET() u_long id = (pci_get_device(tag)<<16)|pci_get_vendor(tag)
385#define PROBE_RETURN(retval) if(retval){device_set_desc(tag,retval);return(0);}else{return(ENXIO);}
386#define ATTACH_ARGS IN device_t tag
387#define ATTACH_RET int
388#define ATTACH_SET() int unit = device_get_unit(tag)
389#define ATTACH_RETURN(retval) return(retval)
984263bc 390/* I2O HDM interface */
5ca58d54
RG
391STATIC PROBE_RET asr_probe (PROBE_ARGS);
392STATIC ATTACH_RET asr_attach (ATTACH_ARGS);
984263bc 393/* DOMINO placeholder */
5ca58d54
RG
394STATIC PROBE_RET domino_probe (PROBE_ARGS);
395STATIC ATTACH_RET domino_attach (ATTACH_ARGS);
984263bc 396/* MODE0 adapter placeholder */
5ca58d54
RG
397STATIC PROBE_RET mode0_probe (PROBE_ARGS);
398STATIC ATTACH_RET mode0_attach (ATTACH_ARGS);
984263bc 399
5ca58d54
RG
400STATIC Asr_softc_t * ASR_get_sc (
401 IN dev_t dev);
402STATIC int asr_ioctl (
984263bc
MD
403 IN dev_t dev,
404 IN u_long cmd,
405 INOUT caddr_t data,
406 int flag,
5ca58d54
RG
407 d_thread_t *td);
408STATIC int asr_open (
984263bc
MD
409 IN dev_t dev,
410 int32_t flags,
411 int32_t ifmt,
5ca58d54
RG
412 IN d_thread_t *td);
413STATIC int asr_close (
984263bc
MD
414 dev_t dev,
415 int flags,
416 int ifmt,
5ca58d54
RG
417 d_thread_t *td);
418STATIC int asr_intr (
419 IN Asr_softc_t * sc);
420STATIC void asr_timeout (
421 INOUT void * arg);
422STATIC int ASR_init (
423 IN Asr_softc_t * sc);
424STATIC INLINE int ASR_acquireLct (
425 INOUT Asr_softc_t * sc);
426STATIC INLINE int ASR_acquireHrt (
427 INOUT Asr_softc_t * sc);
428STATIC void asr_action (
984263bc 429 IN struct cam_sim * sim,
5ca58d54
RG
430 IN union ccb * ccb);
431STATIC void asr_poll (
432 IN struct cam_sim * sim);
984263bc
MD
433
434/*
435 * Here is the auto-probe structure used to nest our tests appropriately
436 * during the startup phase of the operating system.
437 */
984263bc
MD
438STATIC device_method_t asr_methods[] = {
439 DEVMETHOD(device_probe, asr_probe),
440 DEVMETHOD(device_attach, asr_attach),
441 { 0, 0 }
442};
443
444STATIC driver_t asr_driver = {
445 "asr",
446 asr_methods,
447 sizeof(Asr_softc_t)
448};
449
450STATIC devclass_t asr_devclass;
451
32832096 452DECLARE_DUMMY_MODULE(asr);
984263bc
MD
453DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);
454
455STATIC device_method_t domino_methods[] = {
456 DEVMETHOD(device_probe, domino_probe),
457 DEVMETHOD(device_attach, domino_attach),
458 { 0, 0 }
459};
460
461STATIC driver_t domino_driver = {
462 "domino",
463 domino_methods,
464 0
465};
466
467STATIC devclass_t domino_devclass;
468
469DRIVER_MODULE(domino, pci, domino_driver, domino_devclass, 0, 0);
470
471STATIC device_method_t mode0_methods[] = {
472 DEVMETHOD(device_probe, mode0_probe),
473 DEVMETHOD(device_attach, mode0_attach),
474 { 0, 0 }
475};
476
477STATIC driver_t mode0_driver = {
478 "mode0",
479 mode0_methods,
480 0
481};
482
483STATIC devclass_t mode0_devclass;
484
485DRIVER_MODULE(mode0, pci, mode0_driver, mode0_devclass, 0, 0);
984263bc
MD
486
487/*
488 * devsw for asr hba driver
489 *
490 * only ioctl is used. the sd driver provides all other access.
491 */
492#define CDEV_MAJOR 154 /* prefered default character major */
493STATIC struct cdevsw asr_cdevsw = {
fabb8ceb
MD
494 "asr", /* name */
495 CDEV_MAJOR, /* maj */
496 0, /* flags */
497 NULL, /* port */
498 0, /* auto */
499
984263bc
MD
500 asr_open, /* open */
501 asr_close, /* close */
502 noread, /* read */
503 nowrite, /* write */
504 asr_ioctl, /* ioctl */
505 nopoll, /* poll */
506 nommap, /* mmap */
507 nostrategy, /* strategy */
984263bc 508 nodump, /* dump */
fabb8ceb 509 nopsize /* psize */
984263bc
MD
510};
511
984263bc
MD
512/*
513 * Initialize the dynamic cdevsw hooks.
514 */
515STATIC void
e4c9c0c8 516asr_drvinit (void * unused)
984263bc
MD
517{
518 static int asr_devsw_installed = 0;
519
520 if (asr_devsw_installed) {
521 return;
522 }
523 asr_devsw_installed++;
524 /*
525 * Find a free spot (the report during driver load used by
526 * osd layer in engine to generate the controlling nodes).
e4c9c0c8
MD
527 *
528 * XXX this is garbage code, store a unit number in asr_cdevsw
529 * and iterate through that instead?
984263bc 530 */
e4c9c0c8
MD
531 while (asr_cdevsw.d_maj < NUMCDEVSW &&
532 cdevsw_get(asr_cdevsw.d_maj, -1) != NULL
533 ) {
984263bc
MD
534 ++asr_cdevsw.d_maj;
535 }
e4c9c0c8
MD
536 if (asr_cdevsw.d_maj >= NUMCDEVSW) {
537 asr_cdevsw.d_maj = 0;
538 while (asr_cdevsw.d_maj < CDEV_MAJOR &&
539 cdevsw_get(asr_cdevsw.d_maj, -1) != NULL
540 ) {
541 ++asr_cdevsw.d_maj;
542 }
543 }
544
984263bc
MD
545 /*
546 * Come to papa
547 */
e4c9c0c8 548 cdevsw_add(&asr_cdevsw, 0, 0);
984263bc
MD
549} /* asr_drvinit */
550
551/* Must initialize before CAM layer picks up our HBA driver */
552SYSINIT(asrdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,asr_drvinit,NULL)
553
554/* I2O support routines */
555#define defAlignLong(STRUCT,NAME) char NAME[sizeof(STRUCT)]
556#define getAlignLong(STRUCT,NAME) ((STRUCT *)(NAME))
557
558/*
559 * Fill message with default.
560 */
561STATIC PI2O_MESSAGE_FRAME
562ASR_fillMessage (
563 IN char * Message,
564 IN u_int16_t size)
565{
566 OUT PI2O_MESSAGE_FRAME Message_Ptr;
567
568 Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message);
569 bzero ((void *)Message_Ptr, size);
570 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
571 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
572 (size + sizeof(U32) - 1) >> 2);
573 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
574 return (Message_Ptr);
575} /* ASR_fillMessage */
576
577#define EMPTY_QUEUE ((U32)-1L)
578
579STATIC INLINE U32
580ASR_getMessage(
581 IN i2oRegs_t * virt)
582{
583 OUT U32 MessageOffset;
584
585 if ((MessageOffset = virt->ToFIFO) == EMPTY_QUEUE) {
586 MessageOffset = virt->ToFIFO;
587 }
588 return (MessageOffset);
589} /* ASR_getMessage */
590
591/* Issue a polled command */
592STATIC U32
593ASR_initiateCp (
594 INOUT i2oRegs_t * virt,
595 INOUT U8 * fvirt,
596 IN PI2O_MESSAGE_FRAME Message)
597{
598 OUT U32 Mask = -1L;
599 U32 MessageOffset;
600 u_int Delay = 1500;
601
602 /*
603 * ASR_initiateCp is only used for synchronous commands and will
604 * be made more resiliant to adapter delays since commands like
605 * resetIOP can cause the adapter to be deaf for a little time.
606 */
607 while (((MessageOffset = ASR_getMessage(virt)) == EMPTY_QUEUE)
608 && (--Delay != 0)) {
609 DELAY (10000);
610 }
611 if (MessageOffset != EMPTY_QUEUE) {
612 bcopy (Message, fvirt + MessageOffset,
613 I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
614 /*
615 * Disable the Interrupts
616 */
617 virt->Mask = (Mask = virt->Mask) | Mask_InterruptsDisabled;
618 virt->ToFIFO = MessageOffset;
619 }
620 return (Mask);
621} /* ASR_initiateCp */
622
623/*
624 * Reset the adapter.
625 */
626STATIC U32
627ASR_resetIOP (
628 INOUT i2oRegs_t * virt,
629 INOUT U8 * fvirt)
630{
631 struct resetMessage {
632 I2O_EXEC_IOP_RESET_MESSAGE M;
633 U32 R;
634 };
635 defAlignLong(struct resetMessage,Message);
636 PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
637 OUT U32 * volatile Reply_Ptr;
638 U32 Old;
639
640 /*
641 * Build up our copy of the Message.
642 */
643 Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(Message,
644 sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
645 I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
646 /*
647 * Reset the Reply Status
648 */
649 *(Reply_Ptr = (U32 *)((char *)Message_Ptr
650 + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0;
651 I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
652 KVTOPHYS((void *)Reply_Ptr));
653 /*
654 * Send the Message out
655 */
656 if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
657 /*
658 * Wait for a response (Poll), timeouts are dangerous if
659 * the card is truly responsive. We assume response in 2s.
660 */
661 u_int8_t Delay = 200;
662
663 while ((*Reply_Ptr == 0) && (--Delay != 0)) {
664 DELAY (10000);
665 }
666 /*
667 * Re-enable the interrupts.
668 */
669 virt->Mask = Old;
670 ASSERT (*Reply_Ptr);
671 return (*Reply_Ptr);
672 }
673 ASSERT (Old != (U32)-1L);
674 return (0);
675} /* ASR_resetIOP */
676
677/*
678 * Get the curent state of the adapter
679 */
680STATIC INLINE PI2O_EXEC_STATUS_GET_REPLY
681ASR_getStatus (
682 INOUT i2oRegs_t * virt,
683 INOUT U8 * fvirt,
684 OUT PI2O_EXEC_STATUS_GET_REPLY buffer)
685{
686 defAlignLong(I2O_EXEC_STATUS_GET_MESSAGE,Message);
687 PI2O_EXEC_STATUS_GET_MESSAGE Message_Ptr;
688 U32 Old;
689
690 /*
691 * Build up our copy of the Message.
692 */
693 Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(Message,
694 sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
695 I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
696 I2O_EXEC_STATUS_GET);
697 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
698 KVTOPHYS((void *)buffer));
699 /* This one is a Byte Count */
700 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
701 sizeof(I2O_EXEC_STATUS_GET_REPLY));
702 /*
703 * Reset the Reply Status
704 */
705 bzero ((void *)buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
706 /*
707 * Send the Message out
708 */
709 if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
710 /*
711 * Wait for a response (Poll), timeouts are dangerous if
712 * the card is truly responsive. We assume response in 50ms.
713 */
714 u_int8_t Delay = 255;
715
716 while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
717 if (--Delay == 0) {
718 buffer = (PI2O_EXEC_STATUS_GET_REPLY)NULL;
719 break;
720 }
721 DELAY (1000);
722 }
723 /*
724 * Re-enable the interrupts.
725 */
726 virt->Mask = Old;
727 return (buffer);
728 }
729 return ((PI2O_EXEC_STATUS_GET_REPLY)NULL);
730} /* ASR_getStatus */
731
732/*
733 * Check if the device is a SCSI I2O HBA, and add it to the list.
734 */
735
736/*
737 * Probe for ASR controller. If we find it, we will use it.
738 * virtual adapters.
739 */
740STATIC PROBE_RET
741asr_probe(PROBE_ARGS)
742{
743 PROBE_SET();
744 if ((id == 0xA5011044) || (id == 0xA5111044)) {
745 PROBE_RETURN ("Adaptec Caching SCSI RAID");
746 }
747 PROBE_RETURN (NULL);
748} /* asr_probe */
749
750/*
751 * Probe/Attach for DOMINO chipset.
752 */
753STATIC PROBE_RET
754domino_probe(PROBE_ARGS)
755{
756 PROBE_SET();
757 if (id == 0x10121044) {
758 PROBE_RETURN ("Adaptec Caching Memory Controller");
759 }
760 PROBE_RETURN (NULL);
761} /* domino_probe */
762
763STATIC ATTACH_RET
764domino_attach (ATTACH_ARGS)
765{
766 ATTACH_RETURN (0);
767} /* domino_attach */
768
769/*
770 * Probe/Attach for MODE0 adapters.
771 */
772STATIC PROBE_RET
773mode0_probe(PROBE_ARGS)
774{
775 PROBE_SET();
776
777 /*
778 * If/When we can get a business case to commit to a
779 * Mode0 driver here, we can make all these tests more
780 * specific and robust. Mode0 adapters have their processors
781 * turned off, this the chips are in a raw state.
782 */
783
784 /* This is a PLX9054 */
785 if (id == 0x905410B5) {
786 PROBE_RETURN ("Adaptec Mode0 PM3757");
787 }
788 /* This is a PLX9080 */
789 if (id == 0x908010B5) {
790 PROBE_RETURN ("Adaptec Mode0 PM3754/PM3755");
791 }
792 /* This is a ZION 80303 */
793 if (id == 0x53098086) {
794 PROBE_RETURN ("Adaptec Mode0 3010S");
795 }
796 /* This is an i960RS */
797 if (id == 0x39628086) {
798 PROBE_RETURN ("Adaptec Mode0 2100S");
799 }
800 /* This is an i960RN */
801 if (id == 0x19648086) {
802 PROBE_RETURN ("Adaptec Mode0 PM2865/2400A/3200S/3400S");
803 }
804#if 0 /* this would match any generic i960 -- mjs */
805 /* This is an i960RP (typically also on Motherboards) */
806 if (id == 0x19608086) {
807 PROBE_RETURN ("Adaptec Mode0 PM2554/PM1554/PM2654");
808 }
809#endif
810 PROBE_RETURN (NULL);
811} /* mode0_probe */
812
813STATIC ATTACH_RET
814mode0_attach (ATTACH_ARGS)
815{
816 ATTACH_RETURN (0);
817} /* mode0_attach */
818
819STATIC INLINE union asr_ccb *
820asr_alloc_ccb (
821 IN Asr_softc_t * sc)
822{
823 OUT union asr_ccb * new_ccb;
824
825 if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb),
826 M_DEVBUF, M_WAITOK)) != (union asr_ccb *)NULL) {
827 bzero (new_ccb, sizeof(*new_ccb));
828 new_ccb->ccb_h.pinfo.priority = 1;
829 new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
830 new_ccb->ccb_h.spriv_ptr0 = sc;
831 }
832 return (new_ccb);
833} /* asr_alloc_ccb */
834
835STATIC INLINE void
836asr_free_ccb (
837 IN union asr_ccb * free_ccb)
838{
839 free(free_ccb, M_DEVBUF);
840} /* asr_free_ccb */
841
842/*
843 * Print inquiry data `carefully'
844 */
845STATIC void
846ASR_prstring (
847 u_int8_t * s,
848 int len)
849{
850 while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
851 printf ("%c", *(s++));
852 }
853} /* ASR_prstring */
854
855/*
856 * Prototypes
857 */
5ca58d54 858STATIC INLINE int ASR_queue (
984263bc 859 IN Asr_softc_t * sc,
5ca58d54 860 IN PI2O_MESSAGE_FRAME Message);
984263bc
MD
861/*
862 * Send a message synchronously and without Interrupt to a ccb.
863 */
864STATIC int
865ASR_queue_s (
866 INOUT union asr_ccb * ccb,
867 IN PI2O_MESSAGE_FRAME Message)
868{
869 int s;
870 U32 Mask;
871 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
872
873 /*
874 * We do not need any (optional byteswapping) method access to
875 * the Initiator context field.
876 */
877 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
878
879 /* Prevent interrupt service */
880 s = splcam ();
881 sc->ha_Virt->Mask = (Mask = sc->ha_Virt->Mask)
882 | Mask_InterruptsDisabled;
883
884 if (ASR_queue (sc, Message) == EMPTY_QUEUE) {
885 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
886 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
887 }
888
889 /*
890 * Wait for this board to report a finished instruction.
891 */
892 while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
893 (void)asr_intr (sc);
894 }
895
896 /* Re-enable Interrupts */
897 sc->ha_Virt->Mask = Mask;
898 splx(s);
899
900 return (ccb->ccb_h.status);
901} /* ASR_queue_s */
902
903/*
904 * Send a message synchronously to a Asr_softc_t
905 */
906STATIC int
907ASR_queue_c (
908 IN Asr_softc_t * sc,
909 IN PI2O_MESSAGE_FRAME Message)
910{
911 union asr_ccb * ccb;
912 OUT int status;
913
914 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
915 return (CAM_REQUEUE_REQ);
916 }
917
918 status = ASR_queue_s (ccb, Message);
919
920 asr_free_ccb(ccb);
921
922 return (status);
923} /* ASR_queue_c */
924
925/*
926 * Add the specified ccb to the active queue
927 */
928STATIC INLINE void
929ASR_ccbAdd (
930 IN Asr_softc_t * sc,
931 INOUT union asr_ccb * ccb)
932{
933 int s;
934
935 s = splcam();
936 LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
937 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
938 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
939 /*
940 * RAID systems can take considerable time to
941 * complete some commands given the large cache
942 * flashes switching from write back to write thru.
943 */
944 ccb->ccb_h.timeout = 6 * 60 * 1000;
945 }
946 ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
947 (ccb->ccb_h.timeout * hz) / 1000);
948 }
949 splx(s);
950} /* ASR_ccbAdd */
951
952/*
953 * Remove the specified ccb from the active queue.
954 */
955STATIC INLINE void
956ASR_ccbRemove (
957 IN Asr_softc_t * sc,
958 INOUT union asr_ccb * ccb)
959{
960 int s;
961
962 s = splcam();
963 untimeout(asr_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
964 LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
965 splx(s);
966} /* ASR_ccbRemove */
967
968/*
969 * Fail all the active commands, so they get re-issued by the operating
970 * system.
971 */
972STATIC INLINE void
973ASR_failActiveCommands (
974 IN Asr_softc_t * sc)
975{
976 struct ccb_hdr * ccb;
977 int s;
978
979#if 0 /* Currently handled by callers, unnecessary paranoia currently */
980 /* Left in for historical perspective. */
981 defAlignLong(I2O_EXEC_LCT_NOTIFY_MESSAGE,Message);
982 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
983
984 /* Send a blind LCT command to wait for the enableSys to complete */
985 Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)ASR_fillMessage(Message,
986 sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT));
987 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
988 I2O_EXEC_LCT_NOTIFY);
989 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
990 I2O_CLASS_MATCH_ANYCLASS);
991 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
992#endif
993
994 s = splcam();
995 /*
996 * We do not need to inform the CAM layer that we had a bus
997 * reset since we manage it on our own, this also prevents the
998 * SCSI_DELAY settling that would be required on other systems.
999 * The `SCSI_DELAY' has already been handled by the card via the
1000 * acquisition of the LCT table while we are at CAM priority level.
1001 * for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
1002 * xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
1003 * }
1004 */
1005 while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != (struct ccb_hdr *)NULL) {
1006 ASR_ccbRemove (sc, (union asr_ccb *)ccb);
1007
1008 ccb->status &= ~CAM_STATUS_MASK;
1009 ccb->status |= CAM_REQUEUE_REQ;
1010 /* Nothing Transfered */
1011 ((struct ccb_scsiio *)ccb)->resid
1012 = ((struct ccb_scsiio *)ccb)->dxfer_len;
1013
1014 if (ccb->path) {
1015 xpt_done ((union ccb *)ccb);
1016 } else {
1017 wakeup ((caddr_t)ccb);
1018 }
1019 }
1020 splx(s);
1021} /* ASR_failActiveCommands */
1022
1023/*
1024 * The following command causes the HBA to reset the specific bus
1025 */
1026STATIC INLINE void
1027ASR_resetBus(
1028 IN Asr_softc_t * sc,
1029 IN int bus)
1030{
1031 defAlignLong(I2O_HBA_BUS_RESET_MESSAGE,Message);
1032 I2O_HBA_BUS_RESET_MESSAGE * Message_Ptr;
1033 PI2O_LCT_ENTRY Device;
1034
1035 Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(Message,
1036 sizeof(I2O_HBA_BUS_RESET_MESSAGE));
1037 I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
1038 I2O_HBA_BUS_RESET);
1039 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
1040 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1041 ++Device) {
1042 if (((Device->le_type & I2O_PORT) != 0)
1043 && (Device->le_bus == bus)) {
1044 I2O_MESSAGE_FRAME_setTargetAddress(
1045 &Message_Ptr->StdMessageFrame,
1046 I2O_LCT_ENTRY_getLocalTID(Device));
1047 /* Asynchronous command, with no expectations */
1048 (void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1049 break;
1050 }
1051 }
1052} /* ASR_resetBus */
1053
1054STATIC INLINE int
1055ASR_getBlinkLedCode (
1056 IN Asr_softc_t * sc)
1057{
1058 if ((sc != (Asr_softc_t *)NULL)
1059 && (sc->ha_blinkLED != (u_int8_t *)NULL)
1060 && (sc->ha_blinkLED[1] == 0xBC)) {
1061 return (sc->ha_blinkLED[0]);
1062 }
1063 return (0);
1064} /* ASR_getBlinkCode */
1065
1066/*
1067 * Determine the address of an TID lookup. Must be done at high priority
1068 * since the address can be changed by other threads of execution.
1069 *
1070 * Returns NULL pointer if not indexible (but will attempt to generate
1071 * an index if `new_entry' flag is set to TRUE).
1072 *
1073 * All addressible entries are to be guaranteed zero if never initialized.
1074 */
1075STATIC INLINE tid_t *
1076ASR_getTidAddress(
1077 INOUT Asr_softc_t * sc,
1078 IN int bus,
1079 IN int target,
1080 IN int lun,
1081 IN int new_entry)
1082{
1083 target2lun_t * bus_ptr;
1084 lun2tid_t * target_ptr;
1085 unsigned new_size;
1086
1087 /*
1088 * Validity checking of incoming parameters. More of a bound
1089 * expansion limit than an issue with the code dealing with the
1090 * values.
1091 *
1092 * sc must be valid before it gets here, so that check could be
1093 * dropped if speed a critical issue.
1094 */
1095 if ((sc == (Asr_softc_t *)NULL)
1096 || (bus > MAX_CHANNEL)
1097 || (target > sc->ha_MaxId)
1098 || (lun > sc->ha_MaxLun)) {
1099 debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
1100 (u_long)sc, bus, target, lun);
1101 return ((tid_t *)NULL);
1102 }
1103 /*
1104 * See if there is an associated bus list.
1105 *
1106 * for performance, allocate in size of BUS_CHUNK chunks.
1107 * BUS_CHUNK must be a power of two. This is to reduce
1108 * fragmentation effects on the allocations.
1109 */
1110# define BUS_CHUNK 8
1111 new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
1112 if ((bus_ptr = sc->ha_targets[bus]) == (target2lun_t *)NULL) {
1113 /*
1114 * Allocate a new structure?
1115 * Since one element in structure, the +1
1116 * needed for size has been abstracted.
1117 */
1118 if ((new_entry == FALSE)
1119 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc (
1120 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
1121 M_TEMP, M_WAITOK))
1122 == (target2lun_t *)NULL)) {
1123 debug_asr_printf("failed to allocate bus list\n");
1124 return ((tid_t *)NULL);
1125 }
1126 bzero (bus_ptr, sizeof(*bus_ptr)
1127 + (sizeof(bus_ptr->LUN) * new_size));
1128 bus_ptr->size = new_size + 1;
1129 } else if (bus_ptr->size <= new_size) {
1130 target2lun_t * new_bus_ptr;
1131
1132 /*
1133 * Reallocate a new structure?
1134 * Since one element in structure, the +1
1135 * needed for size has been abstracted.
1136 */
1137 if ((new_entry == FALSE)
1138 || ((new_bus_ptr = (target2lun_t *)malloc (
1139 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
1140 M_TEMP, M_WAITOK))
1141 == (target2lun_t *)NULL)) {
1142 debug_asr_printf("failed to reallocate bus list\n");
1143 return ((tid_t *)NULL);
1144 }
1145 /*
1146 * Zero and copy the whole thing, safer, simpler coding
1147 * and not really performance critical at this point.
1148 */
1149 bzero (new_bus_ptr, sizeof(*bus_ptr)
1150 + (sizeof(bus_ptr->LUN) * new_size));
1151 bcopy (bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
1152 + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
1153 sc->ha_targets[bus] = new_bus_ptr;
1154 free (bus_ptr, M_TEMP);
1155 bus_ptr = new_bus_ptr;
1156 bus_ptr->size = new_size + 1;
1157 }
1158 /*
1159 * We now have the bus list, lets get to the target list.
1160 * Since most systems have only *one* lun, we do not allocate
1161 * in chunks as above, here we allow one, then in chunk sizes.
1162 * TARGET_CHUNK must be a power of two. This is to reduce
1163 * fragmentation effects on the allocations.
1164 */
1165# define TARGET_CHUNK 8
1166 if ((new_size = lun) != 0) {
1167 new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
1168 }
1169 if ((target_ptr = bus_ptr->LUN[target]) == (lun2tid_t *)NULL) {
1170 /*
1171 * Allocate a new structure?
1172 * Since one element in structure, the +1
1173 * needed for size has been abstracted.
1174 */
1175 if ((new_entry == FALSE)
1176 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc (
1177 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1178 M_TEMP, M_WAITOK))
1179 == (lun2tid_t *)NULL)) {
1180 debug_asr_printf("failed to allocate target list\n");
1181 return ((tid_t *)NULL);
1182 }
1183 bzero (target_ptr, sizeof(*target_ptr)
1184 + (sizeof(target_ptr->TID) * new_size));
1185 target_ptr->size = new_size + 1;
1186 } else if (target_ptr->size <= new_size) {
1187 lun2tid_t * new_target_ptr;
1188
1189 /*
1190 * Reallocate a new structure?
1191 * Since one element in structure, the +1
1192 * needed for size has been abstracted.
1193 */
1194 if ((new_entry == FALSE)
1195 || ((new_target_ptr = (lun2tid_t *)malloc (
1196 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1197 M_TEMP, M_WAITOK))
1198 == (lun2tid_t *)NULL)) {
1199 debug_asr_printf("failed to reallocate target list\n");
1200 return ((tid_t *)NULL);
1201 }
1202 /*
1203 * Zero and copy the whole thing, safer, simpler coding
1204 * and not really performance critical at this point.
1205 */
1206 bzero (new_target_ptr, sizeof(*target_ptr)
1207 + (sizeof(target_ptr->TID) * new_size));
1208 bcopy (target_ptr, new_target_ptr,
1209 sizeof(*target_ptr)
1210 + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
1211 bus_ptr->LUN[target] = new_target_ptr;
1212 free (target_ptr, M_TEMP);
1213 target_ptr = new_target_ptr;
1214 target_ptr->size = new_size + 1;
1215 }
1216 /*
1217 * Now, acquire the TID address from the LUN indexed list.
1218 */
1219 return (&(target_ptr->TID[lun]));
1220} /* ASR_getTidAddress */
1221
1222/*
1223 * Get a pre-existing TID relationship.
1224 *
1225 * If the TID was never set, return (tid_t)-1.
1226 *
1227 * should use mutex rather than spl.
1228 */
1229STATIC INLINE tid_t
1230ASR_getTid (
1231 IN Asr_softc_t * sc,
1232 IN int bus,
1233 IN int target,
1234 IN int lun)
1235{
1236 tid_t * tid_ptr;
1237 int s;
1238 OUT tid_t retval;
1239
1240 s = splcam();
1241 if (((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, FALSE))
1242 == (tid_t *)NULL)
1243 /* (tid_t)0 or (tid_t)-1 indicate no TID */
1244 || (*tid_ptr == (tid_t)0)) {
1245 splx(s);
1246 return ((tid_t)-1);
1247 }
1248 retval = *tid_ptr;
1249 splx(s);
1250 return (retval);
1251} /* ASR_getTid */
1252
1253/*
1254 * Set a TID relationship.
1255 *
1256 * If the TID was not set, return (tid_t)-1.
1257 *
1258 * should use mutex rather than spl.
1259 */
1260STATIC INLINE tid_t
1261ASR_setTid (
1262 INOUT Asr_softc_t * sc,
1263 IN int bus,
1264 IN int target,
1265 IN int lun,
1266 INOUT tid_t TID)
1267{
1268 tid_t * tid_ptr;
1269 int s;
1270
1271 if (TID != (tid_t)-1) {
1272 if (TID == 0) {
1273 return ((tid_t)-1);
1274 }
1275 s = splcam();
1276 if ((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, TRUE))
1277 == (tid_t *)NULL) {
1278 splx(s);
1279 return ((tid_t)-1);
1280 }
1281 *tid_ptr = TID;
1282 splx(s);
1283 }
1284 return (TID);
1285} /* ASR_setTid */
1286
1287/*-------------------------------------------------------------------------*/
1288/* Function ASR_rescan */
1289/*-------------------------------------------------------------------------*/
1290/* The Parameters Passed To This Function Are : */
1291/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1292/* */
1293/* This Function Will rescan the adapter and resynchronize any data */
1294/* */
1295/* Return : 0 For OK, Error Code Otherwise */
1296/*-------------------------------------------------------------------------*/
1297
STATIC INLINE int
ASR_rescan(
	IN Asr_softc_t * sc)
{
	int bus;
	OUT int error;

	/*
	 * Re-acquire the LCT table and synchronize us to the adapter.
	 */
	if ((error = ASR_acquireLct(sc)) == 0) {
		error = ASR_acquireHrt(sc);
	}

	if (error != 0) {
		return error;
	}

	bus = sc->ha_MaxBus;
	/* Reset all existing cached TID lookups */
	do {
		int target, event = 0;

		/*
		 * Scan for all targets on this bus to see if they
		 * got affected by the rescan.
		 */
		for (target = 0; target <= sc->ha_MaxId; ++target) {
			int lun;

			/* Stay away from the controller ID */
			if (target == sc->ha_adapter_target[bus]) {
				continue;
			}
			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
				PI2O_LCT_ENTRY Device;
				tid_t TID = (tid_t)-1;
				tid_t LastTID;

				/*
				 * See if the cached TID changed. Search for
				 * the device in our new LCT.
				 */
				for (Device = sc->ha_LCT->LCTEntry;
				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
				   + I2O_LCT_getTableSize(sc->ha_LCT));
				  ++Device) {
					if ((Device->le_type != I2O_UNKNOWN)
					 && (Device->le_bus == bus)
					 && (Device->le_target == target)
					 && (Device->le_lun == lun)
					 && (I2O_LCT_ENTRY_getUserTID(Device)
					  == 0xFFF)) {
						TID = I2O_LCT_ENTRY_getLocalTID(
						  Device);
						break;
					}
				}
				/*
				 * Indicate to the OS that the label needs
				 * to be recalculated, or that the specific
				 * open device is no longer valid (Merde)
				 * because the cached TID changed.
				 *
				 * NOTE(review): on the success branch the
				 * path from xpt_create_path() is never
				 * released — looks like a path reference
				 * leak; confirm against CAM path lifetime
				 * rules before changing.
				 */
				LastTID = ASR_getTid (sc, bus, target, lun);
				if (LastTID != TID) {
					struct cam_path * path;

					if (xpt_create_path(&path,
					  /*periph*/NULL,
					  cam_sim_path(sc->ha_sim[bus]),
					  target, lun) != CAM_REQ_CMP) {
						/*
						 * No per-device path: fall
						 * back to a bus-wide event
						 * after the target loop.
						 */
						if (TID == (tid_t)-1) {
							event |= AC_LOST_DEVICE;
						} else {
							event |= AC_INQ_CHANGED
							  | AC_GETDEV_CHANGED;
						}
					} else {
						if (TID == (tid_t)-1) {
							xpt_async(
							  AC_LOST_DEVICE,
							  path, NULL);
						} else if (LastTID == (tid_t)-1) {
							struct ccb_getdev ccb;

							xpt_setup_ccb(
							  &(ccb.ccb_h),
							  path, /*priority*/5);
							xpt_async(
							  AC_FOUND_DEVICE,
							  path,
							  &ccb);
						} else {
							xpt_async(
							  AC_INQ_CHANGED,
							  path, NULL);
							xpt_async(
							  AC_GETDEV_CHANGED,
							  path, NULL);
						}
					}
				}
				/*
				 * We have the option of clearing the
				 * cached TID for it to be rescanned, or to
				 * set it now even if the device never got
				 * accessed. We chose the later since we
				 * currently do not use the condition that
				 * the TID ever got cached.
				 */
				ASR_setTid (sc, bus, target, lun, TID);
			}
		}
		/*
		 * The xpt layer can not handle multiple events at the
		 * same call.
		 */
		if (event & AC_LOST_DEVICE) {
			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
		}
		if (event & AC_INQ_CHANGED) {
			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
		}
		if (event & AC_GETDEV_CHANGED) {
			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
		}
	} while (--bus >= 0);
	return (error);
} /* ASR_rescan */
1428
1429/*-------------------------------------------------------------------------*/
1430/* Function ASR_reset */
1431/*-------------------------------------------------------------------------*/
1432/* The Parameters Passed To This Function Are : */
1433/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1434/* */
1435/* This Function Will reset the adapter and resynchronize any data */
1436/* */
1437/* Return : None */
1438/*-------------------------------------------------------------------------*/
1439
1440STATIC INLINE int
1441ASR_reset(
1442 IN Asr_softc_t * sc)
1443{
1444 int s, retVal;
1445
1446 s = splcam();
1447 if ((sc->ha_in_reset == HA_IN_RESET)
1448 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1449 splx (s);
1450 return (EBUSY);
1451 }
1452 /*
1453 * Promotes HA_OPERATIONAL to HA_IN_RESET,
1454 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1455 */
1456 ++(sc->ha_in_reset);
1457 if (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0) {
1458 debug_asr_printf ("ASR_resetIOP failed\n");
1459 /*
1460 * We really need to take this card off-line, easier said
1461 * than make sense. Better to keep retrying for now since if a
1462 * UART cable is connected the blinkLEDs the adapter is now in
1463 * a hard state requiring action from the monitor commands to
1464 * the HBA to continue. For debugging waiting forever is a
1465 * good thing. In a production system, however, one may wish
1466 * to instead take the card off-line ...
1467 */
1468# if 0 && (defined(HA_OFF_LINE))
1469 /*
1470 * Take adapter off-line.
1471 */
1472 printf ("asr%d: Taking adapter off-line\n",
1473 sc->ha_path[0]
1474 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1475 : 0);
1476 sc->ha_in_reset = HA_OFF_LINE;
1477 splx (s);
1478 return (ENXIO);
1479# else
1480 /* Wait Forever */
1481 while (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0);
1482# endif
1483 }
1484 retVal = ASR_init (sc);
1485 splx (s);
1486 if (retVal != 0) {
1487 debug_asr_printf ("ASR_init failed\n");
1488 sc->ha_in_reset = HA_OFF_LINE;
1489 return (ENXIO);
1490 }
1491 if (ASR_rescan (sc) != 0) {
1492 debug_asr_printf ("ASR_rescan failed\n");
1493 }
1494 ASR_failActiveCommands (sc);
1495 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1496 printf ("asr%d: Brining adapter back on-line\n",
1497 sc->ha_path[0]
1498 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1499 : 0);
1500 }
1501 sc->ha_in_reset = HA_OPERATIONAL;
1502 return (0);
1503} /* ASR_reset */
1504
1505/*
1506 * Device timeout handler.
1507 */
1508STATIC void
1509asr_timeout(
1510 INOUT void * arg)
1511{
1512 union asr_ccb * ccb = (union asr_ccb *)arg;
1513 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1514 int s;
1515
1516 debug_asr_print_path(ccb);
1517 debug_asr_printf("timed out");
1518
1519 /*
1520 * Check if the adapter has locked up?
1521 */
1522 if ((s = ASR_getBlinkLedCode(sc)) != 0) {
1523 /* Reset Adapter */
1524 printf ("asr%d: Blink LED 0x%x resetting adapter\n",
1525 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
1526 if (ASR_reset (sc) == ENXIO) {
1527 /* Try again later */
1528 ccb->ccb_h.timeout_ch = timeout(asr_timeout,
1529 (caddr_t)ccb,
1530 (ccb->ccb_h.timeout * hz) / 1000);
1531 }
1532 return;
1533 }
1534 /*
1535 * Abort does not function on the ASR card!!! Walking away from
1536 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
1537 * our best bet, followed by a complete adapter reset if that fails.
1538 */
1539 s = splcam();
1540 /* Check if we already timed out once to raise the issue */
1541 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
1542 debug_asr_printf (" AGAIN\nreinitializing adapter\n");
1543 if (ASR_reset (sc) == ENXIO) {
1544 ccb->ccb_h.timeout_ch = timeout(asr_timeout,
1545 (caddr_t)ccb,
1546 (ccb->ccb_h.timeout * hz) / 1000);
1547 }
1548 splx(s);
1549 return;
1550 }
1551 debug_asr_printf ("\nresetting bus\n");
1552 /* If the BUS reset does not take, then an adapter reset is next! */
1553 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1554 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
1555 ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
1556 (ccb->ccb_h.timeout * hz) / 1000);
1557 ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
1558 xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
1559 splx(s);
1560} /* asr_timeout */
1561
1562/*
1563 * send a message asynchronously
1564 */
1565STATIC INLINE int
1566ASR_queue(
1567 IN Asr_softc_t * sc,
1568 IN PI2O_MESSAGE_FRAME Message)
1569{
1570 OUT U32 MessageOffset;
1571 union asr_ccb * ccb;
1572
1573 debug_asr_printf ("Host Command Dump:\n");
1574 debug_asr_dump_message (Message);
1575
1576 ccb = (union asr_ccb *)(long)
1577 I2O_MESSAGE_FRAME_getInitiatorContext64(Message);
1578
1579 if ((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE) {
984263bc
MD
1580 bcopy (Message, sc->ha_Fvirt + MessageOffset,
1581 I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
1582 if (ccb) {
1583 ASR_ccbAdd (sc, ccb);
1584 }
1585 /* Post the command */
1586 sc->ha_Virt->ToFIFO = MessageOffset;
1587 } else {
1588 if (ASR_getBlinkLedCode(sc)) {
1589 /*
1590 * Unlikely we can do anything if we can't grab a
1591 * message frame :-(, but lets give it a try.
1592 */
1593 (void)ASR_reset (sc);
1594 }
1595 }
1596 return (MessageOffset);
1597} /* ASR_queue */
1598
1599
/* Simple Scatter Gather elements */
/*
 * SG(SGL, Index, Flags, Buffer, Size):
 * fill simple SG element `Index' of list `SGL' with `Size' bytes at
 * the physical address of `Buffer' (NULL stays NULL), OR-ing `Flags'
 * into I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT. Multi-statement macro:
 * callers always use it as a full statement, never inside bare if/else.
 */
#define	SG(SGL,Index,Flags,Buffer,Size)				   \
	I2O_FLAGS_COUNT_setCount(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  Size);						   \
	I2O_FLAGS_COUNT_setFlags(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	   \
	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		   \
	  (Buffer == NULL) ? NULL : KVTOPHYS(Buffer))
1611
1612/*
1613 * Retrieve Parameter Group.
1614 * Buffer must be allocated using defAlignLong macro.
1615 */
1616STATIC void *
1617ASR_getParams(
1618 IN Asr_softc_t * sc,
1619 IN tid_t TID,
1620 IN int Group,
1621 OUT void * Buffer,
1622 IN unsigned BufferSize)
1623{
1624 struct paramGetMessage {
1625 I2O_UTIL_PARAMS_GET_MESSAGE M;
1626 char F[
1627 sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
1628 struct Operations {
1629 I2O_PARAM_OPERATIONS_LIST_HEADER Header;
1630 I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
1631 } O;
1632 };
1633 defAlignLong(struct paramGetMessage, Message);
1634 struct Operations * Operations_Ptr;
1635 I2O_UTIL_PARAMS_GET_MESSAGE * Message_Ptr;
1636 struct ParamBuffer {
1637 I2O_PARAM_RESULTS_LIST_HEADER Header;
1638 I2O_PARAM_READ_OPERATION_RESULT Read;
1639 char Info[1];
1640 } * Buffer_Ptr;
1641
1642 Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(Message,
1643 sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1644 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1645 Operations_Ptr = (struct Operations *)((char *)Message_Ptr
1646 + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1647 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1648 bzero ((void *)Operations_Ptr, sizeof(struct Operations));
1649 I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
1650 &(Operations_Ptr->Header), 1);
1651 I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
1652 &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
1653 I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
1654 &(Operations_Ptr->Template[0]), 0xFFFF);
1655 I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
1656 &(Operations_Ptr->Template[0]), Group);
1657 bzero ((void *)(Buffer_Ptr = getAlignLong(struct ParamBuffer, Buffer)),
1658 BufferSize);
1659
1660 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1661 I2O_VERSION_11
1662 + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1663 / sizeof(U32)) << 4));
1664 I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
1665 TID);
1666 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
1667 I2O_UTIL_PARAMS_GET);
1668 /*
1669 * Set up the buffers as scatter gather elements.
1670 */
1671 SG(&(Message_Ptr->SGL), 0,
1672 I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
1673 Operations_Ptr, sizeof(struct Operations));
1674 SG(&(Message_Ptr->SGL), 1,
1675 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1676 Buffer_Ptr, BufferSize);
1677
1678 if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
1679 && (Buffer_Ptr->Header.ResultCount)) {
1680 return ((void *)(Buffer_Ptr->Info));
1681 }
1682 return ((void *)NULL);
1683} /* ASR_getParams */
1684
1685/*
1686 * Acquire the LCT information.
1687 */
1688STATIC INLINE int
1689ASR_acquireLct (
1690 INOUT Asr_softc_t * sc)
1691{
1692 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
1693 PI2O_SGE_SIMPLE_ELEMENT sg;
1694 int MessageSizeInBytes;
1695 caddr_t v;
1696 int len;
1697 I2O_LCT Table;
1698 PI2O_LCT_ENTRY Entry;
1699
1700 /*
1701 * sc value assumed valid
1702 */
1703 MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE)
1704 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
1705 if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc (
1706 MessageSizeInBytes, M_TEMP, M_WAITOK))
1707 == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
1708 return (ENOMEM);
1709 }
1710 (void)ASR_fillMessage((char *)Message_Ptr, MessageSizeInBytes);
1711 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1712 (I2O_VERSION_11 +
1713 (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1714 / sizeof(U32)) << 4)));
1715 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1716 I2O_EXEC_LCT_NOTIFY);
1717 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
1718 I2O_CLASS_MATCH_ANYCLASS);
1719 /*
1720 * Call the LCT table to determine the number of device entries
1721 * to reserve space for.
1722 */
1723 SG(&(Message_Ptr->SGL), 0,
1724 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
1725 sizeof(I2O_LCT));
1726 /*
1727 * since this code is reused in several systems, code efficiency
1728 * is greater by using a shift operation rather than a divide by
1729 * sizeof(u_int32_t).
1730 */
1731 I2O_LCT_setTableSize(&Table,
1732 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1733 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1734 /*
1735 * Determine the size of the LCT table.
1736 */
1737 if (sc->ha_LCT) {
1738 free (sc->ha_LCT, M_TEMP);
1739 }
1740 /*
1741 * malloc only generates contiguous memory when less than a
1742 * page is expected. We must break the request up into an SG list ...
1743 */
1744 if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
1745 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
1746 || (len > (128 * 1024))) { /* Arbitrary */
1747 free (Message_Ptr, M_TEMP);
1748 return (EINVAL);
1749 }
1750 if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK))
1751 == (PI2O_LCT)NULL) {
1752 free (Message_Ptr, M_TEMP);
1753 return (ENOMEM);
1754 }
1755 /*
1756 * since this code is reused in several systems, code efficiency
1757 * is greater by using a shift operation rather than a divide by
1758 * sizeof(u_int32_t).
1759 */
1760 I2O_LCT_setTableSize(sc->ha_LCT,
1761 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1762 /*
1763 * Convert the access to the LCT table into a SG list.
1764 */
1765 sg = Message_Ptr->SGL.u.Simple;
1766 v = (caddr_t)(sc->ha_LCT);
1767 for (;;) {
1768 int next, base, span;
1769
1770 span = 0;
1771 next = base = KVTOPHYS(v);
1772 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1773
1774 /* How far can we go contiguously */
1775 while ((len > 0) && (base == next)) {
1776 int size;
1777
1778 next = trunc_page(base) + PAGE_SIZE;
1779 size = next - base;
1780 if (size > len) {
1781 size = len;
1782 }
1783 span += size;
1784 v += size;
1785 len -= size;
1786 base = KVTOPHYS(v);
1787 }
1788
1789 /* Construct the Flags */
1790 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1791 {
1792 int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
1793 if (len <= 0) {
1794 rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
1795 | I2O_SGL_FLAGS_LAST_ELEMENT
1796 | I2O_SGL_FLAGS_END_OF_BUFFER);
1797 }
1798 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
1799 }
1800
1801 if (len <= 0) {
1802 break;
1803 }
1804
1805 /*
1806 * Incrementing requires resizing of the packet.
1807 */
1808 ++sg;
1809 MessageSizeInBytes += sizeof(*sg);
1810 I2O_MESSAGE_FRAME_setMessageSize(
1811 &(Message_Ptr->StdMessageFrame),
1812 I2O_MESSAGE_FRAME_getMessageSize(
1813 &(Message_Ptr->StdMessageFrame))
1814 + (sizeof(*sg) / sizeof(U32)));
1815 {
1816 PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;
1817
1818 if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
1819 malloc (MessageSizeInBytes, M_TEMP, M_WAITOK))
1820 == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
1821 free (sc->ha_LCT, M_TEMP);
1822 sc->ha_LCT = (PI2O_LCT)NULL;
1823 free (Message_Ptr, M_TEMP);
1824 return (ENOMEM);
1825 }
1826 span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
1827 bcopy ((caddr_t)Message_Ptr,
1828 (caddr_t)NewMessage_Ptr, span);
1829 free (Message_Ptr, M_TEMP);
1830 sg = (PI2O_SGE_SIMPLE_ELEMENT)
1831 (((caddr_t)NewMessage_Ptr) + span);
1832 Message_Ptr = NewMessage_Ptr;
1833 }
1834 }
1835 { int retval;
1836
1837 retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1838 free (Message_Ptr, M_TEMP);
1839 if (retval != CAM_REQ_CMP) {
1840 return (ENODEV);
1841 }
1842 }
1843 /* If the LCT table grew, lets truncate accesses */
1844 if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
1845 I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
1846 }
1847 for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
1848 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1849 ++Entry) {
1850 Entry->le_type = I2O_UNKNOWN;
1851 switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {
1852
1853 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
1854 Entry->le_type = I2O_BSA;
1855 break;
1856
1857 case I2O_CLASS_SCSI_PERIPHERAL:
1858 Entry->le_type = I2O_SCSI;
1859 break;
1860
1861 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
1862 Entry->le_type = I2O_FCA;
1863 break;
1864
1865 case I2O_CLASS_BUS_ADAPTER_PORT:
1866 Entry->le_type = I2O_PORT | I2O_SCSI;
1867 /* FALLTHRU */
1868 case I2O_CLASS_FIBRE_CHANNEL_PORT:
1869 if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
1870 I2O_CLASS_FIBRE_CHANNEL_PORT) {
1871 Entry->le_type = I2O_PORT | I2O_FCA;
1872 }
1873 { struct ControllerInfo {
1874 I2O_PARAM_RESULTS_LIST_HEADER Header;
1875 I2O_PARAM_READ_OPERATION_RESULT Read;
1876 I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1877 };
1878 defAlignLong(struct ControllerInfo, Buffer);
1879 PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1880
1881 Entry->le_bus = 0xff;
1882 Entry->le_target = 0xff;
1883 Entry->le_lun = 0xff;
1884
1885 if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
1886 ASR_getParams(sc,
1887 I2O_LCT_ENTRY_getLocalTID(Entry),
1888 I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
1889 Buffer, sizeof(struct ControllerInfo)))
1890 == (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)NULL) {
1891 continue;
1892 }
1893 Entry->le_target
1894 = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
1895 Info);
1896 Entry->le_lun = 0;
1897 } /* FALLTHRU */
1898 default:
1899 continue;
1900 }
1901 { struct DeviceInfo {
1902 I2O_PARAM_RESULTS_LIST_HEADER Header;
1903 I2O_PARAM_READ_OPERATION_RESULT Read;
1904 I2O_DPT_DEVICE_INFO_SCALAR Info;
1905 };
1906 defAlignLong (struct DeviceInfo, Buffer);
1907 PI2O_DPT_DEVICE_INFO_SCALAR Info;
1908
1909 Entry->le_bus = 0xff;
1910 Entry->le_target = 0xff;
1911 Entry->le_lun = 0xff;
1912
1913 if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
1914 ASR_getParams(sc,
1915 I2O_LCT_ENTRY_getLocalTID(Entry),
1916 I2O_DPT_DEVICE_INFO_GROUP_NO,
1917 Buffer, sizeof(struct DeviceInfo)))
1918 == (PI2O_DPT_DEVICE_INFO_SCALAR)NULL) {
1919 continue;
1920 }
1921 Entry->le_type
1922 |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
1923 Entry->le_bus
1924 = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
1925 if ((Entry->le_bus > sc->ha_MaxBus)
1926 && (Entry->le_bus <= MAX_CHANNEL)) {
1927 sc->ha_MaxBus = Entry->le_bus;
1928 }
1929 Entry->le_target
1930 = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
1931 Entry->le_lun
1932 = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
1933 }
1934 }
1935 /*
1936 * A zero return value indicates success.
1937 */
1938 return (0);
1939} /* ASR_acquireLct */
1940
1941/*
1942 * Initialize a message frame.
1943 * We assume that the CDB has already been set up, so all we do here is
1944 * generate the Scatter Gather list.
1945 */
1946STATIC INLINE PI2O_MESSAGE_FRAME
1947ASR_init_message(
1948 IN union asr_ccb * ccb,
1949 OUT PI2O_MESSAGE_FRAME Message)
1950{
1951 int next, span, base, rw;
1952 OUT PI2O_MESSAGE_FRAME Message_Ptr;
1953 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1954 PI2O_SGE_SIMPLE_ELEMENT sg;
1955 caddr_t v;
1956 vm_size_t size, len;
1957 U32 MessageSize;
1958
1959 /* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
1960 bzero (Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message),
1961 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT)));
1962
1963 {
1964 int target = ccb->ccb_h.target_id;
1965 int lun = ccb->ccb_h.target_lun;
1966 int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1967 tid_t TID;
1968
1969 if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
1970 PI2O_LCT_ENTRY Device;
1971
1972 TID = (tid_t)0;
1973 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
1974 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1975 ++Device) {
1976 if ((Device->le_type != I2O_UNKNOWN)
1977 && (Device->le_bus == bus)
1978 && (Device->le_target == target)
1979 && (Device->le_lun == lun)
1980 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
1981 TID = I2O_LCT_ENTRY_getLocalTID(Device);
1982 ASR_setTid (sc, Device->le_bus,
1983 Device->le_target, Device->le_lun,
1984 TID);
1985 break;
1986 }
1987 }
1988 }
1989 if (TID == (tid_t)0) {
1990 return ((PI2O_MESSAGE_FRAME)NULL);
1991 }
1992 I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
1993 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
1994 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
1995 }
1996 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
1997 (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1998 / sizeof(U32)) << 4));
1999 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
2000 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2001 - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
2002 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
2003 I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
2004 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2005 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
2006 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
2007 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2008 I2O_SCB_FLAG_ENABLE_DISCONNECT
2009 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2010 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2011 /*
2012 * We do not need any (optional byteswapping) method access to
2013 * the Initiator & Transaction context field.
2014 */
2015 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
2016
2017 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2018 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
2019 /*
2020 * copy the cdb over
2021 */
2022 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
2023 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
2024 bcopy (&(ccb->csio.cdb_io),
2025 ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB, ccb->csio.cdb_len);
2026
2027 /*
2028 * Given a buffer describing a transfer, set up a scatter/gather map
2029 * in a ccb to map that SCSI transfer.
2030 */
2031
2032 rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;
2033
2034 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
2035 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2036 (ccb->csio.dxfer_len)
2037 ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
2038 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2039 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2040 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
2041 : (I2O_SCB_FLAG_XFER_FROM_DEVICE
2042 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2043 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2044 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
2045 : (I2O_SCB_FLAG_ENABLE_DISCONNECT
2046 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2047 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2048
2049 /*
2050 * Given a transfer described by a `data', fill in the SG list.
2051 */
2052 sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];
2053
2054 len = ccb->csio.dxfer_len;
2055 v = ccb->csio.data_ptr;
2056 ASSERT (ccb->csio.dxfer_len >= 0);
2057 MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
2058 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2059 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
2060 while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2061 Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
2062 span = 0;
2063 next = base = KVTOPHYS(v);
2064 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
2065
2066 /* How far can we go contiguously */
2067 while ((len > 0) && (base == next)) {
2068 next = trunc_page(base) + PAGE_SIZE;
2069 size = next - base;
2070 if (size > len) {
2071 size = len;
2072 }
2073 span += size;
2074 v += size;
2075 len -= size;
2076 base = KVTOPHYS(v);
2077 }
2078
2079 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
2080 if (len == 0) {
2081 rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
2082 }
2083 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
2084 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
2085 ++sg;
2086 MessageSize += sizeof(*sg) / sizeof(U32);
2087 }
2088 /* We always do the request sense ... */
2089 if ((span = ccb->csio.sense_len) == 0) {
2090 span = sizeof(ccb->csio.sense_data);
2091 }
2092 SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2093 &(ccb->csio.sense_data), span);
2094 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
2095 MessageSize + (sizeof(*sg) / sizeof(U32)));
2096 return (Message_Ptr);
2097} /* ASR_init_message */
2098
2099/*
2100 * Initialize the adapter's outbound (reply) message FIFO.
2101 */
2102STATIC INLINE U32
2103ASR_initOutBound (
2104 INOUT Asr_softc_t * sc)
2105{
2106 struct initOutBoundMessage {
2107 I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
2108 U32 R;
2109 };
2110 defAlignLong(struct initOutBoundMessage,Message);
2111 PI2O_EXEC_OUTBOUND_INIT_MESSAGE Message_Ptr;
2112 OUT U32 * volatile Reply_Ptr;
2113 U32 Old;
2114
2115 /*
2116 * Build up our copy of the Message.
2117 */
2118 Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(Message,
2119 sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
2120 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2121 I2O_EXEC_OUTBOUND_INIT);
2122 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
2123 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
2124 sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
2125 /*
2126 * Reset the Reply Status
2127 */
2128 *(Reply_Ptr = (U32 *)((char *)Message_Ptr
2129 + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
2130 SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
2131 sizeof(U32));
2132 /*
2133 * Send the Message out
2134 */
2135 if ((Old = ASR_initiateCp (sc->ha_Virt, sc->ha_Fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
2136 u_long size, addr;
2137
2138 /*
2139 * Wait for a response (Poll).
2140 */
2141 while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
2142 /*
2143 * Re-enable the interrupts.
2144 */
2145 sc->ha_Virt->Mask = Old;
2146 /*
2147 * Populate the outbound table.
2148 */
2149 if (sc->ha_Msgs == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
2150
2151 /* Allocate the reply frames */
2152 size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
2153 * sc->ha_Msgs_Count;
2154
2155 /*
2156 * contigmalloc only works reliably at
2157 * initialization time.
2158 */
2159 if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
2160 contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
2161 0xFFFFFFFFul, (u_long)sizeof(U32), 0ul))
2162 != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
2163 (void)bzero ((char *)sc->ha_Msgs, size);
2164 sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
2165 }
2166 }
2167
2168 /* Initialize the outbound FIFO */
2169 if (sc->ha_Msgs != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL)
2170 for (size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
2171 size; --size) {
2172 sc->ha_Virt->FromFIFO = addr;
2173 addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
2174 }
2175 return (*Reply_Ptr);
2176 }
2177 return (0);
2178} /* ASR_initOutBound */
2179
2180/*
2181 * Set the system table
2182 */
2183STATIC INLINE int
2184ASR_setSysTab(
2185 IN Asr_softc_t * sc)
2186{
2187 PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
2188 PI2O_SET_SYSTAB_HEADER SystemTable;
2189 Asr_softc_t * ha;
2190 PI2O_SGE_SIMPLE_ELEMENT sg;
2191 int retVal;
2192
2193 if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc (
2194 sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK))
2195 == (PI2O_SET_SYSTAB_HEADER)NULL) {
2196 return (ENOMEM);
2197 }
2198 bzero (SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
2199 for (ha = Asr_softc; ha; ha = ha->ha_next) {
2200 ++SystemTable->NumberEntries;
2201 }
2202 if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc (
2203 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2204 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
2205 M_TEMP, M_WAITOK)) == (PI2O_EXEC_SYS_TAB_SET_MESSAGE)NULL) {
2206 free (SystemTable, M_TEMP);
2207 return (ENOMEM);
2208 }
2209 (void)ASR_fillMessage((char *)Message_Ptr,
2210 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2211 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
2212 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2213 (I2O_VERSION_11 +
2214 (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2215 / sizeof(U32)) << 4)));
2216 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2217 I2O_EXEC_SYS_TAB_SET);
2218 /*
2219 * Call the LCT table to determine the number of device entries
2220 * to reserve space for.
2221 * since this code is reused in several systems, code efficiency
2222 * is greater by using a shift operation rather than a divide by
2223 * sizeof(u_int32_t).
2224 */
2225 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
2226 + ((I2O_MESSAGE_FRAME_getVersionOffset(
2227 &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
2228 SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
2229 ++sg;
2230 for (ha = Asr_softc; ha; ha = ha->ha_next) {
2231 SG(sg, 0,
2232 ((ha->ha_next)
2233 ? (I2O_SGL_FLAGS_DIR)
2234 : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
2235 &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
2236 ++sg;
2237 }
2238 SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2239 SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
2240 | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2241 retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2242 free (Message_Ptr, M_TEMP);
2243 free (SystemTable, M_TEMP);
2244 return (retVal);
2245} /* ASR_setSysTab */
2246
2247STATIC INLINE int
2248ASR_acquireHrt (
2249 INOUT Asr_softc_t * sc)
2250{
2251 defAlignLong(I2O_EXEC_HRT_GET_MESSAGE,Message);
2252 I2O_EXEC_HRT_GET_MESSAGE * Message_Ptr;
2253 struct {
2254 I2O_HRT Header;
2255 I2O_HRT_ENTRY Entry[MAX_CHANNEL];
2256 } Hrt;
2257 u_int8_t NumberOfEntries;
2258 PI2O_HRT_ENTRY Entry;
2259
2260 bzero ((void *)&Hrt, sizeof (Hrt));
2261 Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(Message,
2262 sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2263 + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2264 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2265 (I2O_VERSION_11
2266 + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2267 / sizeof(U32)) << 4)));
2268 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
2269 I2O_EXEC_HRT_GET);
2270
2271 /*
2272 * Set up the buffers as scatter gather elements.
2273 */
2274 SG(&(Message_Ptr->SGL), 0,
2275 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2276 &Hrt, sizeof(Hrt));
2277 if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
2278 return (ENODEV);
2279 }
2280 if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
2281 > (MAX_CHANNEL + 1)) {
2282 NumberOfEntries = MAX_CHANNEL + 1;
2283 }
2284 for (Entry = Hrt.Header.HRTEntry;
2285 NumberOfEntries != 0;
2286 ++Entry, --NumberOfEntries) {
2287 PI2O_LCT_ENTRY Device;
2288
2289 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2290 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
2291 ++Device) {
2292 if (I2O_LCT_ENTRY_getLocalTID(Device)
2293 == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
2294 Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
2295 Entry) >> 16;
2296 if ((Device->le_bus > sc->ha_MaxBus)
2297 && (Device->le_bus <= MAX_CHANNEL)) {
2298 sc->ha_MaxBus = Device->le_bus;
2299 }
2300 }
2301 }
2302 }
2303 return (0);
2304} /* ASR_acquireHrt */
2305
2306/*
2307 * Enable the adapter.
2308 */
2309STATIC INLINE int
2310ASR_enableSys (
2311 IN Asr_softc_t * sc)
2312{
2313 defAlignLong(I2O_EXEC_SYS_ENABLE_MESSAGE,Message);
2314 PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr;
2315
2316 Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(Message,
2317 sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2318 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2319 I2O_EXEC_SYS_ENABLE);
2320 return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2321} /* ASR_enableSys */
2322
2323/*
2324 * Perform the stages necessary to initialize the adapter
2325 */
2326STATIC int
2327ASR_init(
2328 IN Asr_softc_t * sc)
2329{
2330 return ((ASR_initOutBound(sc) == 0)
2331 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2332 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2333} /* ASR_init */
2334
2335/*
2336 * Send a Synchronize Cache command to the target device.
2337 */
2338STATIC INLINE void
2339ASR_sync (
2340 IN Asr_softc_t * sc,
2341 IN int bus,
2342 IN int target,
2343 IN int lun)
2344{
2345 tid_t TID;
2346
2347 /*
2348 * We will not synchronize the device when there are outstanding
2349 * commands issued by the OS (this is due to a locked up device,
2350 * as the OS normally would flush all outstanding commands before
2351 * issuing a shutdown or an adapter reset).
2352 */
2353 if ((sc != (Asr_softc_t *)NULL)
2354 && (LIST_FIRST(&(sc->ha_ccb)) != (struct ccb_hdr *)NULL)
2355 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
2356 && (TID != (tid_t)0)) {
2357 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
2358 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
2359
2360 bzero (Message_Ptr
2361 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
2362 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2363 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2364
2365 I2O_MESSAGE_FRAME_setVersionOffset(
2366 (PI2O_MESSAGE_FRAME)Message_Ptr,
2367 I2O_VERSION_11
2368 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2369 - sizeof(I2O_SG_ELEMENT))
2370 / sizeof(U32)) << 4));
2371 I2O_MESSAGE_FRAME_setMessageSize(
2372 (PI2O_MESSAGE_FRAME)Message_Ptr,
2373 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2374 - sizeof(I2O_SG_ELEMENT))
2375 / sizeof(U32));
2376 I2O_MESSAGE_FRAME_setInitiatorAddress (
2377 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2378 I2O_MESSAGE_FRAME_setFunction(
2379 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2380 I2O_MESSAGE_FRAME_setTargetAddress(
2381 (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
2382 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2383 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2384 I2O_SCSI_SCB_EXEC);
2385 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
2386 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2387 I2O_SCB_FLAG_ENABLE_DISCONNECT
2388 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2389 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2390 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2391 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2392 DPT_ORGANIZATION_ID);
2393 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2394 Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
2395 Message_Ptr->CDB[1] = (lun << 5);
2396
2397 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2398 (I2O_SCB_FLAG_XFER_FROM_DEVICE
2399 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2400 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2401 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2402
2403 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2404
2405 }
2406}
2407
2408STATIC INLINE void
2409ASR_synchronize (
2410 IN Asr_softc_t * sc)
2411{
2412 int bus, target, lun;
2413
2414 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2415 for (target = 0; target <= sc->ha_MaxId; ++target) {
2416 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2417 ASR_sync(sc,bus,target,lun);
2418 }
2419 }
2420 }
2421}
2422
2423/*
2424 * Reset the HBA, targets and BUS.
2425 * Currently this resets *all* the SCSI busses.
2426 */
2427STATIC INLINE void
2428asr_hbareset(
2429 IN Asr_softc_t * sc)
2430{
2431 ASR_synchronize (sc);
2432 (void)ASR_reset (sc);
2433} /* asr_hbareset */
2434
2435/*
2436 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2437 * limit and a reduction in error checking (in the pre 4.0 case).
2438 */
2439STATIC int
2440asr_pci_map_mem (
984263bc 2441 IN device_t tag,
984263bc
MD
2442 IN Asr_softc_t * sc)
2443{
2444 int rid;
2445 u_int32_t p, l, s;
2446
984263bc
MD
2447 /*
2448 * I2O specification says we must find first *memory* mapped BAR
2449 */
2450 for (rid = PCIR_MAPS;
2451 rid < (PCIR_MAPS + 4 * sizeof(u_int32_t));
2452 rid += sizeof(u_int32_t)) {
2453 p = pci_read_config(tag, rid, sizeof(p));
2454 if ((p & 1) == 0) {
2455 break;
2456 }
2457 }
2458 /*
2459 * Give up?
2460 */
2461 if (rid >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2462 rid = PCIR_MAPS;
2463 }
2464 p = pci_read_config(tag, rid, sizeof(p));
2465 pci_write_config(tag, rid, -1, sizeof(p));
2466 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2467 pci_write_config(tag, rid, p, sizeof(p));
2468 if (l > MAX_MAP) {
2469 l = MAX_MAP;
2470 }
2471 /*
2472 * The 2005S Zero Channel RAID solution is not a perfect PCI
2473 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
2474 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
2475 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
2476 * accessible via BAR0, the messaging registers are accessible
2477 * via BAR1. If the subdevice code is 50 to 59 decimal.
2478 */
2479 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s));
2480 if (s != 0xA5111044) {
2481 s = pci_read_config(tag, PCIR_SUBVEND_0, sizeof(s));
2482 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2483 && (ADPTDOMINATOR_SUB_ID_START <= s)
2484 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2485 l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
2486 }
2487 }
2488 p &= ~15;
2489 sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2490 p, p + l, l, RF_ACTIVE);
2491 if (sc->ha_mem_res == (struct resource *)NULL) {
2492 return (0);
2493 }
2494 sc->ha_Base = (void *)rman_get_start(sc->ha_mem_res);
2495 if (sc->ha_Base == (void *)NULL) {
2496 return (0);
2497 }
2498 sc->ha_Virt = (i2oRegs_t *) rman_get_virtual(sc->ha_mem_res);
2499 if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
2500 if ((rid += sizeof(u_int32_t))
2501 >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2502 return (0);
2503 }
2504 p = pci_read_config(tag, rid, sizeof(p));
2505 pci_write_config(tag, rid, -1, sizeof(p));
2506 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2507 pci_write_config(tag, rid, p, sizeof(p));
2508 if (l > MAX_MAP) {
2509 l = MAX_MAP;
2510 }
2511 p &= ~15;
2512 sc->ha_mes_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2513 p, p + l, l, RF_ACTIVE);
2514 if (sc->ha_mes_res == (struct resource *)NULL) {
2515 return (0);
2516 }
2517 if ((void *)rman_get_start(sc->ha_mes_res) == (void *)NULL) {
2518 return (0);
2519 }
2520 sc->ha_Fvirt = (U8 *) rman_get_virtual(sc->ha_mes_res);
2521 } else {
2522 sc->ha_Fvirt = (U8 *)(sc->ha_Virt);
2523 }
984263bc
MD
2524 return (1);
2525} /* asr_pci_map_mem */
2526
2527/*
2528 * A simplified copy of the real pci_map_int with additional
2529 * registration requirements.
2530 */
2531STATIC int
2532asr_pci_map_int (
984263bc 2533 IN device_t tag,
984263bc
MD
2534 IN Asr_softc_t * sc)
2535{
984263bc
MD
2536 int rid = 0;
2537
2538 sc->ha_irq_res = bus_alloc_resource(tag, SYS_RES_IRQ, &rid,
2539 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE);
2540 if (sc->ha_irq_res == (struct resource *)NULL) {
2541 return (0);
2542 }
2543 if (bus_setup_intr(tag, sc->ha_irq_res, INTR_TYPE_CAM,
2544 (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr))) {
2545 return (0);
2546 }
2547 sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char));
984263bc
MD
2548 return (1);
2549} /* asr_pci_map_int */
2550
2551/*
2552 * Attach the devices, and virtual devices to the driver list.
2553 */
2554STATIC ATTACH_RET
2555asr_attach (ATTACH_ARGS)
2556{
2557 Asr_softc_t * sc;
2558 struct scsi_inquiry_data * iq;
2559 ATTACH_SET();
2560
076ae0ab 2561 sc = malloc(sizeof(*sc), M_DEVBUF, M_INTWAIT);
984263bc
MD
2562 if (Asr_softc == (Asr_softc_t *)NULL) {
2563 /*
2564 * Fixup the OS revision as saved in the dptsig for the
2565 * engine (dptioctl.h) to pick up.
2566 */
2567 bcopy (osrelease, &ASR_sig.dsDescription[16], 5);
2568 printf ("asr%d: major=%d\n", unit, asr_cdevsw.d_maj);
2569 }
2570 /*
2571 * Initialize the software structure
2572 */
2573 bzero (sc, sizeof(*sc));
2574 LIST_INIT(&(sc->ha_ccb));
984263bc
MD
2575 /* Link us into the HA list */
2576 {
2577 Asr_softc_t **ha;
2578
2579 for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next));
2580 *(ha) = sc;
2581 }
2582 {
2583 PI2O_EXEC_STATUS_GET_REPLY status;
2584 int size;
2585
2586 /*
2587 * This is the real McCoy!
2588 */
2589 if (!asr_pci_map_mem(tag, sc)) {
2590 printf ("asr%d: could not map memory\n", unit);
2591 ATTACH_RETURN(ENXIO);
2592 }
2593 /* Enable if not formerly enabled */
984263bc
MD
2594 pci_write_config (tag, PCIR_COMMAND,
2595 pci_read_config (tag, PCIR_COMMAND, sizeof(char))
2596 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2597 /* Knowledge is power, responsibility is direct */
2598 {
2599 struct pci_devinfo {
2600 STAILQ_ENTRY(pci_devinfo) pci_links;
2601 struct resource_list resources;
2602 pcicfgregs cfg;
2603 } * dinfo = device_get_ivars(tag);
2604 sc->ha_pciBusNum = dinfo->cfg.bus;
2605 sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3)
2606 | dinfo->cfg.func;
2607 }
984263bc
MD
2608 /* Check if the device is there? */
2609 if ((ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt) == 0)
2610 || ((status = (PI2O_EXEC_STATUS_GET_REPLY)malloc (
2611 sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK))
2612 == (PI2O_EXEC_STATUS_GET_REPLY)NULL)
2613 || (ASR_getStatus(sc->ha_Virt, sc->ha_Fvirt, status) == NULL)) {
2614 printf ("asr%d: could not initialize hardware\n", unit);
2615 ATTACH_RETURN(ENODEV); /* Get next, maybe better luck */
2616 }
2617 sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2618 sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2619 sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2620 sc->ha_SystemTable.IopState = status->IopState;
2621 sc->ha_SystemTable.MessengerType = status->MessengerType;
2622 sc->ha_SystemTable.InboundMessageFrameSize
2623 = status->InboundMFrameSize;
2624 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow
2625 = (U32)(sc->ha_Base) + (U32)(&(((i2oRegs_t *)NULL)->ToFIFO));
2626
2627 if (!asr_pci_map_int(tag, (void *)sc)) {
2628 printf ("asr%d: could not map interrupt\n", unit);
2629 ATTACH_RETURN(ENXIO);
2630 }
2631
2632 /* Adjust the maximim inbound count */
2633 if (((sc->ha_QueueSize
2634 = I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status))
2635 > MAX_INBOUND)
2636 || (sc->ha_QueueSize == 0)) {
2637 sc->ha_QueueSize = MAX_INBOUND;
2638 }
2639
2640 /* Adjust the maximum outbound count */
2641 if (((sc->ha_Msgs_Count
2642 = I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status))
2643 > MAX_OUTBOUND)
2644 || (sc->ha_Msgs_Count == 0)) {
2645 sc->ha_Msgs_Count = MAX_OUTBOUND;
2646 }
2647 if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2648 sc->ha_Msgs_Count = sc->ha_QueueSize;
2649 }
2650
2651 /* Adjust the maximum SG size to adapter */
2652 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(
2653 status) << 2)) > MAX_INBOUND_SIZE) {
2654 size = MAX_INBOUND_SIZE;
2655 }
2656 free (status, M_TEMP);
2657 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2658 + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2659 }
2660
2661 /*
2662 * Only do a bus/HBA reset on the first time through. On this
2663 * first time through, we do not send a flush to the devices.
2664 */
2665 if (ASR_init(sc) == 0) {
2666 struct BufferInfo {
2667 I2O_PARAM_RESULTS_LIST_HEADER Header;
2668 I2O_PARAM_READ_OPERATION_RESULT Read;
2669 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2670 };
2671 defAlignLong (struct BufferInfo, Buffer);
2672 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2673# define FW_DEBUG_BLED_OFFSET 8
2674
2675 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2676 ASR_getParams(sc, 0,
2677 I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2678 Buffer, sizeof(struct BufferInfo)))
2679 != (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)NULL) {
2680 sc->ha_blinkLED = sc->ha_Fvirt
2681 + I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info)
2682 + FW_DEBUG_BLED_OFFSET;
2683 }
2684 if (ASR_acquireLct(sc) == 0) {
2685 (void)ASR_acquireHrt(sc);
2686 }
2687 } else {
2688 printf ("asr%d: failed to initialize\n", unit);
2689 ATTACH_RETURN(ENXIO);
2690 }
2691 /*
2692 * Add in additional probe responses for more channels. We
2693 * are reusing the variable `target' for a channel loop counter.
2694 * Done here because of we need both the acquireLct and
2695 * acquireHrt data.
2696 */
2697 { PI2O_LCT_ENTRY Device;
2698
2699 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2700 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
2701 ++Device) {
2702 if (Device->le_type == I2O_UNKNOWN) {
2703 continue;
2704 }
2705 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
2706 if (Device->le_target > sc->ha_MaxId) {
2707 sc->ha_MaxId = Device->le_target;
2708 }
2709 if (Device->le_lun > sc->ha_MaxLun) {
2710 sc->ha_MaxLun = Device->le_lun;
2711 }
2712 }
2713 if (((Device->le_type & I2O_PORT) != 0)
2714 && (Device->le_bus <= MAX_CHANNEL)) {
2715 /* Do not increase MaxId for efficiency */
2716 sc->ha_adapter_target[Device->le_bus]
2717 = Device->le_target;
2718 }
2719 }
2720 }
2721
2722
2723 /*
2724 * Print the HBA model number as inquired from the card.
2725 */
2726
2727 printf ("asr%d:", unit);
2728
2729 if ((iq = (struct scsi_inquiry_data *)malloc (
2730 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK))
2731 != (struct scsi_inquiry_data *)NULL) {
2732 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
2733 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
2734 int posted = 0;
2735
2736 bzero (iq, sizeof(struct scsi_inquiry_data));
2737 bzero (Message_Ptr
2738 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
2739 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2740 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2741
2742 I2O_MESSAGE_FRAME_setVersionOffset(
2743 (PI2O_MESSAGE_FRAME)Message_Ptr,
2744 I2O_VERSION_11
2745 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2746 - sizeof(I2O_SG_ELEMENT))
2747 / sizeof(U32)) << 4));
2748 I2O_MESSAGE_FRAME_setMessageSize(
2749 (PI2O_MESSAGE_FRAME)Message_Ptr,
2750 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2751 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT))
2752 / sizeof(U32));
2753 I2O_MESSAGE_FRAME_setInitiatorAddress (
2754 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2755 I2O_MESSAGE_FRAME_setFunction(
2756 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2757 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2758 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2759 I2O_SCSI_SCB_EXEC);
2760 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2761 I2O_SCB_FLAG_ENABLE_DISCONNECT
2762 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2763 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2764 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
2765 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2766 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2767 DPT_ORGANIZATION_ID);
2768 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2769 Message_Ptr->CDB[0] = INQUIRY;
2770 Message_Ptr->CDB[4] = (unsigned char)sizeof(struct scsi_inquiry_data);
2771 if (Message_Ptr->CDB[4] == 0) {
2772 Message_Ptr->CDB[4] = 255;
2773 }
2774
2775 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2776 (I2O_SCB_FLAG_XFER_FROM_DEVICE
2777 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2778 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2779 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2780
2781 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2782 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2783 sizeof(struct scsi_inquiry_data));
2784 SG(&(Message_Ptr->SGL), 0,
2785 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2786 iq, sizeof(struct scsi_inquiry_data));
2787 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2788
2789 if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
2790 printf (" ");
2791 ASR_prstring (iq->vendor, 8);
2792 ++posted;
2793 }
2794 if (iq->product[0] && (iq->product[0] != ' ')) {
2795 printf (" ");
2796 ASR_prstring (iq->product, 16);
2797 ++posted;
2798 }
2799 if (iq->revision[0] && (iq->revision[0] != ' ')) {
2800 printf (" FW Rev. ");
2801 ASR_prstring (iq->revision, 4);
2802 ++posted;
2803 }
2804 free ((caddr_t)iq, M_TEMP);
2805 if (posted) {
2806 printf (",");
2807 }
2808 }
2809 printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
2810 (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
2811
2812 /*
2813 * fill in the prototype cam_path.
2814 */
2815 {
2816 int bus;
2817 union asr_ccb * ccb;
2818
2819 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
2820 printf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit);
2821 ATTACH_RETURN(ENOMEM);
2822 }
2823 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
984263bc
MD
2824 int QueueSize = sc->ha_QueueSize;
2825
2826 if (QueueSize > MAX_INBOUND) {
2827 QueueSize = MAX_INBOUND;
2828 }
2829
984263bc
MD
2830 /*
2831 * Construct our first channel SIM entry
2832 */
2833 sc->ha_sim[bus] = cam_sim_alloc(
2834 asr_action, asr_poll, "asr", sc,
521cf4d2
MD
2835 unit, 1, QueueSize, NULL);
2836 if (sc->ha_sim[bus] == NULL)
984263bc 2837 continue;
984263bc
MD
2838
2839 if (xpt_bus_register(sc->ha_sim[bus], bus)
2840 != CAM_SUCCESS) {
521cf4d2 2841 cam_sim_free(sc->ha_sim[bus]);
984263bc
MD
2842 sc->ha_sim[bus] = NULL;
2843 continue;
2844 }
2845
2846 if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
2847 cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
2848 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2849 xpt_bus_deregister(
2850 cam_sim_path(sc->ha_sim[bus]));
521cf4d2 2851 cam_sim_free(sc->ha_sim[bus]);
984263bc
MD
2852 sc->ha_sim[bus] = NULL;
2853 continue;
2854 }
2855 }
2856 asr_free_ccb (ccb);
2857 }
2858 /*
2859 * Generate the device node information
2860 */
e4c9c0c8 2861 make_dev(&asr_cdevsw, unit, 0, 0, S_IRWXU, "rasr%d", unit);
984263bc
MD
2862 ATTACH_RETURN(0);
2863} /* asr_attach */
2864
2865STATIC void
2866asr_poll(
2867 IN struct cam_sim *sim)
2868{
2869 asr_intr(cam_sim_softc(sim));
2870} /* asr_poll */
2871
2872STATIC void
2873asr_action(
2874 IN struct cam_sim * sim,
2875 IN union ccb * ccb)
2876{
2877 struct Asr_softc * sc;
2878
2879 debug_asr_printf ("asr_action(%lx,%lx{%x})\n",
2880 (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code);
2881
2882 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));
2883
2884 ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);
2885
2886 switch (ccb->ccb_h.func_code) {
2887
2888 /* Common cases first */
2889 case XPT_SCSI_IO: /* Execute the requested I/O operation */
2890 {
2891 struct Message {
2892 char M[MAX_INBOUND_SIZE];
2893 };
2894 defAlignLong(struct Message,Message);
2895 PI2O_MESSAGE_FRAME Message_Ptr;
2896
2897 /* Reject incoming commands while we are resetting the card */
2898 if (sc->ha_in_reset != HA_OPERATIONAL) {
2899 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2900 if (sc->ha_in_reset >= HA_OFF_LINE) {
2901 /* HBA is now off-line */
2902 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2903 } else {
2904 /* HBA currently resetting, try again later. */
2905 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2906 }
2907 debug_asr_cmd_printf (" e\n");
2908 xpt_done(ccb);
2909 debug_asr_cmd_printf (" q\n");
2910 break;
2911 }
2912 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2913 printf(
2914 "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
2915 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
2916 ccb->csio.cdb_io.cdb_bytes[0],
2917 cam_sim_bus(sim),
2918 ccb->ccb_h.target_id,
2919 ccb->ccb_h.target_lun);
2920 }
2921 debug_asr_cmd_printf ("(%d,%d,%d,%d)",
2922 cam_sim_unit(sim),
2923 cam_sim_bus(sim),
2924 ccb->ccb_h.target_id,
2925 ccb->ccb_h.target_lun);
2926 debug_asr_cmd_dump_ccb(ccb);
2927
2928 if ((Message_Ptr = ASR_init_message ((union asr_ccb *)ccb,
2929 (PI2O_MESSAGE_FRAME)Message)) != (PI2O_MESSAGE_FRAME)NULL) {
2930 debug_asr_cmd2_printf ("TID=%x:\n",
2931 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
2932 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
2933 debug_asr_cmd2_dump_message(Message_Ptr);
2934 debug_asr_cmd1_printf (" q");
2935
2936 if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
984263bc
MD
2937 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2938 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2939 debug_asr_cmd_printf (" E\n");
2940 xpt_done(ccb);
2941 }
2942 debug_asr_cmd_printf (" Q\n");
2943 break;
2944 }
2945 /*
2946 * We will get here if there is no valid TID for the device
2947 * referenced in the scsi command packet.
2948 */
2949 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2950 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2951 debug_asr_cmd_printf (" B\n");
2952 xpt_done(ccb);
2953 break;
2954 }
2955
2956 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
2957 /* Rese HBA device ... */
2958 asr_hbareset (sc);
2959 ccb->ccb_h.status = CAM_REQ_CMP;
2960 xpt_done(ccb);
2961 break;
2962
2963# if (defined(REPORT_LUNS))
2964 case REPORT_LUNS:
2965# endif
2966 case XPT_ABORT: /* Abort the specified CCB */
2967 /* XXX Implement */
2968 ccb->ccb_h.status = CAM_REQ_INVALID;
2969 xpt_done(ccb);
2970 break;
2971
2972 case XPT_SET_TRAN_SETTINGS:
2973 /* XXX Implement */
2974 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2975 xpt_done(ccb);
2976 break;
2977
2978 case XPT_GET_TRAN_SETTINGS:
2979 /* Get default/user set transfer settings for the target */
2980 {
2981 struct ccb_trans_settings *cts;
2982 u_int target_mask;
2983
2984 cts = &(ccb->cts);
2985 target_mask = 0x01 << ccb->ccb_h.target_id;
2986 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
2987 cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
2988 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2989 cts->sync_period = 6; /* 40MHz */
2990 cts->sync_offset = 15;
2991
2992 cts->valid = CCB_TRANS_SYNC_RATE_VALID
2993 | CCB_TRANS_SYNC_OFFSET_VALID
2994 | CCB_TRANS_BUS_WIDTH_VALID
2995 | CCB_TRANS_DISC_VALID
2996 | CCB_TRANS_TQ_VALID;
2997 ccb->ccb_h.status = CAM_REQ_CMP;
2998 } else {
2999 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3000 }
3001 xpt_done(ccb);
3002 break;
3003 }
3004
3005 case XPT_CALC_GEOMETRY:
3006 {
3007 struct ccb_calc_geometry *ccg;
3008 u_int32_t size_mb;
3009 u_int32_t secs_per_cylinder;
3010
3011 ccg = &(ccb->ccg);
3012 size_mb = ccg->volume_size
3013 / ((1024L * 1024L) / ccg->block_size);
3014
3015 if (size_mb > 4096) {
3016 ccg->heads = 255;
3017 ccg->secs_per_track = 63;
3018 } else if (size_mb > 2048) {
3019 ccg->heads = 128;
3020 ccg->secs_per_track = 63;
3021 } else if (size_mb > 1024) {
3022 ccg->heads = 65;
3023 ccg->secs_per_track = 63;
3024 } else {
3025 ccg->heads = 64;
3026 ccg->secs_per_track = 32;
3027 }
3028 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
3029 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
3030 ccb->ccb_h.status = CAM_REQ_CMP;
3031 xpt_done(ccb);
3032 break;
3033 }
3034
3035 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
3036 ASR_resetBus (sc, cam_sim_bus(sim));
3037 ccb->ccb_h.status = CAM_REQ_CMP;
3038 xpt_done(ccb);
3039 break;
3040
3041 case XPT_TERM_IO: /* Terminate the I/O process */
3042 /* XXX Implement */
3043 ccb->ccb_h.status = CAM_REQ_INVALID;
3044 xpt_done(ccb);
3045 break;
3046
3047 case XPT_PATH_INQ: /* Path routing inquiry */
3048 {
3049 struct ccb_pathinq *cpi = &(ccb->cpi);
3050
3051 cpi->version_num = 1; /* XXX??? */
3052 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3053 cpi->target_sprt = 0;
3054 /* Not necessary to reset bus, done by HDM initialization */
3055 cpi->hba_misc = PIM_NOBUSRESET;
3056 cpi->hba_eng_cnt = 0;
3057 cpi->max_target = sc->ha_MaxId;
3058 cpi->max_lun = sc->ha_MaxLun;
3059 cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
3060 cpi->bus_id = cam_sim_bus(sim);
3061 cpi->base_transfer_speed = 3300;
3062 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3063 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
3064 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3065 cpi->unit_number = cam_sim_unit(sim);
3066 cpi->ccb_h.status = CAM_REQ_CMP;
3067 xpt_done(ccb);
3068 break;
3069 }
3070 default:
3071 ccb->ccb_h.status = CAM_REQ_INVALID;
3072 xpt_done(ccb);
3073 break;
3074 }
3075} /* asr_action */
3076
984263bc
MD
3077
3078/*
3079 * Handle processing of current CCB as pointed to by the Status.
3080 */
/*
 * Interrupt service routine body: while the adapter's Status register shows
 * our interrupt condition, drain reply frames from the outbound (From) FIFO,
 * complete the CCB each reply refers to, and hand the reply frame back to
 * the controller.  Returns non-zero if at least one loop pass ran (the
 * interrupt was ours), zero otherwise.
 */
3081STATIC int
3082asr_intr (
3083 IN Asr_softc_t * sc)
3084{
3085 OUT int processed;
3086
984263bc
MD
3087 for (processed = 0;
3088 sc->ha_Virt->Status & Mask_InterruptsDisabled;
3089 processed = 1) {
3090 union asr_ccb * ccb;
3091 U32 ReplyOffset;
3092 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
3093
 /*
 * Pop the next reply MFA; the read is retried once when the
 * first read reports EMPTY_QUEUE -- NOTE(review): presumably
 * guards a transient empty indication, confirm vs. I2O spec.
 */
3094 if (((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)
3095 && ((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)) {
3096 break;
3097 }
 /* Translate the reply's bus address into a kernel virtual pointer */
3098 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
3099 - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
3100 /*
3101 * We do not need any (optional byteswapping) method access to
3102 * the Initiator context field.
3103 */
3104 ccb = (union asr_ccb *)(long)
3105 I2O_MESSAGE_FRAME_getInitiatorContext64(
3106 &(Reply->StdReplyFrame.StdMessageFrame));
 /* A FAIL-flagged reply preserves the original MFA; salvage it */
3107 if (I2O_MESSAGE_FRAME_getMsgFlags(
3108 &(Reply->StdReplyFrame.StdMessageFrame))
3109 & I2O_MESSAGE_FLAGS_FAIL) {
3110 defAlignLong(I2O_UTIL_NOP_MESSAGE,Message);
3111 PI2O_UTIL_NOP_MESSAGE Message_Ptr;
3112 U32 MessageOffset;
3113
3114 MessageOffset = (u_long)
3115 I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
3116 (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
3117 /*
3118 * Get the Original Message Frame's address, and get
3119 * it's Transaction Context into our space. (Currently
3120 * unused at original authorship, but better to be
3121 * safe than sorry). Straight copy means that we
3122 * need not concern ourselves with the (optional
3123 * byteswapping) method access.
3124 */
3125 Reply->StdReplyFrame.TransactionContext
3126 = ((PI2O_SINGLE_REPLY_MESSAGE_FRAME)
3127 (sc->ha_Fvirt + MessageOffset))->TransactionContext;
3128 /*
3129 * For 64 bit machines, we need to reconstruct the
3130 * 64 bit context.
3131 */
3132 ccb = (union asr_ccb *)(long)
3133 I2O_MESSAGE_FRAME_getInitiatorContext64(
3134 &(Reply->StdReplyFrame.StdMessageFrame));
3135 /*
3136 * Unique error code for command failure.
3137 */
3138 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3139 &(Reply->StdReplyFrame), (u_int16_t)-2);
3140 /*
3141 * Modify the message frame to contain a NOP and
3142 * re-issue it to the controller.
3143 */
3144 Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
3145 Message, sizeof(I2O_UTIL_NOP_MESSAGE));
3146# if (I2O_UTIL_NOP != 0)
3147 I2O_MESSAGE_FRAME_setFunction (
3148 &(Message_Ptr->StdMessageFrame),
3149 I2O_UTIL_NOP);
3150# endif
3151 /*
3152 * Copy the packet out to the Original Message
3153 */
3154 bcopy ((caddr_t)Message_Ptr,
3155 sc->ha_Fvirt + MessageOffset,
3156 sizeof(I2O_UTIL_NOP_MESSAGE));
3157 /*
3158 * Issue the NOP
3159 */
3160 sc->ha_Virt->ToFIFO = MessageOffset;
3161 }
3162
3163 /*
3164 * Asynchronous command with no return requirements,
3165 * and a generic handler for immunity against odd error
3166 * returns from the adapter.
3167 */
3168 if (ccb == (union asr_ccb *)NULL) {
3169 /*
3170 * Return Reply so that it can be used for the
3171 * next command
3172 */
3173 sc->ha_Virt->FromFIFO = ReplyOffset;
3174 continue;
3175 }
3176
3177 /* Welease Wadjah! (and stop timeouts) */
3178 ASR_ccbRemove (sc, ccb);
3179
 /* Map the I2O detailed status code onto a CAM completion status */
3180 switch (
3181 I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
3182 &(Reply->StdReplyFrame))) {
3183
3184 case I2O_SCSI_DSC_SUCCESS:
3185 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3186 ccb->ccb_h.status |= CAM_REQ_CMP;
3187 break;
3188
3189 case I2O_SCSI_DSC_CHECK_CONDITION:
3190 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3191 ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID;
3192 break;
3193
3194 case I2O_SCSI_DSC_BUSY:
3195 /* FALLTHRU */
3196 case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
3197 /* FALLTHRU */
3198 case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
3199 /* FALLTHRU */
3200 case I2O_SCSI_HBA_DSC_BUS_BUSY:
3201 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3202 ccb->ccb_h.status |= CAM_SCSI_BUSY;
3203 break;
3204
3205 case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
3206 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3207 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3208 break;
3209
3210 case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
3211 /* FALLTHRU */
3212 case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
3213 /* FALLTHRU */
3214 case I2O_SCSI_HBA_DSC_LUN_INVALID:
3215 /* FALLTHRU */
3216 case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
3217 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3218 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
3219 break;
3220
3221 case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
3222 /* FALLTHRU */
3223 case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
3224 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3225 ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
3226 break;
3227
3228 default:
3229 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3230 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3231 break;
3232 }
 /* Residual = requested transfer length minus bytes actually moved */
3233 if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
3234 ccb->csio.resid -=
3235 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
3236 Reply);
3237 }
3238
984263bc
MD
3239 /* Sense data in reply packet */
3240 if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
3241 u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);
3242
3243 if (size) {
 /* Clamp to the smallest of the three applicable limits */
3244 if (size > sizeof(ccb->csio.sense_data)) {
3245 size = sizeof(ccb->csio.sense_data);
3246 }
3247 if (size > I2O_SCSI_SENSE_DATA_SZ) {
3248 size = I2O_SCSI_SENSE_DATA_SZ;
3249 }
3250 if ((ccb->csio.sense_len)
3251 && (size > ccb->csio.sense_len)) {
3252 size = ccb->csio.sense_len;
3253 }
3254 bcopy ((caddr_t)Reply->SenseData,
3255 (caddr_t)&(ccb->csio.sense_data), size);
3256 }
3257 }
3258
3259 /*
3260 * Return Reply so that it can be used for the next command
3261 * since we have no more need for it now
3262 */
3263 sc->ha_Virt->FromFIFO = ReplyOffset;
3264
 /* CCBs without a CAM path are internal synchronous waiters */
3265 if (ccb->ccb_h.path) {
3266 xpt_done ((union ccb *)ccb);
3267 } else {
3268 wakeup ((caddr_t)ccb);
3269 }
3270 }
984263bc
MD
3271 return (processed);
3272} /* asr_intr */
3274#undef QueueSize /* Grrrr */
3275#undef SG_Size /* Grrrr */
3276
3277/*
3278 * Meant to be included at the bottom of asr.c !!!
3279 */
3280
3281/*
3282 * Included here as hard coded. Done because other necessary include
3283 * files utilize C++ comment structures which make them a nuisance to
3284 * included here just to pick up these three typedefs.
3285 */
/* DPT engine handle types -- all three are plain 32-bit values here. */
3286typedef U32 DPT_TAG_T;
3287typedef U32 DPT_MSG_T;
3288typedef U32 DPT_RTN_T;
3289
3290#undef SCSI_RESET /* Conflicts with "scsi/scsiconf.h" defintion */
1f2de5d4 3291#include "osd_unix.h"
984263bc
MD
3292
/* The control device's minor number selects the adapter unit. */
3293#define asr_unit(dev) minor(dev)
3294
3295STATIC INLINE Asr_softc_t *
3296ASR_get_sc (
3297 IN dev_t dev)
3298{
3299 int unit = asr_unit(dev);
3300 OUT Asr_softc_t * sc = Asr_softc;
3301
3302 while (sc && sc->ha_sim[0] && (cam_sim_unit(sc->ha_sim[0]) != unit)) {
3303 sc = sc->ha_next;
3304 }
3305 return (sc);
3306} /* ASR_get_sc */
3307
/*
 * Single-open exclusion flag for the control device: set by asr_open(),
 * cleared by asr_close().  One global flag, not per-adapter, so only one
 * ioctl client may hold any controller open at a time.
 */
3308STATIC u_int8_t ASR_ctlr_held;
3309#if (!defined(UNREFERENCED_PARAMETER))
3310# define UNREFERENCED_PARAMETER(x) (void)(x)
3311#endif
3312
3313STATIC int
3314asr_open(
3315 IN dev_t dev,
3316 int32_t flags,
3317 int32_t ifmt,
41c20dac 3318 IN d_thread_t *td)
984263bc
MD
3319{
3320 int s;
3321 OUT int error;
3322 UNREFERENCED_PARAMETER(flags);
3323 UNREFERENCED_PARAMETER(ifmt);
3324
3325 if (ASR_get_sc (dev) == (Asr_softc_t *)NULL) {
3326 return (ENODEV);
3327 }
dadab5e9 3328 KKASSERT(td->td_proc);
984263bc
MD
3329 s = splcam ();
3330 if (ASR_ctlr_held) {
3331 error = EBUSY;
dadab5e9 3332 } else if ((error = suser_cred(td->td_proc->p_ucred, 0)) == 0) {
984263bc
MD
3333 ++ASR_ctlr_held;
3334 }
3335 splx(s);
3336 return (error);
3337} /* asr_open */
3338
3339STATIC int
3340asr_close(
3341 dev_t dev,
3342 int flags,
3343 int ifmt,
41c20dac 3344 d_thread_t *td)
984263bc
MD
3345{
3346 UNREFERENCED_PARAMETER(dev);
3347 UNREFERENCED_PARAMETER(flags);
3348 UNREFERENCED_PARAMETER(ifmt);
41c20dac 3349 UNREFERENCED_PARAMETER(td);
984263bc
MD
3350
3351 ASR_ctlr_held = 0;
3352 return (0);
3353} /* asr_close */
3354
3355
3356/*-------------------------------------------------------------------------*/
3357/* Function ASR_queue_i */
3358/*-------------------------------------------------------------------------*/
3359/* The Parameters Passed To This Function Are : */
3360/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
3361/* PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command */
3362/* I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure */
3363/* */
3364/* This Function Will Take The User Request Packet And Convert It To An */
3365/* I2O MSG And Send It Off To The Adapter. */
3366/* */
3367/* Return : 0 For OK, Error Code Otherwise */
3368/*-------------------------------------------------------------------------*/
/*
 * User ioctl execution path: copy an I2O message frame in from user space,
 * run it synchronously on the adapter (handling a few initialization
 * functions specially), then copy the reply frame and any scatter/gather
 * data back out.  `Packet' is a USER-space pointer; the reply area is
 * expected to follow the message within the same user buffer.
 */
3369STATIC INLINE int
3370ASR_queue_i(
3371 IN Asr_softc_t * sc,
3372 INOUT PI2O_MESSAGE_FRAME Packet)
3373{
3374 union asr_ccb * ccb;
3375 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
3376 PI2O_MESSAGE_FRAME Message_Ptr;
3377 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply_Ptr;
3378 int MessageSizeInBytes;
3379 int ReplySizeInBytes;
3380 int error;
3381 int s;
3382 /* Scatter Gather buffer list */
3383 struct ioctlSgList_S {
3384 SLIST_ENTRY(ioctlSgList_S) link;
3385 caddr_t UserSpace;
3386 I2O_FLAGS_COUNT FlagsCount;
3387 char KernelSpace[sizeof(long)];
3388 } * elm;
3389 /* Generates a `first' entry */
3390 SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;
3391
 /* Refuse new work while the adapter is faulted (Blink LED code set) */
3392 if (ASR_getBlinkLedCode(sc)) {
3393 debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
3394 ASR_getBlinkLedCode(sc));
3395 return (EIO);
3396 }
3397 /* Copy in the message into a local allocation */
 /* First pass copies only the fixed header to learn the full size */
3398 if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (
3399 sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK))
3400 == (PI2O_MESSAGE_FRAME)NULL) {
3401 debug_usr_cmd_printf (
3402 "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3403 return (ENOMEM);
3404 }
3405 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3406 sizeof(I2O_MESSAGE_FRAME))) != 0) {
3407 free (Message_Ptr, M_TEMP);
3408 debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
3409 return (error);
3410 }
3411 /* Acquire information to determine type of packet */
3412 MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
3413 /* The offset of the reply information within the user packet */
3414 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
3415 + MessageSizeInBytes);
3416
3417 /* Check if the message is a synchronous initialization command */
3418 s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
3419 free (Message_Ptr, M_TEMP);
3420 switch (s) {
3421
3422 case I2O_EXEC_IOP_RESET:
3423 { U32 status;
3424
3425 status = ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt);
3426 ReplySizeInBytes = sizeof(status);
3427 debug_usr_cmd_printf ("resetIOP done\n");
3428 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3429 ReplySizeInBytes));
3430 }
3431
3432 case I2O_EXEC_STATUS_GET:
3433 { I2O_EXEC_STATUS_GET_REPLY status;
3434
3435 if (ASR_getStatus (sc->ha_Virt, sc->ha_Fvirt, &status)
3436 == (PI2O_EXEC_STATUS_GET_REPLY)NULL) {
3437 debug_usr_cmd_printf ("getStatus failed\n");
3438 return (ENXIO);
3439 }
3440 ReplySizeInBytes = sizeof(status);
3441 debug_usr_cmd_printf ("getStatus done\n");
3442 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3443 ReplySizeInBytes));
3444 }
3445
3446 case I2O_EXEC_OUTBOUND_INIT:
3447 { U32 status;
3448
3449 status = ASR_initOutBound(sc);
3450 ReplySizeInBytes = sizeof(status);
3451 debug_usr_cmd_printf ("intOutBound done\n");
3452 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3453 ReplySizeInBytes));
3454 }
3455 }
3456
3457 /* Determine if the message size is valid */
3458 if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
3459 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
3460 debug_usr_cmd_printf ("Packet size %d incorrect\n",
3461 MessageSizeInBytes);
3462 return (EINVAL);
3463 }
3464
 /* Second pass: copy in the entire, correctly-sized message frame */
3465 if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (MessageSizeInBytes,
3466 M_TEMP, M_WAITOK)) == (PI2O_MESSAGE_FRAME)NULL) {
3467 debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3468 MessageSizeInBytes);
3469 return (ENOMEM);
3470 }
3471 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3472 MessageSizeInBytes)) != 0) {
3473 free (Message_Ptr, M_TEMP);
3474 debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
3475 MessageSizeInBytes, error);
3476 return (error);
3477 }
3478
3479 /* Check the size of the reply frame, and start constructing */
3480
3481 if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
3482 sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK))
3483 == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
3484 free (Message_Ptr, M_TEMP);
3485 debug_usr_cmd_printf (
3486 "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3487 return (ENOMEM);
3488 }
3489 if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
3490 sizeof(I2O_MESSAGE_FRAME))) != 0) {
3491 free (Reply_Ptr, M_TEMP);
3492 free (Message_Ptr, M_TEMP);
3493 debug_usr_cmd_printf (
3494 "Failed to copy in reply frame, errno=%d\n",
3495 error);
3496 return (error);
3497 }
3498 ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
3499 &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
3500 free (Reply_Ptr, M_TEMP);
3501 if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
3502 free (Message_Ptr, M_TEMP);
3503 debug_usr_cmd_printf (
3504 "Failed to copy in reply frame[%d], errno=%d\n",
3505 ReplySizeInBytes, error);
3506 return (EINVAL);
3507 }
3508
 /* Allocate the kernel-side reply buffer at the declared (min) size */
3509 if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
3510 ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
3511 ? ReplySizeInBytes
3512 : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
3513 M_TEMP, M_WAITOK)) == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
3514 free (Message_Ptr, M_TEMP);
3515 debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3516 ReplySizeInBytes);
3517 return (ENOMEM);
3518 }
3519 (void)ASR_fillMessage ((char *)Reply_Ptr, ReplySizeInBytes);
3520 Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
3521 = Message_Ptr->InitiatorContext;
3522 Reply_Ptr->StdReplyFrame.TransactionContext
3523 = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
3524 I2O_MESSAGE_FRAME_setMsgFlags(
3525 &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3526 I2O_MESSAGE_FRAME_getMsgFlags(
3527 &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
3528 | I2O_MESSAGE_FLAGS_REPLY);
3529
3530 /* Check if the message is a special case command */
3531 switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
3532 case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
3533 if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
3534 Message_Ptr) & 0xF0) >> 2)) {
3535 free (Message_Ptr, M_TEMP);
3536 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3537 &(Reply_Ptr->StdReplyFrame),
3538 (ASR_setSysTab(sc) != CAM_REQ_CMP));
3539 I2O_MESSAGE_FRAME_setMessageSize(
3540 &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3541 sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
3542 error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
3543 ReplySizeInBytes);
3544 free (Reply_Ptr, M_TEMP);
3545 return (error);
3546 }
3547 }
3548
3549 /* Deal in the general case */
3550 /* First allocate and optionally copy in each scatter gather element */
3551 SLIST_INIT(&sgList);
3552 if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
3553 PI2O_SGE_SIMPLE_ELEMENT sg;
3554
3555 /*
3556 * since this code is reused in several systems, code
3557 * efficiency is greater by using a shift operation rather
3558 * than a divide by sizeof(u_int32_t).
3559 */
3560 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3561 + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
3562 >> 2));
3563 while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
3564 + MessageSizeInBytes)) {
3565 caddr_t v;
3566 int len;
3567
3568 if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3569 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
3570 error = EINVAL;
3571 break;
3572 }
3573 len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
3574 debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
3575 sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3576 + ((I2O_MESSAGE_FRAME_getVersionOffset(
3577 Message_Ptr) & 0xF0) >> 2)),
3578 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);
3579
3580 if ((elm = (struct ioctlSgList_S *)malloc (
3581 sizeof(*elm) - sizeof(elm->KernelSpace) + len,
3582 M_TEMP, M_WAITOK))
3583 == (struct ioctlSgList_S *)NULL) {
3584 debug_usr_cmd_printf (
3585 "Failed to allocate SG[%d]\n", len);
3586 error = ENOMEM;
3587 break;
3588 }
3589 SLIST_INSERT_HEAD(&sgList, elm, link);
3590 elm->FlagsCount = sg->FlagsCount;
3591 elm->UserSpace = (caddr_t)
3592 (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
3593 v = elm->KernelSpace;
3594 /* Copy in outgoing data (DIR bit could be invalid) */
3595 if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
3596 != 0) {
3597 break;
3598 }
3599 /*
3600 * If the buffer is not contiguous, lets
3601 * break up the scatter/gather entries.
3602 */
3603 while ((len > 0)
3604 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
3605 (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
3606 int next, base, span;
3607
3608 span = 0;
3609 next = base = KVTOPHYS(v);
3610 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
3611 base);
3612
3613 /* How far can we go physically contiguously */
3614 while ((len > 0) && (base == next)) {
3615 int size;
3616
3617 next = trunc_page(base) + PAGE_SIZE;
3618 size = next - base;
3619 if (size > len) {
3620 size = len;
3621 }
3622 span += size;
3623 v += size;
3624 len -= size;
3625 base = KVTOPHYS(v);
3626 }
3627
3628 /* Construct the Flags */
3629 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
3630 span);
3631 {
3632 int flags = I2O_FLAGS_COUNT_getFlags(
3633 &(elm->FlagsCount));
3634 /* Any remaining length? */
3635 if (len > 0) {
3636 flags &=
3637 ~(I2O_SGL_FLAGS_END_OF_BUFFER
3638 | I2O_SGL_FLAGS_LAST_ELEMENT);
3639 }
3640 I2O_FLAGS_COUNT_setFlags(
3641 &(sg->FlagsCount), flags);
3642 }
3643
3644 debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
3645 sg - (PI2O_SGE_SIMPLE_ELEMENT)
3646 ((char *)Message_Ptr
3647 + ((I2O_MESSAGE_FRAME_getVersionOffset(
3648 Message_Ptr) & 0xF0) >> 2)),
3649 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
3650 span);
3651 if (len <= 0) {
3652 break;
3653 }
3654
3655 /*
3656 * Incrementing requires resizing of the
3657 * packet, and moving up the existing SG
3658 * elements.
3659 */
3660 ++sg;
3661 MessageSizeInBytes += sizeof(*sg);
3662 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
3663 I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
3664 + (sizeof(*sg) / sizeof(U32)));
3665 {
3666 PI2O_MESSAGE_FRAME NewMessage_Ptr;
3667
3668 if ((NewMessage_Ptr
3669 = (PI2O_MESSAGE_FRAME)
3670 malloc (MessageSizeInBytes,
3671 M_TEMP, M_WAITOK))
3672 == (PI2O_MESSAGE_FRAME)NULL) {
3673 debug_usr_cmd_printf (
3674 "Failed to acquire frame[%d] memory\n",
3675 MessageSizeInBytes);
3676 error = ENOMEM;
3677 break;
3678 }
3679 span = ((caddr_t)sg)
3680 - (caddr_t)Message_Ptr;
3681 bcopy ((caddr_t)Message_Ptr,
3682 (caddr_t)NewMessage_Ptr, span);
3683 bcopy ((caddr_t)(sg-1),
3684 ((caddr_t)NewMessage_Ptr) + span,
3685 MessageSizeInBytes - span);
3686 free (Message_Ptr, M_TEMP);
3687 sg = (PI2O_SGE_SIMPLE_ELEMENT)
3688 (((caddr_t)NewMessage_Ptr) + span);
3689 Message_Ptr = NewMessage_Ptr;
3690 }
3691 }
3692 if ((error)
3693 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3694 & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
3695 break;
3696 }
3697 ++sg;
3698 }
3699 if (error) {
 /* Unwind: release every SG copy plus both frames */
3700 while ((elm = SLIST_FIRST(&sgList))
3701 != (struct ioctlSgList_S *)NULL) {
3702 SLIST_REMOVE_HEAD(&sgList, link);
3703 free (elm, M_TEMP);
3704 }
3705 free (Reply_Ptr, M_TEMP);
3706 free (Message_Ptr, M_TEMP);
3707 return (error);
3708 }
3709 }
3710
3711 debug_usr_cmd_printf ("Inbound: ");
3712 debug_usr_cmd_dump_message(Message_Ptr);
3713
3714 /* Send the command */
3715 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
3716 /* Free up in-kernel buffers */
3717 while ((elm = SLIST_FIRST(&sgList))
3718 != (struct ioctlSgList_S *)NULL) {
3719 SLIST_REMOVE_HEAD(&sgList, link);
3720 free (elm, M_TEMP);
3721 }
3722 free (Reply_Ptr, M_TEMP);
3723 free (Message_Ptr, M_TEMP);
3724 return (ENOMEM);
3725 }
3726
3727 /*
3728 * We do not need any (optional byteswapping) method access to
3729 * the Initiator context field.
3730 */
3731 I2O_MESSAGE_FRAME_setInitiatorContext64(
3732 (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);
3733
3734 (void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
3735
3736 free (Message_Ptr, M_TEMP);
3737
3738 /*
3739 * Wait for the board to report a finished instruction.
3740 */
3741 s = splcam();
3742 while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
3743 if (ASR_getBlinkLedCode(sc)) {
3744 /* Reset Adapter */
3745 printf ("asr%d: Blink LED 0x%x resetting adapter\n",
3746 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
3747 ASR_getBlinkLedCode(sc));
3748 if (ASR_reset (sc) == ENXIO) {
3749 /* Command Cleanup */
3750 ASR_ccbRemove(sc, ccb);
3751 }
3752 splx(s);
3753 /* Free up in-kernel buffers */
3754 while ((elm = SLIST_FIRST(&sgList))
3755 != (struct ioctlSgList_S *)NULL) {
3756 SLIST_REMOVE_HEAD(&sgList, link);
3757 free (elm, M_TEMP);
3758 }
3759 free (Reply_Ptr, M_TEMP);
3760 asr_free_ccb(ccb);
3761 return (EIO);
3762 }
3763 /* Check every second for BlinkLed */
377d4740 3764 tsleep((caddr_t)ccb, 0, "asr", hz);
984263bc
MD
3765 }
3766 splx(s);
3767
3768 debug_usr_cmd_printf ("Outbound: ");
3769 debug_usr_cmd_dump_message(Reply_Ptr);
3770
3771 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3772 &(Reply_Ptr->StdReplyFrame),
3773 (ccb->ccb_h.status != CAM_REQ_CMP));
3774
3775 if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3776 - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
3777 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
3778 ccb->csio.dxfer_len - ccb->csio.resid);
3779 }
3780 if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
3781 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3782 - I2O_SCSI_SENSE_DATA_SZ))) {
3783 int size = ReplySizeInBytes
3784 - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3785 - I2O_SCSI_SENSE_DATA_SZ;
3786
3787 if (size > sizeof(ccb->csio.sense_data)) {
3788 size = sizeof(ccb->csio.sense_data);
3789 }
3790 bcopy ((caddr_t)&(ccb->csio.sense_data), (caddr_t)Reply_Ptr->SenseData,
3791 size);
3792 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
3793 Reply_Ptr, size);
3794 }
3795
3796 /* Free up in-kernel buffers */
3797 while ((elm = SLIST_FIRST(&sgList)) != (struct ioctlSgList_S *)NULL) {
3798 /* Copy out as necessary */
3799 if ((error == 0)
3800 /* DIR bit considered `valid', error due to ignorance works */
3801 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
3802 & I2O_SGL_FLAGS_DIR) == 0)) {
3803 error = copyout ((caddr_t)(elm->KernelSpace),
3804 elm->UserSpace,
3805 I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
3806 }
3807 SLIST_REMOVE_HEAD(&sgList, link);
3808 free (elm, M_TEMP);
3809 }
3810 if (error == 0) {
3811 /* Copy reply frame to user space */
3812 error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
3813 ReplySizeInBytes);
3814 }
3815 free (Reply_Ptr, M_TEMP);
3816 asr_free_ccb(ccb);
3817
3818 return (error);
3819} /* ASR_queue_i */
3820
3821/*----------------------------------------------------------------------*/
3822/* Function asr_ioctl */
3823/*----------------------------------------------------------------------*/
3824/* The parameters passed to this function are : */
3825/* dev : Device number. */
3826/* cmd : Ioctl Command */
3827/* data : User Argument Passed In. */
3828/* flag : Mode Parameter */
3829/* proc : Process Parameter */
3830/* */
3831/* This function is the user interface into this adapter driver */
3832/* */