Remove all remaining SPL code. Replace the mtd_cpl field in the machine
[dragonfly.git] / sys / dev / raid / asr / asr.c
CommitLineData
984263bc 1/* $FreeBSD: src/sys/dev/asr/asr.c,v 1.3.2.2 2001/08/23 05:21:29 scottl Exp $ */
38787eef 2/* $DragonFly: src/sys/dev/raid/asr/asr.c,v 1.21 2005/06/16 21:12:38 dillon Exp $ */
984263bc
MD
3/*
4 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
5 * Copyright (c) 2000-2001 Adaptec Corporation
6 * All rights reserved.
7 *
8 * TERMS AND CONDITIONS OF USE
9 *
10 * Redistribution and use in source form, with or without modification, are
11 * permitted provided that redistributions of source code must retain the
12 * above copyright notice, this list of conditions and the following disclaimer.
13 *
14 * This software is provided `as is' by Adaptec and any express or implied
15 * warranties, including, but not limited to, the implied warranties of
16 * merchantability and fitness for a particular purpose, are disclaimed. In no
17 * event shall Adaptec be liable for any direct, indirect, incidental, special,
18 * exemplary or consequential damages (including, but not limited to,
19 * procurement of substitute goods or services; loss of use, data, or profits;
20 * or business interruptions) however caused and on any theory of liability,
21 * whether in contract, strict liability, or tort (including negligence or
22 * otherwise) arising in any way out of the use of this driver software, even
23 * if advised of the possibility of such damage.
24 *
25 * SCSI I2O host adapter driver
26 *
27 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
28 * - The 2000S and 2005S do not initialize on some machines,
29 * increased timeout to 255ms from 50ms for the StatusGet
30 * command.
31 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
32 * - I knew this one was too good to be true. The error return
33 * on ioctl commands needs to be compared to CAM_REQ_CMP, not
34 * to the bit masked status.
35 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
36 * - The 2005S that was supported is affectionately called the
37 * Conjoined BAR Firmware. In order to support RAID-5 in a
38 * 16MB low-cost configuration, Firmware was forced to go
39 * to a Split BAR Firmware. This requires a separate IOP and
40 * Messaging base address.
41 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
42 * - Handle support for 2005S Zero Channel RAID solution.
43 * - System locked up if the Adapter locked up. Do not try
44 * to send other commands if the resetIOP command fails. The
45 * fail outstanding command discovery loop was flawed as the
46 * removal of the command from the list prevented discovering
47 * all the commands.
48 * - Comment changes to clarify driver.
49 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
50 * - We do not use the AC_FOUND_DEV event because of I2O.
51 * Removed asr_async.
52 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
53 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
54 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0
55 * mode as this is confused with competitor adapters in run
56 * mode.
57 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove
58 * to prevent operating system panic.
59 * - moved default major number to 154 from 97.
60 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
61 * - The controller is not actually an ASR (Adaptec SCSI RAID)
62 * series that is visible, it's more of an internal code name.
63 * remove any visible references within reason for now.
64 * - bus_ptr->LUN was not correctly zeroed when initially
65 * allocated causing a possible panic of the operating system
66 * during boot.
67 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
68 * - Code always fails for ASR_getTid affecting performance.
69 * - initiated a set of changes that resulted from a formal
70 * code inspection by Mark_Salyzyn@adaptec.com,
71 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
72 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
73 * Their findings were focussed on the LCT & TID handler, and
74 * all resulting changes were to improve code readability,
75 * consistency or have a positive effect on performance.
76 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
77 * - Passthrough returned an incorrect error.
78 * - Passthrough did not migrate the intrinsic scsi layer wakeup
79 * on command completion.
80 * - generate control device nodes using make_dev and delete_dev.
81 * - Performance affected by TID caching reallocing.
82 * - Made suggested changes by Justin_Gibbs@adaptec.com
83 * - use splcam instead of splbio.
984263bc
MD
84 * - use u_int8_t instead of u_char.
85 * - use u_int16_t instead of u_short.
86 * - use u_int32_t instead of u_long where appropriate.
87 * - use 64 bit context handler instead of 32 bit.
88 * - create_ccb should only allocate the worst case
89 * requirements for the driver since CAM may evolve
90 * making union ccb much larger than needed here.
91 * renamed create_ccb to asr_alloc_ccb.
92 * - go nutz justifying all debug prints as macros
93 * defined at the top and remove unsightly ifdefs.
94 * - INLINE STATIC viewed as confusing. Historically
95 * utilized to affect code performance and debug
96 * issues in OS, Compiler or OEM specific situations.
97 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
98 * - Ported from FreeBSD 2.2.X DPT I2O driver.
99 * changed struct scsi_xfer to union ccb/struct ccb_hdr
100 * changed variable name xs to ccb
101 * changed struct scsi_link to struct cam_path
102 * changed struct scsibus_data to struct cam_sim
103 * stopped using fordriver for holding on to the TID
104 * use proprietary packet creation instead of scsi_inquire
105 * CAM layer sends synchronize commands.
106 */
107
108#define ASR_VERSION 1
109#define ASR_REVISION '0'
110#define ASR_SUBREVISION '8'
111#define ASR_MONTH 8
112#define ASR_DAY 21
113#define ASR_YEAR 2001 - 1980
114
115/*
116 * Debug macros to reduce the unsightly ifdefs
117 */
118#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
119# define debug_asr_message(message) \
120 { \
121 u_int32_t * pointer = (u_int32_t *)message; \
122 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);\
123 u_int32_t counter = 0; \
124 \
125 while (length--) { \
126 printf ("%08lx%c", (u_long)*(pointer++), \
127 (((++counter & 7) == 0) || (length == 0)) \
128 ? '\n' \
129 : ' '); \
130 } \
131 }
132#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
133
134#if (defined(DEBUG_ASR))
135 /* Breaks on none STDC based compilers :-( */
136# define debug_asr_printf(fmt,args...) printf(fmt, ##args)
137# define debug_asr_dump_message(message) debug_asr_message(message)
138# define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path);
139 /* None fatal version of the ASSERT macro */
140# if (defined(__STDC__))
141# define ASSERT(phrase) if(!(phrase))printf(#phrase " at line %d file %s\n",__LINE__,__FILE__)
142# else
143# define ASSERT(phrase) if(!(phrase))printf("phrase" " at line %d file %s\n",__LINE__,__FILE__)
144# endif
145#else /* DEBUG_ASR */
146# define debug_asr_printf(fmt,args...)
147# define debug_asr_dump_message(message)
148# define debug_asr_print_path(ccb)
149# define ASSERT(x)
150#endif /* DEBUG_ASR */
151
152/*
153 * If DEBUG_ASR_CMD is defined:
154 * 0 - Display incoming SCSI commands
155 * 1 - add in a quick character before queueing.
156 * 2 - add in outgoing message frames.
157 */
158#if (defined(DEBUG_ASR_CMD))
159# define debug_asr_cmd_printf(fmt,args...) printf(fmt,##args)
160# define debug_asr_dump_ccb(ccb) \
161 { \
162 u_int8_t * cp = (unsigned char *)&(ccb->csio.cdb_io); \
163 int len = ccb->csio.cdb_len; \
164 \
165 while (len) { \
166 debug_asr_cmd_printf (" %02x", *(cp++)); \
167 --len; \
168 } \
169 }
170# if (DEBUG_ASR_CMD > 0)
171# define debug_asr_cmd1_printf debug_asr_cmd_printf
172# else
173# define debug_asr_cmd1_printf(fmt,args...)
174# endif
175# if (DEBUG_ASR_CMD > 1)
176# define debug_asr_cmd2_printf debug_asr_cmd_printf
177# define debug_asr_cmd2_dump_message(message) debug_asr_message(message)
178# else
179# define debug_asr_cmd2_printf(fmt,args...)
180# define debug_asr_cmd2_dump_message(message)
181# endif
182#else /* DEBUG_ASR_CMD */
183# define debug_asr_cmd_printf(fmt,args...)
184# define debug_asr_cmd_dump_ccb(ccb)
185# define debug_asr_cmd1_printf(fmt,args...)
186# define debug_asr_cmd2_printf(fmt,args...)
187# define debug_asr_cmd2_dump_message(message)
188#endif /* DEBUG_ASR_CMD */
189
190#if (defined(DEBUG_ASR_USR_CMD))
191# define debug_usr_cmd_printf(fmt,args...) printf(fmt,##args)
192# define debug_usr_cmd_dump_message(message) debug_usr_message(message)
193#else /* DEBUG_ASR_USR_CMD */
194# define debug_usr_cmd_printf(fmt,args...)
195# define debug_usr_cmd_dump_message(message)
196#endif /* DEBUG_ASR_USR_CMD */
197
198#define dsDescription_size 46 /* Snug as a bug in a rug */
1f2de5d4 199#include "dptsig.h"
984263bc
MD
200
201static dpt_sig_S ASR_sig = {
202 { 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
203 PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
204 OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL,
205 ADF_ALL_SC5,
206 0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
207 ASR_MONTH, ASR_DAY, ASR_YEAR,
208/* 01234567890123456789012345678901234567890123456789 < 50 chars */
209 "Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
210 /* ^^^^^ asr_attach alters these to match OS */
211};
212
213#include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */
214#include <sys/kernel.h>
215#include <sys/systm.h>
216#include <sys/malloc.h>
217#include <sys/proc.h>
218#include <sys/conf.h>
219#include <sys/disklabel.h>
220#include <sys/bus.h>
221#include <machine/resource.h>
222#include <machine/bus.h>
223#include <sys/rman.h>
224#include <sys/stat.h>
f15db79e 225#include <sys/device.h>
7f2216bc 226#include <sys/thread2.h>
984263bc 227
1f2de5d4
MD
228#include <bus/cam/cam.h>
229#include <bus/cam/cam_ccb.h>
230#include <bus/cam/cam_sim.h>
231#include <bus/cam/cam_xpt_sim.h>
232#include <bus/cam/cam_xpt_periph.h>
984263bc 233
1f2de5d4
MD
234#include <bus/cam/scsi/scsi_all.h>
235#include <bus/cam/scsi/scsi_message.h>
984263bc
MD
236
237#include <vm/vm.h>
238#include <vm/pmap.h>
239#include <machine/cputypes.h>
240#include <machine/clock.h>
241#include <i386/include/vmparam.h>
242
1f2de5d4
MD
243#include <bus/pci/pcivar.h>
244#include <bus/pci/pcireg.h>
984263bc
MD
245
246#define STATIC static
247#define INLINE
248
249#if (defined(DEBUG_ASR) && (DEBUG_ASR > 0))
250# undef STATIC
251# define STATIC
252# undef INLINE
253# define INLINE
254#endif
255#define IN
256#define OUT
257#define INOUT
258
259#define osdSwap4(x) ((u_long)ntohl((u_long)(x)))
260#define KVTOPHYS(x) vtophys(x)
1f2de5d4
MD
261#include "dptalign.h"
262#include "i2oexec.h"
263#include "i2obscsi.h"
264#include "i2odpt.h"
265#include "i2oadptr.h"
1f2de5d4 266#include "sys_info.h"
984263bc
MD
267
268/* Configuration Definitions */
269
270#define SG_SIZE 58 /* Scatter Gather list Size */
271#define MAX_TARGET_ID 126 /* Maximum Target ID supported */
272#define MAX_LUN 255 /* Maximum LUN Supported */
273#define MAX_CHANNEL 7 /* Maximum Channel # Supported by driver */
274#define MAX_INBOUND 2000 /* Max CCBs, Also Max Queue Size */
275#define MAX_OUTBOUND 256 /* Maximum outbound frames/adapter */
276#define MAX_INBOUND_SIZE 512 /* Maximum inbound frame size */
277#define MAX_MAP 4194304L /* Maximum mapping size of IOP */
278 /* Also serves as the minimum map for */
279 /* the 2005S zero channel RAID product */
280
281/**************************************************************************
282** ASR Host Adapter structure - One Structure For Each Host Adapter That **
283** Is Configured Into The System. The Structure Supplies Configuration **
284** Information, Status Info, Queue Info And An Active CCB List Pointer. **
285***************************************************************************/
286
/* I2O register set (memory-mapped; driver uses only the fields below) */
typedef struct {
	U8           Address[0x30];	/* low registers 0x00-0x2f, unused here */
	volatile U32 Status;		/* interrupt status */
	volatile U32 Mask;		/* interrupt mask register */
#	define Mask_InterruptsDisabled 0x08
	U32          x[2];		/* reserved/unused */
	volatile U32 ToFIFO;	/* In Bound FIFO */
	volatile U32 FromFIFO;	/* Out Bound FIFO */
} i2oRegs_t;
297
/*
 * A MIX of performance and space considerations for TID lookups
 */
typedef u_int16_t tid_t;	/* I2O target id */

/* LUN -> TID map for one target; TID[1] is an old-style variable-length
 * tail — presumably allocated with `size' entries, verify at alloc site */
typedef struct {
	u_int32_t size; /* up to MAX_LUN */
	tid_t TID[1];
} lun2tid_t;

/* target -> per-LUN-table map for one bus; LUN[1] is likewise a
 * variable-length tail */
typedef struct {
	u_int32_t size; /* up to MAX_TARGET */
	lun2tid_t * LUN[1];
} target2lun_t;
312
/*
 * To ensure that we only allocate and use the worst case ccb here, lets
 * make our own local ccb union. If asr_alloc_ccb is utilized for another
 * ccb type, ensure that you add the additional structures into our local
 * ccb union. To ensure strict type checking, we will utilize the local
 * ccb definition wherever possible.
 */
union asr_ccb {
	struct ccb_hdr ccb_h;	/* For convenience */
	struct ccb_scsiio csio;	/* SCSI I/O request */
	struct ccb_setasync csa;	/* async callback registration */
};
325
/* Per-adapter state; one instance per configured HBA. */
typedef struct Asr_softc {
	u_int16_t ha_irq;
	void * ha_Base; /* base port for each board */
	u_int8_t * volatile ha_blinkLED; /* blink-LED (fault) code location */
	i2oRegs_t * ha_Virt; /* Base address of IOP */
	U8 * ha_Fvirt; /* Base address of Frames */
	I2O_IOP_ENTRY ha_SystemTable;
	LIST_HEAD(,ccb_hdr) ha_ccb; /* ccbs in use */
	struct cam_path * ha_path[MAX_CHANNEL+1];
	struct cam_sim * ha_sim[MAX_CHANNEL+1];
	struct resource * ha_mem_res; /* IOP register window */
	struct resource * ha_mes_res; /* presumably the split-BAR message
					 * window (see V1.06 note above) */
	struct resource * ha_irq_res;
	void * ha_intr; /* interrupt handler cookie */
	PI2O_LCT ha_LCT; /* Complete list of devices */
	/* Shorthand accessors into an LCT entry's IdentityTag bytes */
#	define le_type IdentityTag[0]
#	define I2O_BSA 0x20
#	define I2O_FCA 0x40
#	define I2O_SCSI 0x00
#	define I2O_PORT 0x80
#	define I2O_UNKNOWN 0x7F
#	define le_bus IdentityTag[1]
#	define le_target IdentityTag[2]
#	define le_lun IdentityTag[3]
	target2lun_t * ha_targets[MAX_CHANNEL+1]; /* TID lookup, per channel */
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
	u_long ha_Msgs_Phys; /* presumably physical addr of ha_Msgs — confirm */

	u_int8_t ha_in_reset; /* adapter recovery state machine */
#	define HA_OPERATIONAL 0
#	define HA_IN_RESET 1
#	define HA_OFF_LINE 2
#	define HA_OFF_LINE_RECOVERY 3
	/* Configuration information */
	/* The target id maximums we take */
	u_int8_t ha_MaxBus; /* Maximum bus */
	u_int8_t ha_MaxId; /* Maximum target ID */
	u_int8_t ha_MaxLun; /* Maximum target LUN */
	u_int8_t ha_SgSize; /* Max SG elements */
	u_int8_t ha_pciBusNum;
	u_int8_t ha_pciDeviceNum;
	u_int8_t ha_adapter_target[MAX_CHANNEL+1];
	u_int16_t ha_QueueSize; /* Max outstanding commands */
	u_int16_t ha_Msgs_Count;

	/* Links into other parents and HBAs */
	struct Asr_softc * ha_next; /* HBA list */
} Asr_softc_t;
374
375STATIC Asr_softc_t * Asr_softc;
376
377/*
378 * Prototypes of the routines we have in this object.
379 */
380
381/* Externally callable routines */
984263bc
MD
382#define PROBE_ARGS IN device_t tag
383#define PROBE_RET int
384#define PROBE_SET() u_long id = (pci_get_device(tag)<<16)|pci_get_vendor(tag)
385#define PROBE_RETURN(retval) if(retval){device_set_desc(tag,retval);return(0);}else{return(ENXIO);}
386#define ATTACH_ARGS IN device_t tag
387#define ATTACH_RET int
388#define ATTACH_SET() int unit = device_get_unit(tag)
389#define ATTACH_RETURN(retval) return(retval)
984263bc 390/* I2O HDM interface */
5ca58d54
RG
391STATIC PROBE_RET asr_probe (PROBE_ARGS);
392STATIC ATTACH_RET asr_attach (ATTACH_ARGS);
984263bc 393/* DOMINO placeholder */
5ca58d54
RG
394STATIC PROBE_RET domino_probe (PROBE_ARGS);
395STATIC ATTACH_RET domino_attach (ATTACH_ARGS);
984263bc 396/* MODE0 adapter placeholder */
5ca58d54
RG
397STATIC PROBE_RET mode0_probe (PROBE_ARGS);
398STATIC ATTACH_RET mode0_attach (ATTACH_ARGS);
984263bc 399
5ca58d54
RG
400STATIC Asr_softc_t * ASR_get_sc (
401 IN dev_t dev);
402STATIC int asr_ioctl (
984263bc
MD
403 IN dev_t dev,
404 IN u_long cmd,
405 INOUT caddr_t data,
406 int flag,
5ca58d54
RG
407 d_thread_t *td);
408STATIC int asr_open (
984263bc
MD
409 IN dev_t dev,
410 int32_t flags,
411 int32_t ifmt,
5ca58d54
RG
412 IN d_thread_t *td);
413STATIC int asr_close (
984263bc
MD
414 dev_t dev,
415 int flags,
416 int ifmt,
5ca58d54
RG
417 d_thread_t *td);
418STATIC int asr_intr (
419 IN Asr_softc_t * sc);
420STATIC void asr_timeout (
421 INOUT void * arg);
422STATIC int ASR_init (
423 IN Asr_softc_t * sc);
424STATIC INLINE int ASR_acquireLct (
425 INOUT Asr_softc_t * sc);
426STATIC INLINE int ASR_acquireHrt (
427 INOUT Asr_softc_t * sc);
428STATIC void asr_action (
984263bc 429 IN struct cam_sim * sim,
5ca58d54
RG
430 IN union ccb * ccb);
431STATIC void asr_poll (
432 IN struct cam_sim * sim);
984263bc
MD
433
434/*
435 * Here is the auto-probe structure used to nest our tests appropriately
436 * during the startup phase of the operating system.
437 */
984263bc
MD
438STATIC device_method_t asr_methods[] = {
439 DEVMETHOD(device_probe, asr_probe),
440 DEVMETHOD(device_attach, asr_attach),
441 { 0, 0 }
442};
443
444STATIC driver_t asr_driver = {
445 "asr",
446 asr_methods,
447 sizeof(Asr_softc_t)
448};
449
450STATIC devclass_t asr_devclass;
451
32832096 452DECLARE_DUMMY_MODULE(asr);
984263bc
MD
453DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);
454
455STATIC device_method_t domino_methods[] = {
456 DEVMETHOD(device_probe, domino_probe),
457 DEVMETHOD(device_attach, domino_attach),
458 { 0, 0 }
459};
460
461STATIC driver_t domino_driver = {
462 "domino",
463 domino_methods,
464 0
465};
466
467STATIC devclass_t domino_devclass;
468
469DRIVER_MODULE(domino, pci, domino_driver, domino_devclass, 0, 0);
470
471STATIC device_method_t mode0_methods[] = {
472 DEVMETHOD(device_probe, mode0_probe),
473 DEVMETHOD(device_attach, mode0_attach),
474 { 0, 0 }
475};
476
477STATIC driver_t mode0_driver = {
478 "mode0",
479 mode0_methods,
480 0
481};
482
483STATIC devclass_t mode0_devclass;
484
485DRIVER_MODULE(mode0, pci, mode0_driver, mode0_devclass, 0, 0);
984263bc
MD
486
487/*
488 * devsw for asr hba driver
489 *
490 * only ioctl is used. the sd driver provides all other access.
491 */
492#define CDEV_MAJOR 154 /* prefered default character major */
493STATIC struct cdevsw asr_cdevsw = {
fabb8ceb
MD
494 "asr", /* name */
495 CDEV_MAJOR, /* maj */
496 0, /* flags */
497 NULL, /* port */
498 0, /* auto */
499
984263bc
MD
500 asr_open, /* open */
501 asr_close, /* close */
502 noread, /* read */
503 nowrite, /* write */
504 asr_ioctl, /* ioctl */
505 nopoll, /* poll */
506 nommap, /* mmap */
507 nostrategy, /* strategy */
984263bc 508 nodump, /* dump */
fabb8ceb 509 nopsize /* psize */
984263bc
MD
510};
511
984263bc
MD
512/*
513 * Initialize the dynamic cdevsw hooks.
514 */
STATIC void
asr_drvinit (void * unused)
{
	static int asr_devsw_installed = 0;

	if (asr_devsw_installed) {
		return;		/* idempotent: register only once */
	}
	asr_devsw_installed++;
	/*
	 * Find a free spot (the report during driver load used by
	 * osd layer in engine to generate the controlling nodes).
	 *
	 * XXX this is garbage code, store a unit number in asr_cdevsw
	 * and iterate through that instead?
	 */
	while (asr_cdevsw.d_maj < NUMCDEVSW &&
	    cdevsw_get(asr_cdevsw.d_maj, -1) != NULL
	) {
		++asr_cdevsw.d_maj;
	}
	if (asr_cdevsw.d_maj >= NUMCDEVSW) {
		/*
		 * Nothing free at or above the preferred major; wrap to 0
		 * and scan upward.
		 * NOTE(review): this second scan is bounded by CDEV_MAJOR,
		 * not NUMCDEVSW — presumably to stop at the preferred
		 * default; confirm intent.
		 */
		asr_cdevsw.d_maj = 0;
		while (asr_cdevsw.d_maj < CDEV_MAJOR &&
		    cdevsw_get(asr_cdevsw.d_maj, -1) != NULL
		) {
			++asr_cdevsw.d_maj;
		}
	}

	/*
	 * Come to papa
	 */
	cdevsw_add(&asr_cdevsw, 0, 0);
} /* asr_drvinit */
550
551/* Must initialize before CAM layer picks up our HBA driver */
552SYSINIT(asrdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,asr_drvinit,NULL)
553
554/* I2O support routines */
555#define defAlignLong(STRUCT,NAME) char NAME[sizeof(STRUCT)]
556#define getAlignLong(STRUCT,NAME) ((STRUCT *)(NAME))
557
558/*
559 * Fill message with default.
560 */
561STATIC PI2O_MESSAGE_FRAME
562ASR_fillMessage (
563 IN char * Message,
564 IN u_int16_t size)
565{
566 OUT PI2O_MESSAGE_FRAME Message_Ptr;
567
568 Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message);
569 bzero ((void *)Message_Ptr, size);
570 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
571 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
572 (size + sizeof(U32) - 1) >> 2);
573 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
574 return (Message_Ptr);
575} /* ASR_fillMessage */
576
577#define EMPTY_QUEUE ((U32)-1L)
578
579STATIC INLINE U32
580ASR_getMessage(
581 IN i2oRegs_t * virt)
582{
583 OUT U32 MessageOffset;
584
585 if ((MessageOffset = virt->ToFIFO) == EMPTY_QUEUE) {
586 MessageOffset = virt->ToFIFO;
587 }
588 return (MessageOffset);
589} /* ASR_getMessage */
590
591/* Issue a polled command */
/* Issue a polled command: copy `Message' into a free inbound frame and
 * post it with interrupts disabled.  Returns the previous interrupt mask
 * (for the caller to restore), or (U32)-1 if no frame became available. */
STATIC U32
ASR_initiateCp (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt,
	IN PI2O_MESSAGE_FRAME Message)
{
	OUT U32 Mask = -1L;	/* -1 signals "nothing was sent" */
	U32 MessageOffset;
	u_int Delay = 1500;	/* 1500 * 10ms = up to 15s for a free frame */

	/*
	 * ASR_initiateCp is only used for synchronous commands and will
	 * be made more resilient to adapter delays since commands like
	 * resetIOP can cause the adapter to be deaf for a little time.
	 */
	while (((MessageOffset = ASR_getMessage(virt)) == EMPTY_QUEUE)
	 && (--Delay != 0)) {
		DELAY (10000);
	}
	if (MessageOffset != EMPTY_QUEUE) {
		/* Copy the frame body into the adapter's frame window */
		bcopy (Message, fvirt + MessageOffset,
		  I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
		/*
		 * Disable the Interrupts; the pre-existing mask is captured
		 * in Mask so the caller can restore it after polling.
		 */
		virt->Mask = (Mask = virt->Mask) | Mask_InterruptsDisabled;
		/* Posting the offset hands the frame to the IOP */
		virt->ToFIFO = MessageOffset;
	}
	return (Mask);
} /* ASR_initiateCp */
622
623/*
624 * Reset the adapter.
625 */
/*
 * Reset the adapter.  Returns the adapter-reported status word, or 0 if
 * the reset message could not be sent or (note) if the poll timed out.
 */
STATIC U32
ASR_resetIOP (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt)
{
	/* Reset message plus a trailing U32 the IOP writes its status into */
	struct resetMessage {
		I2O_EXEC_IOP_RESET_MESSAGE M;
		U32 R;
	};
	defAlignLong(struct resetMessage,Message);
	PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
	OUT U32 * volatile Reply_Ptr;	/* volatile: written by the adapter */
	U32 Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
	/*
	 * Reset the Reply Status (the R word just past the message body)
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0;
	/* Give the adapter the physical address of the status word */
	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
	  KVTOPHYS((void *)Reply_Ptr));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 2s.
		 */
		u_int8_t Delay = 200;	/* 200 * 10ms = 2s */

		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
			DELAY (10000);
		}
		/*
		 * Re-enable the interrupts (restore the pre-send mask).
		 */
		virt->Mask = Old;
		ASSERT (*Reply_Ptr);
		return (*Reply_Ptr);	/* still 0 if the poll timed out */
	}
	ASSERT (Old != (U32)-1L);
	return (0);	/* no free inbound frame; nothing was sent */
} /* ASR_resetIOP */
676
677/*
678 * Get the curent state of the adapter
679 */
/*
 * Get the current state of the adapter via an ExecStatusGet message.
 * The reply is DMAed into `buffer'; returns `buffer' on success or NULL
 * if the message could not be sent or the reply never arrived.
 */
STATIC INLINE PI2O_EXEC_STATUS_GET_REPLY
ASR_getStatus (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt,
	OUT PI2O_EXEC_STATUS_GET_REPLY buffer)
{
	defAlignLong(I2O_EXEC_STATUS_GET_MESSAGE,Message);
	PI2O_EXEC_STATUS_GET_MESSAGE Message_Ptr;
	U32 Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
	  I2O_EXEC_STATUS_GET);
	/* The adapter DMAs the reply to this (physical) address */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
	  KVTOPHYS((void *)buffer));
	/* This one is a Byte Count */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
	  sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Reset the Reply Status
	 */
	bzero ((void *)buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 255ms
		 * (raised from 50ms for the 2000S/2005S, see V1.08 above).
		 */
		u_int8_t Delay = 255;	/* 255 * 1ms */

		/* SyncByte goes non-zero when the reply has landed */
		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
			if (--Delay == 0) {
				/* Timed out: report failure to the caller */
				buffer = (PI2O_EXEC_STATUS_GET_REPLY)NULL;
				break;
			}
			DELAY (1000);
		}
		/*
		 * Re-enable the interrupts.
		 */
		virt->Mask = Old;
		return (buffer);	/* NULL on timeout */
	}
	return ((PI2O_EXEC_STATUS_GET_REPLY)NULL);	/* send failed */
} /* ASR_getStatus */
731
732/*
733 * Check if the device is a SCSI I2O HBA, and add it to the list.
734 */
735
736/*
737 * Probe for ASR controller. If we find it, we will use it.
738 * virtual adapters.
739 */
740STATIC PROBE_RET
741asr_probe(PROBE_ARGS)
742{
743 PROBE_SET();
744 if ((id == 0xA5011044) || (id == 0xA5111044)) {
745 PROBE_RETURN ("Adaptec Caching SCSI RAID");
746 }
747 PROBE_RETURN (NULL);
748} /* asr_probe */
749
750/*
751 * Probe/Attach for DOMINO chipset.
752 */
753STATIC PROBE_RET
754domino_probe(PROBE_ARGS)
755{
756 PROBE_SET();
757 if (id == 0x10121044) {
758 PROBE_RETURN ("Adaptec Caching Memory Controller");
759 }
760 PROBE_RETURN (NULL);
761} /* domino_probe */
762
/* Placeholder attach for DOMINO: claim the device, nothing to set up. */
STATIC ATTACH_RET
domino_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* domino_attach */
768
769/*
770 * Probe/Attach for MODE0 adapters.
771 */
772STATIC PROBE_RET
773mode0_probe(PROBE_ARGS)
774{
775 PROBE_SET();
776
777 /*
778 * If/When we can get a business case to commit to a
779 * Mode0 driver here, we can make all these tests more
780 * specific and robust. Mode0 adapters have their processors
781 * turned off, this the chips are in a raw state.
782 */
783
784 /* This is a PLX9054 */
785 if (id == 0x905410B5) {
786 PROBE_RETURN ("Adaptec Mode0 PM3757");
787 }
788 /* This is a PLX9080 */
789 if (id == 0x908010B5) {
790 PROBE_RETURN ("Adaptec Mode0 PM3754/PM3755");
791 }
792 /* This is a ZION 80303 */
793 if (id == 0x53098086) {
794 PROBE_RETURN ("Adaptec Mode0 3010S");
795 }
796 /* This is an i960RS */
797 if (id == 0x39628086) {
798 PROBE_RETURN ("Adaptec Mode0 2100S");
799 }
800 /* This is an i960RN */
801 if (id == 0x19648086) {
802 PROBE_RETURN ("Adaptec Mode0 PM2865/2400A/3200S/3400S");
803 }
804#if 0 /* this would match any generic i960 -- mjs */
805 /* This is an i960RP (typically also on Motherboards) */
806 if (id == 0x19608086) {
807 PROBE_RETURN ("Adaptec Mode0 PM2554/PM1554/PM2654");
808 }
809#endif
810 PROBE_RETURN (NULL);
811} /* mode0_probe */
812
/* Placeholder attach for Mode0 devices: claim the device, do nothing. */
STATIC ATTACH_RET
mode0_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* mode0_attach */
818
819STATIC INLINE union asr_ccb *
820asr_alloc_ccb (
821 IN Asr_softc_t * sc)
822{
823 OUT union asr_ccb * new_ccb;
824
825 if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb),
826 M_DEVBUF, M_WAITOK)) != (union asr_ccb *)NULL) {
827 bzero (new_ccb, sizeof(*new_ccb));
828 new_ccb->ccb_h.pinfo.priority = 1;
829 new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
830 new_ccb->ccb_h.spriv_ptr0 = sc;
831 }
832 return (new_ccb);
833} /* asr_alloc_ccb */
834
/* Release a ccb obtained from asr_alloc_ccb. */
STATIC INLINE void
asr_free_ccb (
	IN union asr_ccb * free_ccb)
{
	free(free_ccb, M_DEVBUF);
} /* asr_free_ccb */
841
842/*
843 * Print inquiry data `carefully'
844 */
845STATIC void
846ASR_prstring (
847 u_int8_t * s,
848 int len)
849{
850 while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
851 printf ("%c", *(s++));
852 }
853} /* ASR_prstring */
854
855/*
856 * Prototypes
857 */
5ca58d54 858STATIC INLINE int ASR_queue (
984263bc 859 IN Asr_softc_t * sc,
5ca58d54 860 IN PI2O_MESSAGE_FRAME Message);
984263bc
MD
861/*
862 * Send a message synchronously and without Interrupt to a ccb.
863 */
864STATIC int
865ASR_queue_s (
866 INOUT union asr_ccb * ccb,
867 IN PI2O_MESSAGE_FRAME Message)
868{
984263bc
MD
869 U32 Mask;
870 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
871
872 /*
873 * We do not need any (optional byteswapping) method access to
874 * the Initiator context field.
875 */
876 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
877
878 /* Prevent interrupt service */
7f2216bc 879 crit_enter();
984263bc
MD
880 sc->ha_Virt->Mask = (Mask = sc->ha_Virt->Mask)
881 | Mask_InterruptsDisabled;
882
883 if (ASR_queue (sc, Message) == EMPTY_QUEUE) {
884 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
885 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
886 }
887
888 /*
889 * Wait for this board to report a finished instruction.
890 */
891 while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
892 (void)asr_intr (sc);
893 }
894
895 /* Re-enable Interrupts */
896 sc->ha_Virt->Mask = Mask;
7f2216bc 897 crit_exit();
984263bc
MD
898
899 return (ccb->ccb_h.status);
900} /* ASR_queue_s */
901
902/*
903 * Send a message synchronously to a Asr_softc_t
904 */
905STATIC int
906ASR_queue_c (
907 IN Asr_softc_t * sc,
908 IN PI2O_MESSAGE_FRAME Message)
909{
910 union asr_ccb * ccb;
911 OUT int status;
912
913 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
914 return (CAM_REQUEUE_REQ);
915 }
916
917 status = ASR_queue_s (ccb, Message);
918
919 asr_free_ccb(ccb);
920
921 return (status);
922} /* ASR_queue_c */
923
924/*
925 * Add the specified ccb to the active queue
926 */
/* Insert the ccb on the softc's active list and arm its timeout. */
STATIC INLINE void
ASR_ccbAdd (
	IN Asr_softc_t * sc,
	INOUT union asr_ccb * ccb)
{
	/* Critical section: the active list is also touched at interrupt time */
	crit_enter();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flashes switching from write back to write thru.
			 */
			ccb->ccb_h.timeout = 6 * 60 * 1000;	/* six minutes */
		}
		/* Arm the per-ccb timeout; timeout is in ms, callout in ticks */
		callout_reset(&ccb->ccb_h.timeout_ch,
		  (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
	}
	crit_exit();
} /* ASR_ccbAdd */
948
949/*
950 * Remove the specified ccb from the active queue.
951 */
/* Remove the specified ccb from the active queue, disarming its timeout. */
STATIC INLINE void
ASR_ccbRemove (
	IN Asr_softc_t * sc,
	INOUT union asr_ccb * ccb)
{
	/* Critical section mirrors ASR_ccbAdd; stop the callout first */
	crit_enter();
	callout_stop(&ccb->ccb_h.timeout_ch);
	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
	crit_exit();
} /* ASR_ccbRemove */
962
963/*
964 * Fail all the active commands, so they get re-issued by the operating
965 * system.
966 */
/*
 * Fail all the active commands, so they get re-issued by the operating
 * system: every ccb on the active list is marked CAM_REQUEUE_REQ with a
 * full residual and completed back to its originator.
 */
STATIC INLINE void
ASR_failActiveCommands (
	IN Asr_softc_t * sc)
{
	struct ccb_hdr * ccb;

#if 0 /* Currently handled by callers, unnecessary paranoia currently */
	/* Left in for historical perspective. */
	defAlignLong(I2O_EXEC_LCT_NOTIFY_MESSAGE,Message);
	PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;

	/* Send a blind LCT command to wait for the enableSys to complete */
	Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	  I2O_CLASS_MATCH_ANYCLASS);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
#endif

	crit_enter();
	/*
	 * We do not need to inform the CAM layer that we had a bus
	 * reset since we manage it on our own, this also prevents the
	 * SCSI_DELAY settling that would be required on other systems.
	 * The `SCSI_DELAY' has already been handled by the card via the
	 * acquisition of the LCT table while we are at CAM priority level.
	 *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
	 *  }
	 */
	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != (struct ccb_hdr *)NULL) {
		ASR_ccbRemove (sc, (union asr_ccb *)ccb);

		ccb->status &= ~CAM_STATUS_MASK;
		ccb->status |= CAM_REQUEUE_REQ;
		/* Nothing Transfered */
		((struct ccb_scsiio *)ccb)->resid
		  = ((struct ccb_scsiio *)ccb)->dxfer_len;

		if (ccb->path) {
			/* CAM-originated ccb: complete it back to CAM */
			xpt_done ((union ccb *)ccb);
		} else {
			/* Internally issued ccb: wake its synchronous waiter */
			wakeup ((caddr_t)ccb);
		}
	}
	crit_exit();
} /* ASR_failActiveCommands */
1016
/*
 * The following command causes the HBA to reset the specific bus.
 *
 * Walks the cached LCT looking for the port object that represents the
 * requested bus, addresses an I2O_HBA_BUS_RESET message to its TID and
 * queues it asynchronously (no reply is waited for).  If no matching
 * port entry exists, nothing is sent.
 */
STATIC INLINE void
ASR_resetBus(
	IN Asr_softc_t * sc,
	IN int bus)
{
	defAlignLong(I2O_HBA_BUS_RESET_MESSAGE,Message);
	I2O_HBA_BUS_RESET_MESSAGE * Message_Ptr;
	PI2O_LCT_ENTRY Device;

	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
	  I2O_HBA_BUS_RESET);
	/* Locate the port object for this bus in the logical config table */
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Device) {
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus == bus)) {
			I2O_MESSAGE_FRAME_setTargetAddress(
			  &Message_Ptr->StdMessageFrame,
			  I2O_LCT_ENTRY_getLocalTID(Device));
			/* Asynchronous command, with no expectations */
			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
			break;
		}
	}
} /* ASR_resetBus */
1047
1048STATIC INLINE int
1049ASR_getBlinkLedCode (
1050 IN Asr_softc_t * sc)
1051{
1052 if ((sc != (Asr_softc_t *)NULL)
1053 && (sc->ha_blinkLED != (u_int8_t *)NULL)
1054 && (sc->ha_blinkLED[1] == 0xBC)) {
1055 return (sc->ha_blinkLED[0]);
1056 }
1057 return (0);
1058} /* ASR_getBlinkCode */
1059
/*
 * Determine the address of an TID lookup. Must be done at high priority
 * since the address can be changed by other threads of execution.
 *
 * Returns NULL pointer if not indexible (but will attempt to generate
 * an index if `new_entry' flag is set to TRUE).
 *
 * All addressible entries are to be guaranteed zero if never initialized.
 *
 * The lookup is a two-level table: sc->ha_targets[bus] points at a
 * per-bus target2lun_t, whose LUN[] slots point at per-target lun2tid_t
 * tables holding the TIDs.  Both levels are grown on demand in chunks.
 * Callers are expected to hold a critical section (see ASR_getTid /
 * ASR_setTid).
 */
STATIC INLINE tid_t *
ASR_getTidAddress(
	INOUT Asr_softc_t * sc,
	IN int bus,
	IN int target,
	IN int lun,
	IN int new_entry)
{
	target2lun_t * bus_ptr;
	lun2tid_t * target_ptr;
	unsigned new_size;

	/*
	 * Validity checking of incoming parameters. More of a bound
	 * expansion limit than an issue with the code dealing with the
	 * values.
	 *
	 * sc must be valid before it gets here, so that check could be
	 * dropped if speed a critical issue.
	 */
	if ((sc == (Asr_softc_t *)NULL)
	 || (bus > MAX_CHANNEL)
	 || (target > sc->ha_MaxId)
	 || (lun > sc->ha_MaxLun)) {
		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
		  (u_long)sc, bus, target, lun);
		return ((tid_t *)NULL);
	}
	/*
	 * See if there is an associated bus list.
	 *
	 * for performance, allocate in size of BUS_CHUNK chunks.
	 * BUS_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
#	define BUS_CHUNK 8
	new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
	if ((bus_ptr = sc->ha_targets[bus]) == (target2lun_t *)NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 *
		 * NOTE(review): malloc(..., M_WAITOK) should not return
		 * NULL, so the NULL comparison looks redundant — confirm
		 * before removing.
		 */
		if ((new_entry == FALSE)
		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK))
		   == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to allocate bus list\n");
			return ((tid_t *)NULL);
		}
		bzero (bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * new_size));
		bus_ptr->size = new_size + 1;
	} else if (bus_ptr->size <= new_size) {
		target2lun_t * new_bus_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK))
		   == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to reallocate bus list\n");
			return ((tid_t *)NULL);
		}
		/*
		 * Zero and copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bzero (new_bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * new_size));
		bcopy (bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
		sc->ha_targets[bus] = new_bus_ptr;
		free (bus_ptr, M_TEMP);
		bus_ptr = new_bus_ptr;
		bus_ptr->size = new_size + 1;
	}
	/*
	 * We now have the bus list, lets get to the target list.
	 * Since most systems have only *one* lun, we do not allocate
	 * in chunks as above, here we allow one, then in chunk sizes.
	 * TARGET_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
#	define TARGET_CHUNK 8
	if ((new_size = lun) != 0) {
		new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
	}
	if ((target_ptr = bus_ptr->LUN[target]) == (lun2tid_t *)NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK))
		   == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to allocate target list\n");
			return ((tid_t *)NULL);
		}
		bzero (target_ptr, sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * new_size));
		target_ptr->size = new_size + 1;
	} else if (target_ptr->size <= new_size) {
		lun2tid_t * new_target_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK))
		   == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to reallocate target list\n");
			return ((tid_t *)NULL);
		}
		/*
		 * Zero and copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bzero (new_target_ptr, sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * new_size));
		bcopy (target_ptr, new_target_ptr,
		  sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
		bus_ptr->LUN[target] = new_target_ptr;
		free (target_ptr, M_TEMP);
		target_ptr = new_target_ptr;
		target_ptr->size = new_size + 1;
	}
	/*
	 * Now, acquire the TID address from the LUN indexed list.
	 */
	return (&(target_ptr->TID[lun]));
} /* ASR_getTidAddress */
1215
1216/*
1217 * Get a pre-existing TID relationship.
1218 *
1219 * If the TID was never set, return (tid_t)-1.
1220 *
1221 * should use mutex rather than spl.
1222 */
1223STATIC INLINE tid_t
1224ASR_getTid (
1225 IN Asr_softc_t * sc,
1226 IN int bus,
1227 IN int target,
1228 IN int lun)
1229{
1230 tid_t * tid_ptr;
984263bc
MD
1231 OUT tid_t retval;
1232
7f2216bc 1233 crit_enter();
984263bc
MD
1234 if (((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, FALSE))
1235 == (tid_t *)NULL)
1236 /* (tid_t)0 or (tid_t)-1 indicate no TID */
1237 || (*tid_ptr == (tid_t)0)) {
7f2216bc 1238 crit_exit();
984263bc
MD
1239 return ((tid_t)-1);
1240 }
1241 retval = *tid_ptr;
7f2216bc 1242 crit_exit();
984263bc
MD
1243 return (retval);
1244} /* ASR_getTid */
1245
1246/*
1247 * Set a TID relationship.
1248 *
1249 * If the TID was not set, return (tid_t)-1.
1250 *
1251 * should use mutex rather than spl.
1252 */
1253STATIC INLINE tid_t
1254ASR_setTid (
1255 INOUT Asr_softc_t * sc,
1256 IN int bus,
1257 IN int target,
1258 IN int lun,
1259 INOUT tid_t TID)
1260{
1261 tid_t * tid_ptr;
984263bc
MD
1262
1263 if (TID != (tid_t)-1) {
1264 if (TID == 0) {
1265 return ((tid_t)-1);
1266 }
7f2216bc 1267 crit_enter();
984263bc
MD
1268 if ((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, TRUE))
1269 == (tid_t *)NULL) {
7f2216bc 1270 crit_exit();
984263bc
MD
1271 return ((tid_t)-1);
1272 }
1273 *tid_ptr = TID;
7f2216bc 1274 crit_exit();
984263bc
MD
1275 }
1276 return (TID);
1277} /* ASR_setTid */
1278
/*-------------------------------------------------------------------------*/
/*		      Function ASR_rescan				   */
/*-------------------------------------------------------------------------*/
/* The Parameters Passed To This Function Are :				   */
/*	Asr_softc_t *	 : HBA miniport driver's adapter data storage.	   */
/*									   */
/* This Function Will rescan the adapter and resynchronize any data	   */
/*									   */
/* Return : 0 For OK, Error Code Otherwise				   */
/*-------------------------------------------------------------------------*/

STATIC INLINE int
ASR_rescan(
	IN Asr_softc_t * sc)
{
	int bus;
	OUT int error;

	/*
	 * Re-acquire the LCT table and synchronize us to the adapter.
	 */
	if ((error = ASR_acquireLct(sc)) == 0) {
		error = ASR_acquireHrt(sc);
	}

	if (error != 0) {
		return error;
	}

	bus = sc->ha_MaxBus;
	/* Reset all existing cached TID lookups */
	do {
		int target, event = 0;

		/*
		 * Scan for all targets on this bus to see if they
		 * got affected by the rescan.
		 */
		for (target = 0; target <= sc->ha_MaxId; ++target) {
			int lun;

			/* Stay away from the controller ID */
			if (target == sc->ha_adapter_target[bus]) {
				continue;
			}
			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
				PI2O_LCT_ENTRY Device;
				tid_t TID = (tid_t)-1;
				tid_t LastTID;

				/*
				 * See if the cached TID changed. Search for
				 * the device in our new LCT.
				 */
				for (Device = sc->ha_LCT->LCTEntry;
				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
				   + I2O_LCT_getTableSize(sc->ha_LCT));
				  ++Device) {
					if ((Device->le_type != I2O_UNKNOWN)
					 && (Device->le_bus == bus)
					 && (Device->le_target == target)
					 && (Device->le_lun == lun)
					 && (I2O_LCT_ENTRY_getUserTID(Device)
					  == 0xFFF)) {
						TID = I2O_LCT_ENTRY_getLocalTID(
						  Device);
						break;
					}
				}
				/*
				 * Indicate to the OS that the label needs
				 * to be recalculated, or that the specific
				 * open device is no longer valid (Merde)
				 * because the cached TID changed.
				 */
				LastTID = ASR_getTid (sc, bus, target, lun);
				if (LastTID != TID) {
					struct cam_path * path;

					/*
					 * If we can not get a path for the
					 * device, accumulate the event for a
					 * single bus-wide broadcast below.
					 */
					if (xpt_create_path(&path,
					  /*periph*/NULL,
					  cam_sim_path(sc->ha_sim[bus]),
					  target, lun) != CAM_REQ_CMP) {
						if (TID == (tid_t)-1) {
							event |= AC_LOST_DEVICE;
						} else {
							event |= AC_INQ_CHANGED
							       | AC_GETDEV_CHANGED;
						}
					} else {
						if (TID == (tid_t)-1) {
							/* Device went away */
							xpt_async(
							  AC_LOST_DEVICE,
							  path, NULL);
						} else if (LastTID == (tid_t)-1) {
							/* Device appeared */
							struct ccb_getdev ccb;

							xpt_setup_ccb(
							  &(ccb.ccb_h),
							  path, /*priority*/5);
							xpt_async(
							  AC_FOUND_DEVICE,
							  path,
							  &ccb);
						} else {
							/* Device changed TID */
							xpt_async(
							  AC_INQ_CHANGED,
							  path, NULL);
							xpt_async(
							  AC_GETDEV_CHANGED,
							  path, NULL);
						}
					}
				}
				/*
				 * We have the option of clearing the
				 * cached TID for it to be rescanned, or to
				 * set it now even if the device never got
				 * accessed. We chose the later since we
				 * currently do not use the condition that
				 * the TID ever got cached.
				 */
				ASR_setTid (sc, bus, target, lun, TID);
			}
		}
		/*
		 * The xpt layer can not handle multiple events at the
		 * same call.
		 */
		if (event & AC_LOST_DEVICE) {
			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
		}
		if (event & AC_INQ_CHANGED) {
			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
		}
		if (event & AC_GETDEV_CHANGED) {
			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
		}
	} while (--bus >= 0);
	return (error);
} /* ASR_rescan */
1420
1421/*-------------------------------------------------------------------------*/
1422/* Function ASR_reset */
1423/*-------------------------------------------------------------------------*/
1424/* The Parameters Passed To This Function Are : */
1425/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1426/* */
1427/* This Function Will reset the adapter and resynchronize any data */
1428/* */
1429/* Return : None */
1430/*-------------------------------------------------------------------------*/
1431
1432STATIC INLINE int
1433ASR_reset(
1434 IN Asr_softc_t * sc)
1435{
7f2216bc 1436 int retVal;
984263bc 1437
7f2216bc 1438 crit_enter();
984263bc
MD
1439 if ((sc->ha_in_reset == HA_IN_RESET)
1440 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
7f2216bc 1441 crit_exit();
984263bc
MD
1442 return (EBUSY);
1443 }
1444 /*
1445 * Promotes HA_OPERATIONAL to HA_IN_RESET,
1446 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1447 */
1448 ++(sc->ha_in_reset);
1449 if (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0) {
1450 debug_asr_printf ("ASR_resetIOP failed\n");
1451 /*
1452 * We really need to take this card off-line, easier said
1453 * than make sense. Better to keep retrying for now since if a
1454 * UART cable is connected the blinkLEDs the adapter is now in
1455 * a hard state requiring action from the monitor commands to
1456 * the HBA to continue. For debugging waiting forever is a
1457 * good thing. In a production system, however, one may wish
1458 * to instead take the card off-line ...
1459 */
1460# if 0 && (defined(HA_OFF_LINE))
1461 /*
1462 * Take adapter off-line.
1463 */
1464 printf ("asr%d: Taking adapter off-line\n",
1465 sc->ha_path[0]
1466 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1467 : 0);
1468 sc->ha_in_reset = HA_OFF_LINE;
7f2216bc 1469 crit_exit();
984263bc
MD
1470 return (ENXIO);
1471# else
1472 /* Wait Forever */
1473 while (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0);
1474# endif
1475 }
1476 retVal = ASR_init (sc);
7f2216bc 1477 crit_exit();
984263bc
MD
1478 if (retVal != 0) {
1479 debug_asr_printf ("ASR_init failed\n");
1480 sc->ha_in_reset = HA_OFF_LINE;
1481 return (ENXIO);
1482 }
1483 if (ASR_rescan (sc) != 0) {
1484 debug_asr_printf ("ASR_rescan failed\n");
1485 }
1486 ASR_failActiveCommands (sc);
1487 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1488 printf ("asr%d: Brining adapter back on-line\n",
1489 sc->ha_path[0]
1490 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1491 : 0);
1492 }
1493 sc->ha_in_reset = HA_OPERATIONAL;
1494 return (0);
1495} /* ASR_reset */
1496
/*
 * Device timeout handler.
 *
 * First checks whether the adapter itself has faulted (blink LED code),
 * in which case the whole adapter is reset.  On a first timeout of a
 * command the bus is reset and the timeout re-armed; if the same command
 * times out a second time (status already CAM_CMD_TIMEOUT) the adapter
 * is reinitialized.
 */
STATIC void
asr_timeout(
	INOUT void  * arg)
{
	union asr_ccb * ccb = (union asr_ccb *)arg;
	Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	int s;	/* holds the blink LED code, not an spl token */

	debug_asr_print_path(ccb);
	debug_asr_printf("timed out");

	/*
	 * Check if the adapter has locked up?
	 */
	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
		/* Reset Adapter */
		printf ("asr%d: Blink LED 0x%x resetting adapter\n",
		  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
		if (ASR_reset (sc) == ENXIO) {
			/* Try again later */
			callout_reset(&ccb->ccb_h.timeout_ch,
			    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
		}
		return;
	}
	/*
	 * Abort does not function on the ASR card!!! Walking away from
	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
	 * our best bet, followed by a complete adapter reset if that fails.
	 */
	crit_enter();
	/* Check if we already timed out once to raise the issue */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
		if (ASR_reset (sc) == ENXIO) {
			/* Reset failed; retry on the next timeout */
			callout_reset(&ccb->ccb_h.timeout_ch,
			    (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
		}
		crit_exit();
		return;
	}
	debug_asr_printf ("\nresetting bus\n");
	/* If the BUS reset does not take, then an adapter reset is next! */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
	callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
	    asr_timeout, ccb);
	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
	xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
	crit_exit();
} /* asr_timeout */
1551
/*
 * send a message asynchronously
 *
 * Grabs a free inbound message frame from the adapter, copies the
 * message into it and posts its offset on the ToFIFO register.  If a
 * ccb is carried in the initiator context it is added to the active
 * list first.  Returns the frame offset, or EMPTY_QUEUE if no frame
 * could be obtained (in which case an adapter fault triggers a reset).
 */
STATIC INLINE int
ASR_queue(
	IN Asr_softc_t * sc,
	IN PI2O_MESSAGE_FRAME Message)
{
	OUT U32 MessageOffset;
	union asr_ccb * ccb;

	debug_asr_printf ("Host Command Dump:\n");
	debug_asr_dump_message (Message);

	ccb = (union asr_ccb *)(long)
	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);

	if ((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE) {
		/* Message size is expressed in 32-bit words; << 2 for bytes */
		bcopy (Message, sc->ha_Fvirt + MessageOffset,
		  I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
		if (ccb) {
			ASR_ccbAdd (sc, ccb);
		}
		/* Post the command */
		sc->ha_Virt->ToFIFO = MessageOffset;
	} else {
		if (ASR_getBlinkLedCode(sc)) {
			/*
			 * Unlikely we can do anything if we can't grab a
			 * message frame :-(, but lets give it a try.
			 */
			(void)ASR_reset (sc);
		}
	}
	return (MessageOffset);
} /* ASR_queue */
1588
1589
/*
 * Simple Scatter Gather elements.
 *
 * Fill entry `Index' of the simple SG list at `SGL' with the physical
 * address of `Buffer' (NULL maps to a NULL address) and `Size' bytes,
 * OR-ing `Flags' into the standard simple-address element flags.
 * Multiple-evaluation hazard: Buffer is evaluated twice — pass only
 * side-effect-free arguments.
 */
#define SG(SGL,Index,Flags,Buffer,Size)				   \
	I2O_FLAGS_COUNT_setCount(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  Size);						   \
	I2O_FLAGS_COUNT_setFlags(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	   \
	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		   \
	  (Buffer == NULL) ? NULL : KVTOPHYS(Buffer))
1601
/*
 * Retrieve Parameter Group.
 *	Buffer must be allocated using defAlignLong macro.
 *
 * Issues a synchronous I2O UTIL_PARAMS_GET for parameter group `Group'
 * of device `TID', with a FIELD_GET/all-fields operation list and the
 * caller's buffer as the result SG element.  Returns a pointer to the
 * Info payload inside Buffer on success, NULL on failure or when the
 * reply contains no results.
 */
STATIC void *
ASR_getParams(
	IN Asr_softc_t * sc,
	IN tid_t TID,
	IN int Group,
	OUT void * Buffer,
	IN unsigned BufferSize)
{
	struct paramGetMessage {
		I2O_UTIL_PARAMS_GET_MESSAGE M;
		char F[
		  sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
		struct Operations {
			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
		} O;
	};
	defAlignLong(struct paramGetMessage, Message);
	struct Operations * Operations_Ptr;
	I2O_UTIL_PARAMS_GET_MESSAGE * Message_Ptr;
	struct ParamBuffer {
		I2O_PARAM_RESULTS_LIST_HEADER Header;
		I2O_PARAM_READ_OPERATION_RESULT Read;
		char Info[1];
	} * Buffer_Ptr;

	/* The operations list lives in the tail of the message structure */
	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	    + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
	  + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	bzero ((void *)Operations_Ptr, sizeof(struct Operations));
	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
	  &(Operations_Ptr->Header), 1);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
	  &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
	  &(Operations_Ptr->Template[0]), 0xFFFF);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
	  &(Operations_Ptr->Template[0]), Group);
	bzero ((void *)(Buffer_Ptr = getAlignLong(struct ParamBuffer, Buffer)),
	  BufferSize);

	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  I2O_VERSION_11
	  + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
	  TID);
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_UTIL_PARAMS_GET);
	/*
	 * Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Operations_Ptr, sizeof(struct Operations));
	SG(&(Message_Ptr->SGL), 1,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Buffer_Ptr, BufferSize);

	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
	 && (Buffer_Ptr->Header.ResultCount)) {
		return ((void *)(Buffer_Ptr->Info));
	}
	return ((void *)NULL);
} /* ASR_getParams */
1674
/*
 * Acquire the LCT information.
 *
 * Issues an EXEC_LCT_NOTIFY twice: first into a stack-local I2O_LCT to
 * learn the table size, then into a freshly allocated sc->ha_LCT large
 * enough to hold it, building a multi-element SG list because kernel
 * malloc memory is only physically contiguous within a page.  Finally
 * classifies each LCT entry (le_type/le_bus/le_target/le_lun) from the
 * class ID and per-device parameter groups.  Returns 0 on success or an
 * errno value.
 */
STATIC INLINE int
ASR_acquireLct (
	INOUT Asr_softc_t * sc)
{
	PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	int MessageSizeInBytes;
	caddr_t v;
	int len;
	I2O_LCT Table;
	PI2O_LCT_ENTRY Entry;

	/*
	 * sc value assumed valid
	 */
	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc (
	  MessageSizeInBytes, M_TEMP, M_WAITOK))
	  == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
		return (ENOMEM);
	}
	(void)ASR_fillMessage((char *)Message_Ptr, MessageSizeInBytes);
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	  I2O_CLASS_MATCH_ANYCLASS);
	/*
	 * Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
	  sizeof(I2O_LCT));
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(&Table,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	/*
	 * Determine the size of the LCT table.
	 */
	if (sc->ha_LCT) {
		free (sc->ha_LCT, M_TEMP);
	}
	/*
	 * malloc only generates contiguous memory when less than a
	 * page is expected. We must break the request up into an SG list ...
	 */
	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
	 || (len > (128 * 1024))) {	/* Arbitrary */
		free (Message_Ptr, M_TEMP);
		return (EINVAL);
	}
	if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK))
	  == (PI2O_LCT)NULL) {
		free (Message_Ptr, M_TEMP);
		return (ENOMEM);
	}
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(sc->ha_LCT,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/*
	 * Convert the access to the LCT table into a SG list.
	 */
	sg = Message_Ptr->SGL.u.Simple;
	v = (caddr_t)(sc->ha_LCT);
	for (;;) {
		int next, base, span;

		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			int size;

			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		/* Construct the Flags */
		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		{
			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
			if (len <= 0) {
				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
				    | I2O_SGL_FLAGS_LAST_ELEMENT
				    | I2O_SGL_FLAGS_END_OF_BUFFER);
			}
			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
		}

		if (len <= 0) {
			break;
		}

		/*
		 * Incrementing requires resizing of the packet, so the
		 * message is reallocated one SG element larger and the
		 * partially built SG list carried over.
		 */
		++sg;
		MessageSizeInBytes += sizeof(*sg);
		I2O_MESSAGE_FRAME_setMessageSize(
		  &(Message_Ptr->StdMessageFrame),
		  I2O_MESSAGE_FRAME_getMessageSize(
		    &(Message_Ptr->StdMessageFrame))
		  + (sizeof(*sg) / sizeof(U32)));
		{
			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;

			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
			    malloc (MessageSizeInBytes, M_TEMP, M_WAITOK))
			    == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
				free (sc->ha_LCT, M_TEMP);
				sc->ha_LCT = (PI2O_LCT)NULL;
				free (Message_Ptr, M_TEMP);
				return (ENOMEM);
			}
			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
			bcopy ((caddr_t)Message_Ptr,
			  (caddr_t)NewMessage_Ptr, span);
			free (Message_Ptr, M_TEMP);
			sg = (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)NewMessage_Ptr) + span);
			Message_Ptr = NewMessage_Ptr;
		}
	}
	{	int retval;

		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
		free (Message_Ptr, M_TEMP);
		if (retval != CAM_REQ_CMP) {
			return (ENODEV);
		}
	}
	/* If the LCT table grew, lets truncate accesses */
	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
	}
	/* Classify each entry and fill in the driver's le_* shadow fields */
	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Entry) {
		Entry->le_type = I2O_UNKNOWN;
		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {

		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
			Entry->le_type = I2O_BSA;
			break;

		case I2O_CLASS_SCSI_PERIPHERAL:
			Entry->le_type = I2O_SCSI;
			break;

		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
			Entry->le_type = I2O_FCA;
			break;

		case I2O_CLASS_BUS_ADAPTER_PORT:
			Entry->le_type = I2O_PORT | I2O_SCSI;
			/* FALLTHRU */
		case I2O_CLASS_FIBRE_CHANNEL_PORT:
			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
				Entry->le_type = I2O_PORT | I2O_FCA;
			}
		{	struct ControllerInfo {
				I2O_PARAM_RESULTS_LIST_HEADER Header;
				I2O_PARAM_READ_OPERATION_RESULT Read;
				I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
			};
			defAlignLong(struct ControllerInfo, Buffer);
			PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
			    Buffer, sizeof(struct ControllerInfo)))
			  == (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)NULL) {
				continue;
			}
			Entry->le_target
			  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
			    Info);
			Entry->le_lun = 0;
		}	/* FALLTHRU */
		default:
			continue;
		}
		{	struct DeviceInfo {
				I2O_PARAM_RESULTS_LIST_HEADER Header;
				I2O_PARAM_READ_OPERATION_RESULT Read;
				I2O_DPT_DEVICE_INFO_SCALAR Info;
			};
			defAlignLong (struct DeviceInfo, Buffer);
			PI2O_DPT_DEVICE_INFO_SCALAR Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_DPT_DEVICE_INFO_GROUP_NO,
			    Buffer, sizeof(struct DeviceInfo)))
			  == (PI2O_DPT_DEVICE_INFO_SCALAR)NULL) {
				continue;
			}
			Entry->le_type
			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
			Entry->le_bus
			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
			if ((Entry->le_bus > sc->ha_MaxBus)
			 && (Entry->le_bus <= MAX_CHANNEL)) {
				sc->ha_MaxBus = Entry->le_bus;
			}
			Entry->le_target
			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
			Entry->le_lun
			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
		}
	}
	/*
	 * A zero return value indicates success.
	 */
	return (0);
} /* ASR_acquireLct */
1930
1931/*
1932 * Initialize a message frame.
1933 * We assume that the CDB has already been set up, so all we do here is
1934 * generate the Scatter Gather list.
1935 */
1936STATIC INLINE PI2O_MESSAGE_FRAME
1937ASR_init_message(
1938 IN union asr_ccb * ccb,
1939 OUT PI2O_MESSAGE_FRAME Message)
1940{
1941 int next, span, base, rw;
1942 OUT PI2O_MESSAGE_FRAME Message_Ptr;
1943 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1944 PI2O_SGE_SIMPLE_ELEMENT sg;
1945 caddr_t v;
1946 vm_size_t size, len;
1947 U32 MessageSize;
1948
1949 /* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
1950 bzero (Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message),
1951 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT)));
1952
1953 {
1954 int target = ccb->ccb_h.target_id;
1955 int lun = ccb->ccb_h.target_lun;
1956 int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1957 tid_t TID;
1958
1959 if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
1960 PI2O_LCT_ENTRY Device;
1961
1962 TID = (tid_t)0;
1963 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
1964 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1965 ++Device) {
1966 if ((Device->le_type != I2O_UNKNOWN)
1967 && (Device->le_bus == bus)
1968 && (Device->le_target == target)
1969 && (Device->le_lun == lun)
1970 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
1971 TID = I2O_LCT_ENTRY_getLocalTID(Device);
1972 ASR_setTid (sc, Device->le_bus,
1973 Device->le_target, Device->le_lun,
1974 TID);
1975 break;
1976 }
1977 }
1978 }
1979 if (TID == (tid_t)0) {
1980 return ((PI2O_MESSAGE_FRAME)NULL);
1981 }
1982 I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
1983 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
1984 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
1985 }
1986 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
1987 (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1988 / sizeof(U32)) << 4));
1989 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
1990 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1991 - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
1992 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
1993 I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
1994 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
1995 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
1996 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1997 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1998 I2O_SCB_FLAG_ENABLE_DISCONNECT
1999 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2000 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2001 /*
2002 * We do not need any (optional byteswapping) method access to
2003 * the Initiator & Transaction context field.
2004 */
2005 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
2006
2007 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2008 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
2009 /*
2010 * copy the cdb over
2011 */
2012 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
2013 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
2014 bcopy (&(ccb->csio.cdb_io),
2015 ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB, ccb->csio.cdb_len);
2016
2017 /*
2018 * Given a buffer describing a transfer, set up a scatter/gather map
2019 * in a ccb to map that SCSI transfer.
2020 */
2021
2022 rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;
2023
2024 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
2025 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2026 (ccb->csio.dxfer_len)
2027 ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
2028 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2029 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2030 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
2031 : (I2O_SCB_FLAG_XFER_FROM_DEVICE
2032 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2033 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2034 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
2035 : (I2O_SCB_FLAG_ENABLE_DISCONNECT
2036 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2037 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2038
2039 /*
2040 * Given a transfer described by a `data', fill in the SG list.
2041 */
2042 sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];
2043
2044 len = ccb->csio.dxfer_len;
2045 v = ccb->csio.data_ptr;
2046 ASSERT (ccb->csio.dxfer_len >= 0);
2047 MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
2048 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2049 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
2050 while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2051 Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
2052 span = 0;
2053 next = base = KVTOPHYS(v);
2054 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
2055
2056 /* How far can we go contiguously */
2057 while ((len > 0) && (base == next)) {
2058 next = trunc_page(base) + PAGE_SIZE;
2059 size = next - base;
2060 if (size > len) {
2061 size = len;
2062 }
2063 span += size;
2064 v += size;
2065 len -= size;
2066 base = KVTOPHYS(v);
2067 }
2068
2069 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
2070 if (len == 0) {
2071 rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
2072 }
2073 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
2074 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
2075 ++sg;
2076 MessageSize += sizeof(*sg) / sizeof(U32);
2077 }
2078 /* We always do the request sense ... */
2079 if ((span = ccb->csio.sense_len) == 0) {
2080 span = sizeof(ccb->csio.sense_data);
2081 }
2082 SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2083 &(ccb->csio.sense_data), span);
2084 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
2085 MessageSize + (sizeof(*sg) / sizeof(U32)));
2086 return (Message_Ptr);
2087} /* ASR_init_message */
2088
/*
 * Initialize the IOP's outbound (reply) message FIFO.
 *
 * Builds an I2O_EXEC_OUTBOUND_INIT message with a trailing U32 used as a
 * polled reply-status word, issues it with ASR_initiateCp(), busy-waits for
 * the IOP's answer, then (first call only) allocates the physically
 * contiguous reply-frame pool and posts every frame address to the
 * outbound FIFO.
 *
 * Returns the final reply status (>= I2O_EXEC_OUTBOUND_INIT_REJECTED), or
 * 0 if the message could not be issued.
 */
STATIC INLINE U32
ASR_initOutBound (
	INOUT Asr_softc_t	* sc)
{
	struct initOutBoundMessage {
		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
		U32			       R;	/* polled reply-status word */
	};
	defAlignLong(struct initOutBoundMessage,Message);
	PI2O_EXEC_OUTBOUND_INIT_MESSAGE	 Message_Ptr;
	OUT U32		      * volatile Reply_Ptr;
	U32				 Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_OUTBOUND_INIT);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
	  sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
	/*
	 * Reset the Reply Status word that lives just past the message; the
	 * SG element below points the IOP at it so the poll loop can watch it.
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
	  sizeof(U32));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (sc->ha_Virt, sc->ha_Fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		u_long size, addr;

		/*
		 * Wait for a response (Poll).
		 * NOTE(review): unbounded busy-wait — a wedged IOP would
		 * hang the caller here.
		 */
		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
		/*
		 * Re-enable the interrupts (Old presumably holds the
		 * pre-send mask returned by ASR_initiateCp — TODO confirm).
		 */
		sc->ha_Virt->Mask = Old;
		/*
		 * Populate the outbound table.
		 */
		if (sc->ha_Msgs == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {

			/* Allocate the reply frames */
			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  * sc->ha_Msgs_Count;

			/*
			 * contigmalloc only works reliably at
			 * initialization time.
			 */
			if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
			    0xFFFFFFFFul, (u_long)sizeof(U32), 0ul))
			  != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
				(void)bzero ((char *)sc->ha_Msgs, size);
				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
			}
		}

		/* Initialize the outbound FIFO: hand every reply-frame
		 * physical address to the IOP. */
		if (sc->ha_Msgs != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL)
		for (size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
		  size; --size) {
			sc->ha_Virt->FromFIFO = addr;
			addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
		}
		return (*Reply_Ptr);
	}
	return (0);
} /* ASR_initOutBound */
2169
/*
 * Set the system table.
 *
 * Builds an I2O_EXEC_SYS_TAB_SET message whose SG list carries the
 * system-table header, one IOP entry per adapter on the global Asr_softc
 * list, and two zero-length trailing descriptors, then issues it
 * synchronously via ASR_queue_c().
 *
 * Returns the ASR_queue_c() completion status, or ENOMEM if an
 * allocation failed.
 */
STATIC INLINE int
ASR_setSysTab(
	IN Asr_softc_t * sc)
{
	PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
	PI2O_SET_SYSTAB_HEADER	      SystemTable;
	Asr_softc_t		    * ha;
	PI2O_SGE_SIMPLE_ELEMENT	      sg;
	int			      retVal;

	/* NOTE(review): M_WAITOK allocations do not return NULL, so the
	 * checks below are defensive only. */
	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc (
	  sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK))
	  == (PI2O_SET_SYSTAB_HEADER)NULL) {
		return (ENOMEM);
	}
	bzero (SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
	/* One system-table entry per adapter on the global list. */
	for (ha = Asr_softc; ha; ha = ha->ha_next) {
		++SystemTable->NumberEntries;
	}
	/* Message frame plus header SG, per-adapter SGs and two trailers. */
	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc (
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
	  M_TEMP, M_WAITOK)) == (PI2O_EXEC_SYS_TAB_SET_MESSAGE)NULL) {
		free (SystemTable, M_TEMP);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((char *)Message_Ptr,
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_SYS_TAB_SET);
	/*
	 * Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
	  + ((I2O_MESSAGE_FRAME_getVersionOffset(
	      &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
	/* First SG: the system-table header itself. */
	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
	++sg;
	/* Then one entry per adapter; only the last is END_OF_BUFFER. */
	for (ha = Asr_softc; ha; ha = ha->ha_next) {
		SG(sg, 0,
		  ((ha->ha_next)
		    ? (I2O_SGL_FLAGS_DIR)
		    : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
		  &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
		++sg;
	}
	/* Two trailing zero-length descriptors close out the SG list. */
	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
	    | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	free (Message_Ptr, M_TEMP);
	free (SystemTable, M_TEMP);
	return (retVal);
} /* ASR_setSysTab */
2236
/*
 * Fetch the Hardware Resource Table (HRT) from the IOP and use the
 * adapter IDs it reports to annotate matching LCT entries with their bus
 * number (le_bus), updating sc->ha_MaxBus to the highest bus seen.
 *
 * Returns 0 on success, ENODEV if the HRT_GET request does not complete.
 */
STATIC INLINE int
ASR_acquireHrt (
	INOUT Asr_softc_t * sc)
{
	defAlignLong(I2O_EXEC_HRT_GET_MESSAGE,Message);
	I2O_EXEC_HRT_GET_MESSAGE * Message_Ptr;
	struct {
		I2O_HRT	      Header;
		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
	} Hrt;
	u_int8_t NumberOfEntries;
	PI2O_HRT_ENTRY Entry;

	bzero ((void *)&Hrt, sizeof (Hrt));
	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11
	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		   / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_HRT_GET);

	/*
	 * Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &Hrt, sizeof(Hrt));
	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
		return (ENODEV);
	}
	/*
	 * Clamp the entry count to the space reserved above.
	 * NOTE(review): the clamp of MAX_CHANNEL + 1 assumes I2O_HRT embeds
	 * the first HRT entry in its header — confirm against i2otypes.
	 */
	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
	  > (MAX_CHANNEL + 1)) {
		NumberOfEntries = MAX_CHANNEL + 1;
	}
	for (Entry = Hrt.Header.HRTEntry;
	  NumberOfEntries != 0;
	  ++Entry, --NumberOfEntries) {
		PI2O_LCT_ENTRY Device;

		/* Match LCT entries to this HRT entry by local TID (the
		 * low 12 bits of the adapter ID); bus lives in the high
		 * 16 bits. */
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		  ++Device) {
			if (I2O_LCT_ENTRY_getLocalTID(Device)
			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
				  Entry) >> 16;
				if ((Device->le_bus > sc->ha_MaxBus)
				 && (Device->le_bus <= MAX_CHANNEL)) {
					sc->ha_MaxBus = Device->le_bus;
				}
			}
		}
	}
	return (0);
} /* ASR_acquireHrt */
2295
2296/*
2297 * Enable the adapter.
2298 */
2299STATIC INLINE int
2300ASR_enableSys (
2301 IN Asr_softc_t * sc)
2302{
2303 defAlignLong(I2O_EXEC_SYS_ENABLE_MESSAGE,Message);
2304 PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr;
2305
2306 Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(Message,
2307 sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2308 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2309 I2O_EXEC_SYS_ENABLE);
2310 return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2311} /* ASR_enableSys */
2312
2313/*
2314 * Perform the stages necessary to initialize the adapter
2315 */
2316STATIC int
2317ASR_init(
2318 IN Asr_softc_t * sc)
2319{
2320 return ((ASR_initOutBound(sc) == 0)
2321 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2322 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2323} /* ASR_init */
2324
/*
 * Send a Synchronize Cache command to the target device.
 *
 * Builds a private SCSI SCB EXECUTE message carrying a 6-byte
 * SYNCHRONIZE CACHE CDB addressed at the device's TID and issues it
 * synchronously.  Silently does nothing when the softc is absent, the
 * device has no resolvable TID, or there are no outstanding CCBs.
 */
STATIC INLINE void
ASR_sync (
	IN Asr_softc_t * sc,
	IN int	       bus,
	IN int	       target,
	IN int	       lun)
{
	tid_t TID;

	/*
	 * We will not synchronize the device when there are outstanding
	 * commands issued by the OS (this is due to a locked up device,
	 * as the OS normally would flush all outstanding commands before
	 * issuing a shutdown or an adapter reset).
	 */
	if ((sc != (Asr_softc_t *)NULL)
	 && (LIST_FIRST(&(sc->ha_ccb)) != (struct ccb_hdr *)NULL)
	 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
	 && (TID != (tid_t)0)) {
		defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	      Message_Ptr;

		bzero (Message_Ptr
		  = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
		  sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));

		I2O_MESSAGE_FRAME_setVersionOffset(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  I2O_VERSION_11
		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4));
		I2O_MESSAGE_FRAME_setMessageSize(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32));
		I2O_MESSAGE_FRAME_setInitiatorAddress (
		  (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
		I2O_MESSAGE_FRAME_setFunction(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
		I2O_MESSAGE_FRAME_setTargetAddress(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  I2O_SCSI_SCB_EXEC);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		  I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  DPT_ORGANIZATION_ID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
		Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
		/* Legacy SCSI-2 LUN-in-CDB field (byte 1, bits 7-5). */
		Message_Ptr->CDB[1] = (lun << 5);

		/*
		 * NOTE(review): this second setSCBFlags appears to replace
		 * the flags set above, adding XFER_FROM_DEVICE — confirm the
		 * setter overwrites rather than ORs, making the first call
		 * redundant.
		 */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
		  | I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	}
}
2397
2398STATIC INLINE void
2399ASR_synchronize (
2400 IN Asr_softc_t * sc)
2401{
2402 int bus, target, lun;
2403
2404 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2405 for (target = 0; target <= sc->ha_MaxId; ++target) {
2406 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2407 ASR_sync(sc,bus,target,lun);
2408 }
2409 }
2410 }
2411}
2412
2413/*
2414 * Reset the HBA, targets and BUS.
2415 * Currently this resets *all* the SCSI busses.
2416 */
2417STATIC INLINE void
2418asr_hbareset(
2419 IN Asr_softc_t * sc)
2420{
2421 ASR_synchronize (sc);
2422 (void)ASR_reset (sc);
2423} /* asr_hbareset */
2424
/*
 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
 * limit and a reduction in error checking (in the pre 4.0 case).
 *
 * Locates the first memory-mapped BAR, sizes and maps it into
 * sc->ha_Virt, and maps the messaging registers into sc->ha_Fvirt
 * (a second BAR on split-BAR 2005S hardware, otherwise aliased to
 * ha_Virt).  Returns 1 on success, 0 on any failure.
 */
STATIC int
asr_pci_map_mem (
	IN device_t	tag,
	IN Asr_softc_t * sc)
{
	int		rid;
	u_int32_t	p, l, s;

	/*
	 * I2O specification says we must find first *memory* mapped BAR
	 * (bit 0 clear means memory space).
	 */
	for (rid = PCIR_MAPS;
	  rid < (PCIR_MAPS + 4 * sizeof(u_int32_t));
	  rid += sizeof(u_int32_t)) {
		p = pci_read_config(tag, rid, sizeof(p));
		if ((p & 1) == 0) {
			break;
		}
	}
	/*
	 * Give up? Fall back to BAR0.
	 */
	if (rid >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
		rid = PCIR_MAPS;
	}
	/* Standard PCI BAR size probe: save, write all-ones, read back the
	 * size mask, restore the original value. */
	p = pci_read_config(tag, rid, sizeof(p));
	pci_write_config(tag, rid, -1, sizeof(p));
	l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
	pci_write_config(tag, rid, p, sizeof(p));
	if (l > MAX_MAP) {
		l = MAX_MAP;
	}
	/*
	 * The 2005S Zero Channel RAID solution is not a perfect PCI
	 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
	 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
	 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
	 * accessible via BAR0, the messaging registers are accessible
	 * via BAR1. If the subdevice code is 50 to 59 decimal.
	 */
	s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s));
	if (s != 0xA5111044) {
		s = pci_read_config(tag, PCIR_SUBVEND_0, sizeof(s));
		if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
		 && (ADPTDOMINATOR_SUB_ID_START <= s)
		 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
			l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
		}
	}
	/* Drop the low BAR flag bits to get the base address. */
	p &= ~15;
	sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
	  p, p + l, l, RF_ACTIVE);
	if (sc->ha_mem_res == (struct resource *)NULL) {
		return (0);
	}
	sc->ha_Base = (void *)rman_get_start(sc->ha_mem_res);
	if (sc->ha_Base == (void *)NULL) {
		return (0);
	}
	sc->ha_Virt = (i2oRegs_t *) rman_get_virtual(sc->ha_mem_res);
	if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
		/* Messaging registers live behind the next BAR: size and
		 * map it the same way. */
		if ((rid += sizeof(u_int32_t))
		  >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
			return (0);
		}
		p = pci_read_config(tag, rid, sizeof(p));
		pci_write_config(tag, rid, -1, sizeof(p));
		l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
		pci_write_config(tag, rid, p, sizeof(p));
		if (l > MAX_MAP) {
			l = MAX_MAP;
		}
		p &= ~15;
		sc->ha_mes_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
		  p, p + l, l, RF_ACTIVE);
		if (sc->ha_mes_res == (struct resource *)NULL) {
			return (0);
		}
		if ((void *)rman_get_start(sc->ha_mes_res) == (void *)NULL) {
			return (0);
		}
		sc->ha_Fvirt = (U8 *) rman_get_virtual(sc->ha_mes_res);
	} else {
		/* Single-BAR parts: messaging registers share the mapping. */
		sc->ha_Fvirt = (U8 *)(sc->ha_Virt);
	}
	return (1);
} /* asr_pci_map_mem */
2516
2517/*
2518 * A simplified copy of the real pci_map_int with additional
2519 * registration requirements.
2520 */
2521STATIC int
2522asr_pci_map_int (
984263bc 2523 IN device_t tag,
984263bc
MD
2524 IN Asr_softc_t * sc)
2525{
e9cb6d99
MD
2526 int rid = 0;
2527 int error;
984263bc
MD
2528
2529 sc->ha_irq_res = bus_alloc_resource(tag, SYS_RES_IRQ, &rid,
2530 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE);
2531 if (sc->ha_irq_res == (struct resource *)NULL) {
2532 return (0);
2533 }
e9cb6d99
MD
2534 error = bus_setup_intr(tag, sc->ha_irq_res, INTR_TYPE_CAM,
2535 (driver_intr_t *)asr_intr, (void *)sc,
2536 &(sc->ha_intr), NULL);
2537 if (error) {
984263bc
MD
2538 return (0);
2539 }
2540 sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char));
984263bc
MD
2541 return (1);
2542} /* asr_pci_map_int */
2543
/*
 * Attach the devices, and virtual devices to the driver list.
 *
 * Bring-up sequence: allocate and link the softc, map BARs, enable PCI
 * memory/busmaster, reset and query the IOP, map the interrupt, size
 * the queues, run ASR_init(), read the LCT/HRT to size busses/targets/
 * LUNs, print the HBA inquiry data, register one CAM SIM per bus, and
 * finally create the control device node.
 *
 * NOTE(review): on the early failure paths the softc stays linked on
 * the global list and is not freed — verify this is intentional.
 */
STATIC ATTACH_RET
asr_attach (ATTACH_ARGS)
{
	Asr_softc_t		 * sc;
	struct scsi_inquiry_data * iq;
	ATTACH_SET();

	/* M_INTWAIT cannot fail; the softc is zeroed just below. */
	sc = malloc(sizeof(*sc), M_DEVBUF, M_INTWAIT);
	if (Asr_softc == (Asr_softc_t *)NULL) {
		/*
		 * Fixup the OS revision as saved in the dptsig for the
		 * engine (dptioctl.h) to pick up.
		 */
		bcopy (osrelease, &ASR_sig.dsDescription[16], 5);
		printf ("asr%d: major=%d\n", unit, asr_cdevsw.d_maj);
	}
	/*
	 * Initialize the software structure
	 */
	bzero (sc, sizeof(*sc));
	LIST_INIT(&(sc->ha_ccb));
	/* Link us into the HA list (appended at the tail) */
	{
		Asr_softc_t **ha;

		for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next));
		*(ha) = sc;
	}
	{
		PI2O_EXEC_STATUS_GET_REPLY status;
		int size;

		/*
		 * This is the real McCoy!
		 */
		if (!asr_pci_map_mem(tag, sc)) {
			printf ("asr%d: could not map memory\n", unit);
			ATTACH_RETURN(ENXIO);
		}
		/* Enable if not formerly enabled */
		pci_write_config (tag, PCIR_COMMAND,
		  pci_read_config (tag, PCIR_COMMAND, sizeof(char))
		  | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
		/* Knowledge is power, responsibility is direct */
		{
			/* Mirror of the bus code's ivars layout so we can
			 * dig out our bus/slot/function numbers. */
			struct pci_devinfo {
				STAILQ_ENTRY(pci_devinfo) pci_links;
				struct resource_list	  resources;
				pcicfgregs		  cfg;
			} * dinfo = device_get_ivars(tag);
			sc->ha_pciBusNum = dinfo->cfg.bus;
			sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3)
					    | dinfo->cfg.func;
		}
		/* Check if the device is there? */
		if ((ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt) == 0)
		 || ((status = (PI2O_EXEC_STATUS_GET_REPLY)malloc (
		  sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK))
		   == (PI2O_EXEC_STATUS_GET_REPLY)NULL)
		 || (ASR_getStatus(sc->ha_Virt, sc->ha_Fvirt, status) == NULL)) {
			printf ("asr%d: could not initialize hardware\n", unit);
			ATTACH_RETURN(ENODEV);	/* Get next, maybe better luck */
		}
		/* Cache the IOP identity for the later SYS_TAB_SET. */
		sc->ha_SystemTable.OrganizationID = status->OrganizationID;
		sc->ha_SystemTable.IOP_ID = status->IOP_ID;
		sc->ha_SystemTable.I2oVersion = status->I2oVersion;
		sc->ha_SystemTable.IopState = status->IopState;
		sc->ha_SystemTable.MessengerType = status->MessengerType;
		sc->ha_SystemTable.InboundMessageFrameSize
		  = status->InboundMFrameSize;
		sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow
		  = (U32)(sc->ha_Base) + (U32)(&(((i2oRegs_t *)NULL)->ToFIFO));

		if (!asr_pci_map_int(tag, (void *)sc)) {
			printf ("asr%d: could not map interrupt\n", unit);
			ATTACH_RETURN(ENXIO);
		}

		/* Adjust the maximum inbound count */
		if (((sc->ha_QueueSize
		  = I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status))
		     > MAX_INBOUND)
		 || (sc->ha_QueueSize == 0)) {
			sc->ha_QueueSize = MAX_INBOUND;
		}

		/* Adjust the maximum outbound count */
		if (((sc->ha_Msgs_Count
		  = I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status))
		     > MAX_OUTBOUND)
		 || (sc->ha_Msgs_Count == 0)) {
			sc->ha_Msgs_Count = MAX_OUTBOUND;
		}
		if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
			sc->ha_Msgs_Count = sc->ha_QueueSize;
		}

		/* Adjust the maximum SG size to adapter */
		if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(
		  status) << 2)) > MAX_INBOUND_SIZE) {
			size = MAX_INBOUND_SIZE;
		}
		free (status, M_TEMP);
		sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
	}

	/*
	 * Only do a bus/HBA reset on the first time through. On this
	 * first time through, we do not send a flush to the devices.
	 */
	if (ASR_init(sc) == 0) {
		struct BufferInfo {
			I2O_PARAM_RESULTS_LIST_HEADER	    Header;
			I2O_PARAM_READ_OPERATION_RESULT	    Read;
			I2O_DPT_EXEC_IOP_BUFFERS_SCALAR	    Info;
		};
		defAlignLong (struct BufferInfo, Buffer);
		PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
# define FW_DEBUG_BLED_OFFSET 8

		/* Locate the firmware's blink-LED status byte, used
		 * elsewhere to detect a wedged adapter. */
		if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
		  ASR_getParams(sc, 0,
		    I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
		    Buffer, sizeof(struct BufferInfo)))
		!= (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)NULL) {
			sc->ha_blinkLED = sc->ha_Fvirt
			  + I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info)
			  + FW_DEBUG_BLED_OFFSET;
		}
		if (ASR_acquireLct(sc) == 0) {
			(void)ASR_acquireHrt(sc);
		}
	} else {
		printf ("asr%d: failed to initialize\n", unit);
		ATTACH_RETURN(ENXIO);
	}
	/*
	 * Add in additional probe responses for more channels. We
	 * are reusing the variable `target' for a channel loop counter.
	 * Done here because of we need both the acquireLct and
	 * acquireHrt data.
	 */
	{	PI2O_LCT_ENTRY Device;

		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		  ++Device) {
			if (Device->le_type == I2O_UNKNOWN) {
				continue;
			}
			/* Unclaimed devices grow the visible id/lun space. */
			if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
				if (Device->le_target > sc->ha_MaxId) {
					sc->ha_MaxId = Device->le_target;
				}
				if (Device->le_lun > sc->ha_MaxLun) {
					sc->ha_MaxLun = Device->le_lun;
				}
			}
			if (((Device->le_type & I2O_PORT) != 0)
			 && (Device->le_bus <= MAX_CHANNEL)) {
				/* Do not increase MaxId for efficiency */
				sc->ha_adapter_target[Device->le_bus]
				  = Device->le_target;
			}
		}
	}


	/*
	 * Print the HBA model number as inquired from the card.
	 */

	printf ("asr%d:", unit);

	if ((iq = (struct scsi_inquiry_data *)malloc (
	  sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK))
	 != (struct scsi_inquiry_data *)NULL) {
		defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE	      Message_Ptr;
		int					      posted = 0;

		/* Build an INQUIRY addressed at the IOP itself
		 * (Interpret=1), mirroring the ASR_sync message setup. */
		bzero (iq, sizeof(struct scsi_inquiry_data));
		bzero (Message_Ptr
		  = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
		  sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));

		I2O_MESSAGE_FRAME_setVersionOffset(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  I2O_VERSION_11
		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4));
		I2O_MESSAGE_FRAME_setMessageSize(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT))
			/ sizeof(U32));
		I2O_MESSAGE_FRAME_setInitiatorAddress (
		  (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
		I2O_MESSAGE_FRAME_setFunction(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  I2O_SCSI_SCB_EXEC);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		    I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  DPT_ORGANIZATION_ID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
		Message_Ptr->CDB[0] = INQUIRY;
		Message_Ptr->CDB[4] = (unsigned char)sizeof(struct scsi_inquiry_data);
		if (Message_Ptr->CDB[4] == 0) {
			Message_Ptr->CDB[4] = 255;
		}

		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
		  | I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
		  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
		  sizeof(struct scsi_inquiry_data));
		SG(&(Message_Ptr->SGL), 0,
		  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
		  iq, sizeof(struct scsi_inquiry_data));
		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

		/* Print whichever inquiry fields came back non-blank. */
		if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
			printf (" ");
			ASR_prstring (iq->vendor, 8);
			++posted;
		}
		if (iq->product[0] && (iq->product[0] != ' ')) {
			printf (" ");
			ASR_prstring (iq->product, 16);
			++posted;
		}
		if (iq->revision[0] && (iq->revision[0] != ' ')) {
			printf (" FW Rev. ");
			ASR_prstring (iq->revision, 4);
			++posted;
		}
		free ((caddr_t)iq, M_TEMP);
		if (posted) {
			printf (",");
		}
	}
	printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
	  (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);

	/*
	 * fill in the prototype cam_path.
	 */
	{
		int bus;
		union asr_ccb * ccb;

		if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
			printf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit);
			ATTACH_RETURN(ENOMEM);
		}
		/* Register one SIM + wildcard path per bus; a failed bus is
		 * simply skipped rather than failing the whole attach. */
		for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
			int QueueSize = sc->ha_QueueSize;

			if (QueueSize > MAX_INBOUND) {
				QueueSize = MAX_INBOUND;
			}

			/*
			 * Construct our first channel SIM entry
			 */
			sc->ha_sim[bus] = cam_sim_alloc(
			  asr_action, asr_poll, "asr", sc,
			  unit, 1, QueueSize, NULL);
			if (sc->ha_sim[bus] == NULL)
				continue;

			if (xpt_bus_register(sc->ha_sim[bus], bus)
			  != CAM_SUCCESS) {
				cam_sim_free(sc->ha_sim[bus]);
				sc->ha_sim[bus] = NULL;
				continue;
			}

			if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
			  cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
			  CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
				xpt_bus_deregister(
				  cam_sim_path(sc->ha_sim[bus]));
				cam_sim_free(sc->ha_sim[bus]);
				sc->ha_sim[bus] = NULL;
				continue;
			}
		}
		asr_free_ccb (ccb);
	}
	/*
	 * Generate the device node information
	 */
	make_dev(&asr_cdevsw, unit, 0, 0, S_IRWXU, "rasr%d", unit);
	ATTACH_RETURN(0);
} /* asr_attach */
2857
2858STATIC void
2859asr_poll(
2860 IN struct cam_sim *sim)
2861{
2862 asr_intr(cam_sim_softc(sim));
2863} /* asr_poll */
2864
/*
 * CAM action entry point: dispatch an incoming CCB by function code.
 * XPT_SCSI_IO requests are translated into I2O private messages and
 * queued to the IOP; most other operations are answered immediately.
 * Every path completes the CCB with xpt_done().
 */
STATIC void
asr_action(
	IN struct cam_sim * sim,
	IN union ccb	  * ccb)
{
	struct Asr_softc  * sc;

	debug_asr_printf ("asr_action(%lx,%lx{%x})\n",
	  (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code);

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));

	/* Stash the softc in the CCB for the completion side. */
	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {

	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct Message {
			char M[MAX_INBOUND_SIZE];
		};
		defAlignLong(struct Message,Message);
		PI2O_MESSAGE_FRAME   Message_Ptr;

		/* Reject incoming commands while we are resetting the card */
		if (sc->ha_in_reset != HA_OPERATIONAL) {
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			if (sc->ha_in_reset >= HA_OFF_LINE) {
				/* HBA is now off-line */
				ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
			} else {
				/* HBA currently resetting, try again later. */
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			}
			debug_asr_cmd_printf (" e\n");
			xpt_done(ccb);
			debug_asr_cmd_printf (" q\n");
			break;
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			printf(
			  "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ccb->csio.cdb_io.cdb_bytes[0],
			  cam_sim_bus(sim),
			  ccb->ccb_h.target_id,
			  ccb->ccb_h.target_lun);
		}
		debug_asr_cmd_printf ("(%d,%d,%d,%d)",
		  cam_sim_unit(sim),
		  cam_sim_bus(sim),
		  ccb->ccb_h.target_id,
		  ccb->ccb_h.target_lun);
		debug_asr_cmd_dump_ccb(ccb);

		/* Translate the CCB into an I2O message; NULL means the
		 * addressed device has no TID. */
		if ((Message_Ptr = ASR_init_message ((union asr_ccb *)ccb,
		  (PI2O_MESSAGE_FRAME)Message)) != (PI2O_MESSAGE_FRAME)NULL) {
			debug_asr_cmd2_printf ("TID=%x:\n",
			  PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
			debug_asr_cmd2_dump_message(Message_Ptr);
			debug_asr_cmd1_printf (" q");

			/* Inbound FIFO full: ask CAM to requeue. */
			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
				debug_asr_cmd_printf (" E\n");
				xpt_done(ccb);
			}
			debug_asr_cmd_printf (" Q\n");
			break;
		}
		/*
		 * We will get here if there is no valid TID for the device
		 * referenced in the scsi command packet.
		 */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
		debug_asr_cmd_printf (" B\n");
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		/* Reset HBA device ... */
		asr_hbareset (sc);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

/* NOTE(review): REPORT_LUNS is a SCSI opcode, yet it is used here as a
 * CCB func_code case — confirm this conditional case is intentional. */
# if (defined(REPORT_LUNS))
	case REPORT_LUNS:
# endif
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* XXX Implement */
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct	ccb_trans_settings *cts;
		u_int	target_mask;

		cts = &(ccb->cts);
		/* NOTE(review): target_mask is computed but never used. */
		target_mask = 0x01 << ccb->ccb_h.target_id;
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			/* Report fixed wide/sync capabilities. */
			cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
			cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			cts->sync_period = 6; /* 40MHz */
			cts->sync_offset = 15;

			cts->valid = CCB_TRANS_SYNC_RATE_VALID
				   | CCB_TRANS_SYNC_OFFSET_VALID
				   | CCB_TRANS_BUS_WIDTH_VALID
				   | CCB_TRANS_DISC_VALID
				   | CCB_TRANS_TQ_VALID;
			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		}
		xpt_done(ccb);
		break;
	}

	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;

		/* Synthesize a CHS geometry from the volume size,
		 * stepping head counts up with capacity. */
		ccg = &(ccb->ccg);
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);

		if (size_mb > 4096) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else if (size_mb > 2048) {
			ccg->heads = 128;
			ccg->secs_per_track = 63;
		} else if (size_mb > 1024) {
			ccg->heads = 65;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
		ASR_resetBus (sc, cam_sim_bus(sim));
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &(ccb->cpi);

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		/* Not necessary to reset bus, done by HDM initialization */
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sc->ha_MaxId;
		cpi->max_lun = sc->ha_MaxLun;
		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
} /* asr_action */
3069
984263bc
MD
3070
3071/*
3072 * Handle processing of current CCB as pointed to by the Status.
3073 */
3074STATIC int
3075asr_intr (
3076 IN Asr_softc_t * sc)
3077{
3078 OUT int processed;
3079
984263bc
MD
3080 for (processed = 0;
3081 sc->ha_Virt->Status & Mask_InterruptsDisabled;
3082 processed = 1) {
3083 union asr_ccb * ccb;
3084 U32 ReplyOffset;
3085 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
3086
3087 if (((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)
3088 && ((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)) {
3089 break;
3090 }
3091 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
3092 - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
3093 /*
3094 * We do not need any (optional byteswapping) method access to
3095 * the Initiator context field.
3096 */
3097 ccb = (union asr_ccb *)(long)
3098 I2O_MESSAGE_FRAME_getInitiatorContext64(
3099 &(Reply->StdReplyFrame.StdMessageFrame));
3100 if (I2O_MESSAGE_FRAME_getMsgFlags(
3101 &(Reply->StdReplyFrame.StdMessageFrame))
3102 & I2O_MESSAGE_FLAGS_FAIL) {
3103 defAlignLong(I2O_UTIL_NOP_MESSAGE,Message);
3104 PI2O_UTIL_NOP_MESSAGE Message_Ptr;
3105 U32 MessageOffset;
3106
3107 MessageOffset = (u_long)
3108 I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
3109 (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
3110 /*
3111 * Get the Original Message Frame's address, and get
3112 * it's Transaction Context into our space. (Currently
3113 * unused at original authorship, but better to be
3114 * safe than sorry). Straight copy means that we
3115 * need not concern ourselves with the (optional
3116 * byteswapping) method access.
3117 */
3118 Reply->StdReplyFrame.TransactionContext
3119 = ((PI2O_SINGLE_REPLY_MESSAGE_FRAME)
3120 (sc->ha_Fvirt + MessageOffset))->TransactionContext;
3121 /*
3122 * For 64 bit machines, we need to reconstruct the
3123 * 64 bit context.
3124 */
3125 ccb = (union asr_ccb *)(long)
3126 I2O_MESSAGE_FRAME_getInitiatorContext64(
3127 &(Reply->StdReplyFrame.StdMessageFrame));
3128 /*
3129 * Unique error code for command failure.
3130 */
3131 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3132 &(Reply->StdReplyFrame), (u_int16_t)-2);
3133 /*
3134 * Modify the message frame to contain a NOP and
3135 * re-issue it to the controller.
3136 */
3137 Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
3138 Message, sizeof(I2O_UTIL_NOP_MESSAGE));
3139# if (I2O_UTIL_NOP != 0)
3140 I2O_MESSAGE_FRAME_setFunction (
3141 &(Message_Ptr->StdMessageFrame),
3142 I2O_UTIL_NOP);
3143# endif
3144 /*
3145 * Copy the packet out to the Original Message
3146 */
3147 bcopy ((caddr_t)Message_Ptr,
3148 sc->ha_Fvirt + MessageOffset,
3149 sizeof(I2O_UTIL_NOP_MESSAGE));
3150 /*
3151 * Issue the NOP
3152 */
3153 sc->ha_Virt->ToFIFO = MessageOffset;
3154 }
3155
3156 /*
3157 * Asynchronous command with no return requirements,
3158 * and a generic handler for immunity against odd error
3159 * returns from the adapter.
3160 */
3161 if (ccb == (union asr_ccb *)NULL) {
3162 /*
3163 * Return Reply so that it can be used for the
3164 * next command
3165 */
3166 sc->ha_Virt->FromFIFO = ReplyOffset;
3167 continue;
3168 }
3169
3170 /* Welease Wadjah! (and stop timeouts) */
3171 ASR_ccbRemove (sc, ccb);
3172
3173 switch (
3174 I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
3175 &(Reply->StdReplyFrame))) {
3176
3177 case I2O_SCSI_DSC_SUCCESS:
3178 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3179 ccb->ccb_h.status |= CAM_REQ_CMP;
3180 break;
3181
3182 case I2O_SCSI_DSC_CHECK_CONDITION:
3183 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3184 ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID;
3185 break;
3186
3187 case I2O_SCSI_DSC_BUSY:
3188 /* FALLTHRU */
3189 case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
3190 /* FALLTHRU */
3191 case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
3192 /* FALLTHRU */
3193 case I2O_SCSI_HBA_DSC_BUS_BUSY:
3194 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3195 ccb->ccb_h.status |= CAM_SCSI_BUSY;
3196 break;
3197
3198 case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
3199 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3200 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3201 break;
3202
3203 case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
3204 /* FALLTHRU */
3205 case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
3206 /* FALLTHRU */
3207 case I2O_SCSI_HBA_DSC_LUN_INVALID:
3208 /* FALLTHRU */
3209 case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
3210 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3211 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
3212 break;
3213
3214 case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
3215 /* FALLTHRU */
3216 case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
3217 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3218 ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
3219 break;
3220
3221 default:
3222 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3223 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3224 break;
3225 }
3226 if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
3227 ccb->csio.resid -=
3228 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
3229 Reply);
3230 }
3231
984263bc
MD
3232 /* Sense data in reply packet */
3233 if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
3234 u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);
3235
3236 if (size) {
3237 if (size > sizeof(ccb->csio.sense_data)) {
3238 size = sizeof(ccb->csio.sense_data);
3239 }
3240 if (size > I2O_SCSI_SENSE_DATA_SZ) {
3241 size = I2O_SCSI_SENSE_DATA_SZ;
3242 }
3243 if ((ccb->csio.sense_len)
3244 && (size > ccb->csio.sense_len)) {
3245 size = ccb->csio.sense_len;
3246 }
3247 bcopy ((caddr_t)Reply->SenseData,
3248 (caddr_t)&(ccb->csio.sense_data), size);
3249 }
3250 }
3251
3252 /*
3253 * Return Reply so that it can be used for the next command
3254 * since we have no more need for it now
3255 */
3256 sc->ha_Virt->FromFIFO = ReplyOffset;
3257
3258 if (ccb->ccb_h.path) {
3259 xpt_done ((union ccb *)ccb);
3260 } else {
3261 wakeup ((caddr_t)ccb);
3262 }
3263 }
984263bc
MD
3264 return (processed);
3265} /* asr_intr */
3266
3267#undef QueueSize /* Grrrr */
3268#undef SG_Size /* Grrrr */
3269
3270/*
3271 * Meant to be included at the bottom of asr.c !!!
3272 */
3273
3274/*
3275 * Included here as hard coded. Done because other necessary include
3276 * files utilize C++ comment structures which make them a nuisance to
3277 * included here just to pick up these three typedefs.
3278 */
3279typedef U32 DPT_TAG_T;
3280typedef U32 DPT_MSG_T;
3281typedef U32 DPT_RTN_T;
3282
#undef SCSI_RESET	/* Conflicts with "scsi/scsiconf.h" definition */
1f2de5d4 3284#include "osd_unix.h"
984263bc
MD
3285
3286#define asr_unit(dev) minor(dev)
3287
3288STATIC INLINE Asr_softc_t *
3289ASR_get_sc (
3290 IN dev_t dev)
3291{
3292 int unit = asr_unit(dev);
3293 OUT Asr_softc_t * sc = Asr_softc;
3294
3295 while (sc && sc->ha_sim[0] && (cam_sim_unit(sc->ha_sim[0]) != unit)) {
3296 sc = sc->ha_next;
3297 }
3298 return (sc);
3299} /* ASR_get_sc */
3300
3301STATIC u_int8_t ASR_ctlr_held;
3302#if (!defined(UNREFERENCED_PARAMETER))
3303# define UNREFERENCED_PARAMETER(x) (void)(x)
3304#endif
3305
3306STATIC int
3307asr_open(
3308 IN dev_t dev,
3309 int32_t flags,
3310 int32_t ifmt,
41c20dac 3311 IN d_thread_t *td)
984263bc 3312{
984263bc
MD
3313 OUT int error;
3314 UNREFERENCED_PARAMETER(flags);
3315 UNREFERENCED_PARAMETER(ifmt);
3316
3317 if (ASR_get_sc (dev) == (Asr_softc_t *)NULL) {
3318 return (ENODEV);
3319 }
dadab5e9 3320 KKASSERT(td->td_proc);
7f2216bc 3321 crit_enter();
984263bc
MD
3322 if (ASR_ctlr_held) {
3323 error = EBUSY;
dadab5e9 3324 } else if ((error = suser_cred(td->td_proc->p_ucred, 0)) == 0) {
984263bc
MD
3325 ++ASR_ctlr_held;
3326 }
7f2216bc 3327 crit_exit();
984263bc
MD
3328 return (error);
3329} /* asr_open */
3330
3331STATIC int
3332asr_close(
3333 dev_t dev,
3334 int flags,
3335 int ifmt,
41c20dac 3336 d_thread_t *td)
984263bc
MD
3337{
3338 UNREFERENCED_PARAMETER(dev);
3339 UNREFERENCED_PARAMETER(flags);
3340 UNREFERENCED_PARAMETER(ifmt);
41c20dac 3341 UNREFERENCED_PARAMETER(td);
984263bc
MD
3342
3343 ASR_ctlr_held = 0;
3344 return (0);
3345} /* asr_close */
3346
3347
3348/*-------------------------------------------------------------------------*/
3349/* Function ASR_queue_i */
3350/*-------------------------------------------------------------------------*/
3351/* The Parameters Passed To This Function Are : */
3352/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
3353/* PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command */
3354/* I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure */
3355/* */
3356/* This Function Will Take The User Request Packet And Convert It To An */
3357/* I2O MSG And Send It Off To The Adapter. */
3358/* */
3359/* Return : 0 For OK, Error Code Otherwise */
3360/*-------------------------------------------------------------------------*/
3361STATIC INLINE int
3362ASR_queue_i(
3363 IN Asr_softc_t * sc,
3364 INOUT PI2O_MESSAGE_FRAME Packet)
3365{
3366 union asr_ccb * ccb;
3367 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
3368 PI2O_MESSAGE_FRAME Message_Ptr;
3369 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply_Ptr;
3370 int MessageSizeInBytes;
3371 int ReplySizeInBytes;
3372 int error;
3373 int s;
3374 /* Scatter Gather buffer list */
3375 struct ioctlSgList_S {
3376 SLIST_ENTRY(ioctlSgList_S) link;
3377 caddr_t UserSpace;
3378 I2O_FLAGS_COUNT FlagsCount;
3379 char KernelSpace[sizeof(long)];
3380 } * elm;
3381 /* Generates a `first' entry */
3382 SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;
3383
3384 if (ASR_getBlinkLedCode(sc)) {
3385 debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
3386 ASR_getBlinkLedCode(sc));
3387 return (EIO);
3388 }
3389 /* Copy in the message into a local allocation */
3390 if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (
3391 sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK))
3392 == (PI2O_MESSAGE_FRAME)NULL) {
3393 debug_usr_cmd_printf (
3394 "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3395 return (ENOMEM);
3396 }
3397 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3398 sizeof(I2O_MESSAGE_FRAME))) != 0) {
3399 free (Message_Ptr, M_TEMP);
3400 debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
3401 return (error);
3402 }
3403 /* Acquire information to determine type of packet */
3404 MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
3405 /* The offset of the reply information within the user packet */
3406 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
3407 + MessageSizeInBytes);
3408
3409 /* Check if the message is a synchronous initialization command */
3410 s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
3411 free (Message_Ptr, M_TEMP);
3412 switch (s) {
3413
3414 case I2O_EXEC_IOP_RESET:
3415 { U32 status;
3416
3417 status = ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt);
3418 ReplySizeInBytes = sizeof(status);
3419 debug_usr_cmd_printf ("resetIOP done\n");
3420 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3421 ReplySizeInBytes));
3422 }
3423
3424 case I2O_EXEC_STATUS_GET:
3425 { I2O_EXEC_STATUS_GET_REPLY status;
3426
3427 if (ASR_getStatus (sc->ha_Virt, sc->ha_Fvirt, &status)
3428 == (PI2O_EXEC_STATUS_GET_REPLY)NULL) {
3429 debug_usr_cmd_printf ("getStatus failed\n");
3430 return (ENXIO);
3431 }
3432 ReplySizeInBytes = sizeof(status);
3433 debug_usr_cmd_printf ("getStatus done\n");
3434 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3435 ReplySizeInBytes));
3436 }
3437
3438 case I2O_EXEC_OUTBOUND_INIT:
3439 { U32 status;
3440
3441 status = ASR_initOutBound(sc);
3442 ReplySizeInBytes = sizeof(status);
3443 debug_usr_cmd_printf ("intOutBound done\n");
3444 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3445 ReplySizeInBytes));
3446 }
3447 }
3448
3449 /* Determine if the message size is valid */
3450 if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
3451 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
3452 debug_usr_cmd_printf ("Packet size %d incorrect\n",
3453 MessageSizeInBytes);
3454 return (EINVAL);
3455 }
3456
3457 if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (MessageSizeInBytes,
3458 M_TEMP, M_WAITOK)) == (PI2O_MESSAGE_FRAME)NULL) {
3459 debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3460 MessageSizeInBytes);
3461 return (ENOMEM);
3462 }
3463 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3464 MessageSizeInBytes)) != 0) {
3465 free (Message_Ptr, M_TEMP);
3466 debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
3467 MessageSizeInBytes, error);
3468 return (error);
3469 }
3470
3471 /* Check the size of the reply frame, and start constructing */
3472
3473 if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
3474 sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK))
3475 == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
3476 free (Message_Ptr, M_TEMP);
3477 debug_usr_cmd_printf (
3478 "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3479 return (ENOMEM);
3480 }
3481 if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
3482 sizeof(I2O_MESSAGE_FRAME))) != 0) {
3483 free (Reply_Ptr, M_TEMP);
3484 free (Message_Ptr, M_TEMP);
3485 debug_usr_cmd_printf (
3486 "Failed to copy in reply frame, errno=%d\n",
3487 error);
3488 return (error);
3489 }
3490 ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
3491 &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
3492 free (Reply_Ptr, M_TEMP);
3493 if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
3494 free (Message_Ptr, M_TEMP);
3495 debug_usr_cmd_printf (
3496 "Failed to copy in reply frame[%d], errno=%d\n",
3497 ReplySizeInBytes, error);
3498 return (EINVAL);
3499 }
3500
3501 if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
3502 ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
3503 ? ReplySizeInBytes
3504 : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
3505 M_TEMP, M_WAITOK)) == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
3506 free (Message_Ptr, M_TEMP);
3507 debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3508 ReplySizeInBytes);
3509 return (ENOMEM);
3510 }
3511 (void)ASR_fillMessage ((char *)Reply_Ptr, ReplySizeInBytes);
3512 Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
3513 = Message_Ptr->InitiatorContext;
3514 Reply_Ptr->StdReplyFrame.TransactionContext
3515 = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
3516 I2O_MESSAGE_FRAME_setMsgFlags(
3517 &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3518 I2O_MESSAGE_FRAME_getMsgFlags(
3519 &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
3520 | I2O_MESSAGE_FLAGS_REPLY);
3521
3522 /* Check if the message is a special case command */
3523 switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
3524 case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
3525 if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
3526 Message_Ptr) & 0xF0) >> 2)) {
3527 free (Message_Ptr, M_TEMP);
3528 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3529 &(Reply_Ptr->StdReplyFrame),
3530 (ASR_setSysTab(sc) != CAM_REQ_CMP));
3531 I2O_MESSAGE_FRAME_setMessageSize(
3532 &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3533 sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
3534 error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
3535 ReplySizeInBytes);
3536 free (Reply_Ptr, M_TEMP);
3537 return (error);
3538 }
3539 }
3540
3541 /* Deal in the general case */
3542 /* First allocate and optionally copy in each scatter gather element */
3543 SLIST_INIT(&sgList);
3544 if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
3545 PI2O_SGE_SIMPLE_ELEMENT sg;
3546
3547 /*
3548 * since this code is reused in several systems, code
3549 * efficiency is greater by using a shift operation rather
3550 * than a divide by sizeof(u_int32_t).
3551 */
3552 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3553 + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
3554 >> 2));
3555 while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
3556 + MessageSizeInBytes)) {
3557 caddr_t v;
3558 int len;
3559
3560 if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3561 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
3562 error = EINVAL;
3563 break;
3564 }
3565 len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
3566 debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
3567 sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3568 + ((I2O_MESSAGE_FRAME_getVersionOffset(
3569 Message_Ptr) & 0xF0) >> 2)),
3570 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);
3571
3572 if ((elm = (struct ioctlSgList_S *)malloc (
3573 sizeof(*elm) - sizeof(elm->KernelSpace) + len,
3574 M_TEMP, M_WAITOK))
3575 == (struct ioctlSgList_S *)NULL) {
3576 debug_usr_cmd_printf (
3577 "Failed to allocate SG[%d]\n", len);
3578 error = ENOMEM;
3579 break;
3580 }
3581 SLIST_INSERT_HEAD(&sgList, elm, link);
3582 elm->FlagsCount = sg->FlagsCount;
3583 elm->UserSpace = (caddr_t)
3584 (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
3585 v = elm->KernelSpace;
3586 /* Copy in outgoing data (DIR bit could be invalid) */
3587 if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
3588 != 0) {
3589 break;
3590 }
3591 /*
3592 * If the buffer is not contiguous, lets
3593 * break up the scatter/gather entries.
3594 */
3595 while ((len > 0)
3596 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
3597 (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
3598 int next, base, span;
3599
3600 span = 0;
3601 next = base = KVTOPHYS(v);
3602 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
3603 base);
3604
3605 /* How far can we go physically contiguously */
3606 while ((len > 0) && (base == next)) {
3607 int size;
3608
3609 next = trunc_page(base) + PAGE_SIZE;
3610 size = next - base;
3611 if (size > len) {
3612 size = len;
3613 }
3614 span += size;
3615 v += size;
3616 len -= size;
3617 base = KVTOPHYS(v);
3618 }
3619
3620 /* Construct the Flags */
3621 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
3622 span);
3623 {
3624 int flags = I2O_FLAGS_COUNT_getFlags(
3625 &(elm->FlagsCount));
3626 /* Any remaining length? */
3627 if (len > 0) {
3628 flags &=
3629 ~(I2O_SGL_FLAGS_END_OF_BUFFER
3630 | I2O_SGL_FLAGS_LAST_ELEMENT);
3631 }
3632 I2O_FLAGS_COUNT_setFlags(
3633 &(sg->FlagsCount), flags);
3634 }
3635
3636 debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
3637 sg - (PI2O_SGE_SIMPLE_ELEMENT)
3638 ((char *)Message_Ptr
3639 + ((I2O_MESSAGE_FRAME_getVersionOffset(
3640 Message_Ptr) & 0xF0) >> 2)),
3641 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
3642 span);
3643 if (len <= 0) {
3644 break;
3645 }
3646
3647 /*
3648 * Incrementing requires resizing of the
3649 * packet, and moving up the existing SG
3650 * elements.
3651 */
3652 ++sg;
3653 MessageSizeInBytes += sizeof(*sg);
3654 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
3655 I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
3656 + (sizeof(*sg) / sizeof(U32)));
3657 {
3658 PI2O_MESSAGE_FRAME NewMessage_Ptr;
3659
3660 if ((NewMessage_Ptr
3661 = (PI2O_MESSAGE_FRAME)
3662 malloc (MessageSizeInBytes,
3663 M_TEMP, M_WAITOK))
3664 == (PI2O_MESSAGE_FRAME)NULL) {
3665 debug_usr_cmd_printf (
3666 "Failed to acquire frame[%d] memory\n",
3667 MessageSizeInBytes);
3668 error = ENOMEM;
3669 break;
3670 }
3671 span = ((caddr_t)sg)
3672 - (caddr_t)Message_Ptr;
3673 bcopy ((caddr_t)Message_Ptr,
3674 (caddr_t)NewMessage_Ptr, span);
3675 bcopy ((caddr_t)(sg-1),
3676 ((caddr_t)NewMessage_Ptr) + span,
3677 MessageSizeInBytes - span);
3678 free (Message_Ptr, M_TEMP);
3679 sg = (PI2O_SGE_SIMPLE_ELEMENT)
3680 (((caddr_t)NewMessage_Ptr) + span);
3681 Message_Ptr = NewMessage_Ptr;
3682 }
3683 }
3684 if ((error)
3685 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3686 & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
3687 break;
3688 }
3689 ++sg;
3690 }
3691 if (error) {
3692 while ((elm = SLIST_FIRST(&sgList))
3693 != (struct ioctlSgList_S *)NULL) {
3694 SLIST_REMOVE_HEAD(&sgList, link);
3695 free (elm, M_TEMP);
3696 }
3697 free (Reply_Ptr, M_TEMP);
3698 free (Message_Ptr, M_TEMP);
3699 return (error);
3700 }
3701 }
3702
3703 debug_usr_cmd_printf ("Inbound: ");
3704 debug_usr_cmd_dump_message(Message_Ptr);
3705
3706 /* Send the command */
3707 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
3708 /* Free up in-kernel buffers */
3709 while ((elm = SLIST_FIRST(&sgList))
3710 != (struct ioctlSgList_S *)NULL) {
3711 SLIST_REMOVE_HEAD(&sgList, link);
3712 free (elm, M_TEMP);
3713 }
3714 free (Reply_Ptr, M_TEMP);
3715 free (Message_Ptr, M_TEMP);
3716 return (ENOMEM);
3717 }
3718
3719 /*
3720 * We do not need any (optional byteswapping) method access to
3721 * the Initiator context field.
3722 */
3723 I2O_MESSAGE_FRAME_setInitiatorContext64(
3724 (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);
3725
3726 (void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
3727
3728 free (Message_Ptr, M_TEMP);
3729
3730 /*
3731 * Wait for the board to report a finished instruction.
3732 */
7f2216bc 3733 crit_enter();
984263bc
MD
3734 while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
3735 if (ASR_getBlinkLedCode(sc)) {
3736 /* Reset Adapter */
3737 printf ("asr%d: Blink LED 0x%x resetting adapter\n",
3738 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
3739 ASR_getBlinkLedCode(sc));
3740 if (ASR_reset (sc) == ENXIO) {
3741 /* Command Cleanup */
3742 ASR_ccbRemove(sc, ccb);
3743 }
7f2216bc 3744 crit_exit();
984263bc
MD
3745 /* Free up in-kernel buffers */
3746 while ((elm = SLIST_FIRST(&sgList))
3747 != (struct ioctlSgList_S *)NULL) {
3748 SLIST_REMOVE_HEAD(&sgList, link);
3749 free (elm, M_TEMP);
3750 }
3751 free (Reply_Ptr, M_TEMP);
3752 asr_free_ccb(ccb);
3753 return (EIO);
3754 }
3755 /* Check every second for BlinkLed */
377d4740 3756 tsleep((caddr_t)ccb, 0, "asr", hz);
984263bc 3757 }
7f2216bc 3758 crit_exit();
984263bc
MD
3759
3760 debug_usr_cmd_printf ("Outbound: ");
3761 debug_usr_cmd_dump_message(Reply_Ptr);
3762
3763 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3764 &(Reply_Ptr->StdReplyFrame),
3765 (ccb->ccb_h.status != CAM_REQ_CMP));
3766
3767 if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3768 - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
3769 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
3770 ccb->csio.dxfer_len - ccb->csio.resid);
3771 }
3772 if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
3773 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3774 - I2O_SCSI_SENSE_DATA_SZ))) {
3775 int size = ReplySizeInBytes
3776 - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3777 - I2O_SCSI_SENSE_DATA_SZ;
3778
3779 if (size > sizeof(ccb->csio.sense_data)) {
3780 size = sizeof(ccb->csio.sense_data);
3781 }
3782 bcopy ((caddr_t)&(ccb->csio.sense_data), (caddr_t)Reply_Ptr->SenseData,
3783 size);
3784 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
3785 Reply_Ptr, size);