DEVFS - remove dev_ops_add(), dev_ops_get(), and get_dev()
[dragonfly.git] / sys / dev / raid / asr / asr.c
984263bc 1/* $FreeBSD: src/sys/dev/asr/asr.c,v 1.3.2.2 2001/08/23 05:21:29 scottl Exp $ */
3641b7ca 2/* $DragonFly: src/sys/dev/raid/asr/asr.c,v 1.36 2008/06/05 18:06:31 swildner Exp $ */
3/*
4 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
5 * Copyright (c) 2000-2001 Adaptec Corporation
6 * All rights reserved.
7 *
8 * TERMS AND CONDITIONS OF USE
9 *
10 * Redistribution and use in source form, with or without modification, are
11 * permitted provided that redistributions of source code must retain the
12 * above copyright notice, this list of conditions and the following disclaimer.
13 *
14 * This software is provided `as is' by Adaptec and any express or implied
15 * warranties, including, but not limited to, the implied warranties of
16 * merchantability and fitness for a particular purpose, are disclaimed. In no
17 * event shall Adaptec be liable for any direct, indirect, incidental, special,
18 * exemplary or consequential damages (including, but not limited to,
19 * procurement of substitute goods or services; loss of use, data, or profits;
20 * or business interruptions) however caused and on any theory of liability,
21 * whether in contract, strict liability, or tort (including negligence or
22 * otherwise) arising in any way out of the use of this driver software, even
23 * if advised of the possibility of such damage.
24 *
25 * SCSI I2O host adapter driver
26 *
27 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
28 * - The 2000S and 2005S do not initialize on some machines,
29 * increased timeout to 255ms from 50ms for the StatusGet
30 * command.
31 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
32 * - I knew this one was too good to be true. The error return
33 * on ioctl commands needs to be compared to CAM_REQ_CMP, not
34 * to the bit masked status.
35 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
36 * - The 2005S that was supported is affectionately called the
37 * Conjoined BAR Firmware. In order to support RAID-5 in a
38 * 16MB low-cost configuration, Firmware was forced to go
39 * to a Split BAR Firmware. This requires a separate IOP and
40 * Messaging base address.
41 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
42 * - Handle support for 2005S Zero Channel RAID solution.
43 * - System locked up if the Adapter locked up. Do not try
44 * to send other commands if the resetIOP command fails. The
45 * fail outstanding command discovery loop was flawed as the
46 * removal of the command from the list prevented discovering
47 * all the commands.
48 * - Comment changes to clarify driver.
49 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
50 * - We do not use the AC_FOUND_DEV event because of I2O.
51 * Removed asr_async.
52 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
53 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
54 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0
55 * mode as this is confused with competitor adapters in run
56 * mode.
57 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove
58 * to prevent operating system panic.
59 * - moved default major number to 154 from 97.
60 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
61 * - The controller is not actually an ASR (Adaptec SCSI RAID)
62 * series that is visible, it's more of an internal code name.
63 * remove any visible references within reason for now.
64 * - bus_ptr->LUN was not correctly zeroed when initially
65 * allocated causing a possible panic of the operating system
66 * during boot.
67 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
68 * - Code always fails for ASR_getTid affecting performance.
69 * - initiated a set of changes that resulted from a formal
70 * code inspection by Mark_Salyzyn@adaptec.com,
71 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
72 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
73 * Their findings were focussed on the LCT & TID handler, and
74 * all resulting changes were to improve code readability,
75 * consistency or have a positive effect on performance.
76 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
77 * - Passthrough returned an incorrect error.
78 * - Passthrough did not migrate the intrinsic scsi layer wakeup
79 * on command completion.
80 * - generate control device nodes using make_dev and delete_dev.
81 * - Performance affected by TID caching reallocing.
82 * - Made suggested changes by Justin_Gibbs@adaptec.com
83 * - use splcam instead of splbio.
84 * - use u_int8_t instead of u_char.
85 * - use u_int16_t instead of u_short.
86 * - use u_int32_t instead of u_long where appropriate.
87 * - use 64 bit context handler instead of 32 bit.
88 * - create_ccb should only allocate the worst case
89 * requirements for the driver since CAM may evolve
90 * making union ccb much larger than needed here.
91 * renamed create_ccb to asr_alloc_ccb.
92 * - go nutz justifying all debug prints as macros
93 * defined at the top and remove unsightly ifdefs.
94 * - INLINE STATIC viewed as confusing. Historically
95 * utilized to affect code performance and debug
96 * issues in OS, Compiler or OEM specific situations.
97 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
98 * - Ported from FreeBSD 2.2.X DPT I2O driver.
99 * changed struct scsi_xfer to union ccb/struct ccb_hdr
100 * changed variable name xs to ccb
101 * changed struct scsi_link to struct cam_path
102 * changed struct scsibus_data to struct cam_sim
103 * stopped using fordriver for holding on to the TID
104 * use proprietary packet creation instead of scsi_inquire
105 * CAM layer sends synchronize commands.
106 */
107
108#define ASR_VERSION 1
109#define ASR_REVISION '0'
110#define ASR_SUBREVISION '8'
111#define ASR_MONTH 8
112#define ASR_DAY 21
113#define ASR_YEAR 2001 - 1980
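/*
 * The signature date below appears to be stored with the year as an
 * offset from 1980, hence the subtraction above.
 */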
114
115/*
116 * Debug macros to reduce the unsightly ifdefs
117 */
118#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
119# define debug_asr_message(message) \
120 { \
121 u_int32_t * pointer = (u_int32_t *)message; \
122 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);\
123 u_int32_t counter = 0; \
124 \
125 while (length--) { \
e3869ec7 126 kprintf ("%08lx%c", (u_long)*(pointer++), \
127 (((++counter & 7) == 0) || (length == 0)) \
128 ? '\n' \
129 : ' '); \
130 } \
131 }
132#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
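/*
 * debug_asr_message() dumps a frame as 32-bit words in hex, eight words
 * per output line, using the frame's own MessageSize field as the bound.
 */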
133
134#if (defined(DEBUG_ASR))
135 /* Breaks on non-STDC based compilers :-( */
e3869ec7 136# define debug_asr_printf(fmt,args...) kprintf(fmt, ##args)
137# define debug_asr_dump_message(message) debug_asr_message(message)
138# define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path);
139 /* Non-fatal version of the ASSERT macro */
140# if (defined(__STDC__))
e3869ec7 141# define ASSERT(phrase) if(!(phrase))kprintf(#phrase " at line %d file %s\n",__LINE__,__FILE__)
984263bc 142# else
e3869ec7 143# define ASSERT(phrase) if(!(phrase))kprintf("phrase" " at line %d file %s\n",__LINE__,__FILE__)
144# endif
145#else /* DEBUG_ASR */
146# define debug_asr_printf(fmt,args...)
147# define debug_asr_dump_message(message)
148# define debug_asr_print_path(ccb)
149# define ASSERT(x)
150#endif /* DEBUG_ASR */
151
152/*
153 * If DEBUG_ASR_CMD is defined:
154 * 0 - Display incoming SCSI commands
155 * 1 - add in a quick character before queueing.
156 * 2 - add in outgoing message frames.
157 */
158#if (defined(DEBUG_ASR_CMD))
e3869ec7 159# define debug_asr_cmd_printf(fmt,args...) kprintf(fmt,##args)
160# define debug_asr_dump_ccb(ccb) \
161 { \
162 u_int8_t * cp = (unsigned char *)&(ccb->csio.cdb_io); \
163 int len = ccb->csio.cdb_len; \
164 \
165 while (len) { \
166 debug_asr_cmd_printf (" %02x", *(cp++)); \
167 --len; \
168 } \
169 }
170# if (DEBUG_ASR_CMD > 0)
171# define debug_asr_cmd1_printf debug_asr_cmd_printf
172# else
173# define debug_asr_cmd1_printf(fmt,args...)
174# endif
175# if (DEBUG_ASR_CMD > 1)
176# define debug_asr_cmd2_printf debug_asr_cmd_printf
177# define debug_asr_cmd2_dump_message(message) debug_asr_message(message)
178# else
179# define debug_asr_cmd2_printf(fmt,args...)
180# define debug_asr_cmd2_dump_message(message)
181# endif
182#else /* DEBUG_ASR_CMD */
183# define debug_asr_cmd_printf(fmt,args...)
184# define debug_asr_cmd_dump_ccb(ccb)
185# define debug_asr_cmd1_printf(fmt,args...)
186# define debug_asr_cmd2_printf(fmt,args...)
187# define debug_asr_cmd2_dump_message(message)
188#endif /* DEBUG_ASR_CMD */
189
190#if (defined(DEBUG_ASR_USR_CMD))
e3869ec7 191# define debug_usr_cmd_printf(fmt,args...) kprintf(fmt,##args)
192# define debug_usr_cmd_dump_message(message) debug_usr_message(message)
193#else /* DEBUG_ASR_USR_CMD */
194# define debug_usr_cmd_printf(fmt,args...)
195# define debug_usr_cmd_dump_message(message)
196#endif /* DEBUG_ASR_USR_CMD */
197
198#define dsDescription_size 46 /* Snug as a bug in a rug */
1f2de5d4 199#include "dptsig.h"
200
201static dpt_sig_S ASR_sig = {
202 { 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
203 PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
204 OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL,
205 ADF_ALL_SC5,
206 0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
207 ASR_MONTH, ASR_DAY, ASR_YEAR,
208/* 01234567890123456789012345678901234567890123456789 < 50 chars */
209 "Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
210 /* ^^^^^ asr_attach alters these to match OS */
211};
212
213#include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */
214#include <sys/kernel.h>
215#include <sys/systm.h>
216#include <sys/malloc.h>
217#include <sys/proc.h>
895c1f85 218#include <sys/priv.h>
984263bc 219#include <sys/conf.h>
984263bc 220#include <sys/bus.h>
221#include <sys/rman.h>
222#include <sys/stat.h>
f15db79e 223#include <sys/device.h>
7f2216bc 224#include <sys/thread2.h>
984263bc 225
226#include <bus/cam/cam.h>
227#include <bus/cam/cam_ccb.h>
228#include <bus/cam/cam_sim.h>
229#include <bus/cam/cam_xpt_sim.h>
230#include <bus/cam/cam_xpt_periph.h>
984263bc 231
232#include <bus/cam/scsi/scsi_all.h>
233#include <bus/cam/scsi/scsi_message.h>
234
235#include <vm/vm.h>
236#include <vm/pmap.h>
237#include <machine/cputypes.h>
238#include <machine/clock.h>
f8334305 239#include <machine/vmparam.h>
984263bc 240
241#include <bus/pci/pcivar.h>
242#include <bus/pci/pcireg.h>
243
244#define STATIC static
245#define INLINE
246
247#if (defined(DEBUG_ASR) && (DEBUG_ASR > 0))
248# undef STATIC
249# define STATIC
250# undef INLINE
251# define INLINE
252#endif
253#define IN
254#define OUT
255#define INOUT
256
257#define osdSwap4(x) ((u_long)ntohl((u_long)(x)))
258#define KVTOPHYS(x) vtophys(x)
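/*
 * KVTOPHYS() translates a kernel virtual address into the physical
 * address handed to the IOP (e.g. for reply buffers and SG elements);
 * osdSwap4() is a 32-bit network-to-host byte-order conversion built on
 * ntohl().
 */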
259#include "dptalign.h"
260#include "i2oexec.h"
261#include "i2obscsi.h"
262#include "i2odpt.h"
263#include "i2oadptr.h"
1f2de5d4 264#include "sys_info.h"
265
266/* Configuration Definitions */
267
268#define SG_SIZE 58 /* Scatter Gather list Size */
269#define MAX_TARGET_ID 126 /* Maximum Target ID supported */
270#define MAX_LUN 255 /* Maximum LUN Supported */
271#define MAX_CHANNEL 7 /* Maximum Channel # Supported by driver */
272#define MAX_INBOUND 2000 /* Max CCBs, Also Max Queue Size */
273#define MAX_OUTBOUND 256 /* Maximum outbound frames/adapter */
274#define MAX_INBOUND_SIZE 512 /* Maximum inbound frame size */
275#define MAX_MAP 4194304L /* Maximum mapping size of IOP */
276 /* Also serves as the minimum map for */
277 /* the 2005S zero channel RAID product */
278
279/**************************************************************************
280** ASR Host Adapter structure - One Structure For Each Host Adapter That **
281** Is Configured Into The System. The Structure Supplies Configuration **
282** Information, Status Info, Queue Info And An Active CCB List Pointer. **
283***************************************************************************/
284
285/* I2O register set */
286typedef struct {
287 U8 Address[0x30];
288 volatile U32 Status;
289 volatile U32 Mask;
290# define Mask_InterruptsDisabled 0x08
291 U32 x[2];
292 volatile U32 ToFIFO; /* In Bound FIFO */
293 volatile U32 FromFIFO; /* Out Bound FIFO */
294} i2oRegs_t;
295
296/*
297 * A MIX of performance and space considerations for TID lookups
298 */
299typedef u_int16_t tid_t;
300
301typedef struct {
302 u_int32_t size; /* up to MAX_LUN */
303 tid_t TID[1];
304} lun2tid_t;
305
306typedef struct {
307 u_int32_t size; /* up to MAX_TARGET */
308 lun2tid_t * LUN[1];
309} target2lun_t;
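/*
 * Both lookup structures end in a one-element array (LUN[1], TID[1]) that
 * is over-allocated at run time: ASR_getTidAddress() below kmalloc()s
 * them with room for `size' entries and grows them in power-of-two
 * chunks.
 */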
310
311/*
312 * To ensure that we only allocate and use the worst case ccb here, lets
313 * make our own local ccb union. If asr_alloc_ccb is utilized for another
314 * ccb type, ensure that you add the additional structures into our local
315 * ccb union. To ensure strict type checking, we will utilize the local
316 * ccb definition wherever possible.
317 */
318union asr_ccb {
319 struct ccb_hdr ccb_h; /* For convenience */
320 struct ccb_scsiio csio;
321 struct ccb_setasync csa;
322};
323
324typedef struct Asr_softc {
325 u_int16_t ha_irq;
326 void * ha_Base; /* base port for each board */
327 u_int8_t * volatile ha_blinkLED;
328 i2oRegs_t * ha_Virt; /* Base address of IOP */
329 U8 * ha_Fvirt; /* Base address of Frames */
330 I2O_IOP_ENTRY ha_SystemTable;
331 LIST_HEAD(,ccb_hdr) ha_ccb; /* ccbs in use */
332 struct cam_path * ha_path[MAX_CHANNEL+1];
333 struct cam_sim * ha_sim[MAX_CHANNEL+1];
334 struct resource * ha_mem_res;
335 struct resource * ha_mes_res;
336 struct resource * ha_irq_res;
337 void * ha_intr;
338 PI2O_LCT ha_LCT; /* Complete list of devices */
339# define le_type IdentityTag[0]
340# define I2O_BSA 0x20
341# define I2O_FCA 0x40
342# define I2O_SCSI 0x00
343# define I2O_PORT 0x80
344# define I2O_UNKNOWN 0x7F
345# define le_bus IdentityTag[1]
346# define le_target IdentityTag[2]
347# define le_lun IdentityTag[3]
348 target2lun_t * ha_targets[MAX_CHANNEL+1];
349 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
350 u_long ha_Msgs_Phys;
351
352 u_int8_t ha_in_reset;
353# define HA_OPERATIONAL 0
354# define HA_IN_RESET 1
355# define HA_OFF_LINE 2
356# define HA_OFF_LINE_RECOVERY 3
357 /* Configuration information */
358 /* The target id maximums we take */
359 u_int8_t ha_MaxBus; /* Maximum bus */
360 u_int8_t ha_MaxId; /* Maximum target ID */
361 u_int8_t ha_MaxLun; /* Maximum target LUN */
362 u_int8_t ha_SgSize; /* Max SG elements */
363 u_int8_t ha_pciBusNum;
364 u_int8_t ha_pciDeviceNum;
365 u_int8_t ha_adapter_target[MAX_CHANNEL+1];
366 u_int16_t ha_QueueSize; /* Max outstanding commands */
367 u_int16_t ha_Msgs_Count;
368
369 /* Links into other parents and HBAs */
370 struct Asr_softc * ha_next; /* HBA list */
371} Asr_softc_t;
372
373STATIC Asr_softc_t * Asr_softc;
374
375/*
376 * Prototypes of the routines we have in this object.
377 */
378
379/* Externally callable routines */
380#define PROBE_ARGS IN device_t tag
381#define PROBE_RET int
382#define PROBE_SET() u_long id = (pci_get_device(tag)<<16)|pci_get_vendor(tag)
383#define PROBE_RETURN(retval) if(retval){device_set_desc(tag,retval);return(0);}else{return(ENXIO);}
384#define ATTACH_ARGS IN device_t tag
385#define ATTACH_RET int
386#define ATTACH_SET() int unit = device_get_unit(tag)
387#define ATTACH_RETURN(retval) return(retval)
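/*
 * These macros adapt the probe/attach entry points to the bus framework:
 * PROBE_SET() packs the PCI device and vendor IDs into a single 32-bit
 * `id' (device ID in the high 16 bits), and PROBE_RETURN() sets the
 * device description and returns 0 on a match, ENXIO otherwise.
 */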
984263bc 388/* I2O HDM interface */
389STATIC PROBE_RET asr_probe (PROBE_ARGS);
390STATIC ATTACH_RET asr_attach (ATTACH_ARGS);
984263bc 391/* DOMINO placeholder */
392STATIC PROBE_RET domino_probe (PROBE_ARGS);
393STATIC ATTACH_RET domino_attach (ATTACH_ARGS);
984263bc 394/* MODE0 adapter placeholder */
395STATIC PROBE_RET mode0_probe (PROBE_ARGS);
396STATIC ATTACH_RET mode0_attach (ATTACH_ARGS);
984263bc 397
b13267a5 398STATIC Asr_softc_t * ASR_get_sc (cdev_t dev);
399STATIC d_ioctl_t asr_ioctl;
400STATIC d_open_t asr_open;
401STATIC d_close_t asr_close;
402STATIC int asr_intr (IN Asr_softc_t *sc);
403STATIC void asr_timeout (INOUT void *arg);
404STATIC int ASR_init (IN Asr_softc_t *sc);
405STATIC INLINE int ASR_acquireLct (INOUT Asr_softc_t *sc);
406STATIC INLINE int ASR_acquireHrt (INOUT Asr_softc_t *sc);
407STATIC void asr_action (IN struct cam_sim *sim,
408 IN union ccb *ccb);
409STATIC void asr_poll (IN struct cam_sim * sim);
410
411/*
412 * Here is the auto-probe structure used to nest our tests appropriately
413 * during the startup phase of the operating system.
414 */
415STATIC device_method_t asr_methods[] = {
416 DEVMETHOD(device_probe, asr_probe),
417 DEVMETHOD(device_attach, asr_attach),
418 { 0, 0 }
419};
420
421STATIC driver_t asr_driver = {
422 "asr",
423 asr_methods,
424 sizeof(Asr_softc_t)
425};
426
427STATIC devclass_t asr_devclass;
428
32832096 429DECLARE_DUMMY_MODULE(asr);
430DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);
431
432STATIC device_method_t domino_methods[] = {
433 DEVMETHOD(device_probe, domino_probe),
434 DEVMETHOD(device_attach, domino_attach),
435 { 0, 0 }
436};
437
438STATIC driver_t domino_driver = {
439 "domino",
440 domino_methods,
441 0
442};
443
444STATIC devclass_t domino_devclass;
445
446DRIVER_MODULE(domino, pci, domino_driver, domino_devclass, 0, 0);
447
448STATIC device_method_t mode0_methods[] = {
449 DEVMETHOD(device_probe, mode0_probe),
450 DEVMETHOD(device_attach, mode0_attach),
451 { 0, 0 }
452};
453
454STATIC driver_t mode0_driver = {
455 "mode0",
456 mode0_methods,
457 0
458};
459
460STATIC devclass_t mode0_devclass;
461
462DRIVER_MODULE(mode0, pci, mode0_driver, mode0_devclass, 0, 0);
463
464/*
465 * devsw for asr hba driver
466 *
467 * only ioctl is used. the sd driver provides all other access.
468 */
fef8985e 469STATIC struct dev_ops asr_ops = {
0e9b9130 470 { "asr", -1, 0 },
471 .d_open = asr_open,
472 .d_close = asr_close,
473 .d_ioctl = asr_ioctl,
474};
475
476/* I2O support routines */
477#define defAlignLong(STRUCT,NAME) char NAME[sizeof(STRUCT)]
478#define getAlignLong(STRUCT,NAME) ((STRUCT *)(NAME))
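/*
 * defAlignLong() declares a raw char buffer sized for STRUCT in the
 * caller's frame and getAlignLong() casts it back to the structure type,
 * so short-lived message frames can be built without a kmalloc().
 */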
479
480/*
481 * Fill message with default.
482 */
483STATIC PI2O_MESSAGE_FRAME
484ASR_fillMessage (
485 IN char * Message,
486 IN u_int16_t size)
487{
488 OUT PI2O_MESSAGE_FRAME Message_Ptr;
489
490 Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message);
491 bzero ((void *)Message_Ptr, size);
492 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
493 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
494 (size + sizeof(U32) - 1) >> 2);
495 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
496 return (Message_Ptr);
497} /* ASR_fillMessage */
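/*
 * Typical use (mirroring ASR_resetIOP() below): declare the frame with
 * defAlignLong(), then let ASR_fillMessage() zero it and set the version
 * offset, size (in 32-bit words) and initiator address:
 *
 *	defAlignLong(I2O_EXEC_IOP_RESET_MESSAGE, Message);
 *	PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
 *
 *	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(Message,
 *	    sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
 */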
498
499#define EMPTY_QUEUE ((U32)-1L)
500
501STATIC INLINE U32
502ASR_getMessage(
503 IN i2oRegs_t * virt)
504{
505 OUT U32 MessageOffset;
506
507 if ((MessageOffset = virt->ToFIFO) == EMPTY_QUEUE) {
508 MessageOffset = virt->ToFIFO;
509 }
510 return (MessageOffset);
511} /* ASR_getMessage */
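/*
 * A read of ToFIFO is expected to pop a free inbound frame offset from
 * the adapter; EMPTY_QUEUE (all ones) means none was available, so the
 * read is retried exactly once before the caller has to cope.
 */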
512
513/* Issue a polled command */
514STATIC U32
515ASR_initiateCp (
516 INOUT i2oRegs_t * virt,
517 INOUT U8 * fvirt,
518 IN PI2O_MESSAGE_FRAME Message)
519{
520 OUT U32 Mask = -1L;
521 U32 MessageOffset;
522 u_int Delay = 1500;
523
524 /*
525 * ASR_initiateCp is only used for synchronous commands and will
526 * be made more resilient to adapter delays since commands like
527 * resetIOP can cause the adapter to be deaf for a little time.
528 */
529 while (((MessageOffset = ASR_getMessage(virt)) == EMPTY_QUEUE)
530 && (--Delay != 0)) {
531 DELAY (10000);
532 }
533 if (MessageOffset != EMPTY_QUEUE) {
534 bcopy (Message, fvirt + MessageOffset,
535 I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
536 /*
537 * Disable the Interrupts
538 */
539 virt->Mask = (Mask = virt->Mask) | Mask_InterruptsDisabled;
540 virt->ToFIFO = MessageOffset;
541 }
542 return (Mask);
543} /* ASR_initiateCp */
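/*
 * The polling budget above is 1500 iterations of DELAY(10000), i.e. up to
 * roughly 15 seconds waiting for a free inbound frame; on failure the
 * function returns (U32)-1 instead of the saved interrupt mask.
 */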
544
545/*
546 * Reset the adapter.
547 */
548STATIC U32
549ASR_resetIOP (
550 INOUT i2oRegs_t * virt,
551 INOUT U8 * fvirt)
552{
553 struct resetMessage {
554 I2O_EXEC_IOP_RESET_MESSAGE M;
555 U32 R;
556 };
557 defAlignLong(struct resetMessage,Message);
558 PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
559 OUT U32 * volatile Reply_Ptr;
560 U32 Old;
561
562 /*
563 * Build up our copy of the Message.
564 */
565 Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(Message,
566 sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
567 I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
568 /*
569 * Reset the Reply Status
570 */
571 *(Reply_Ptr = (U32 *)((char *)Message_Ptr
572 + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0;
573 I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
574 KVTOPHYS((void *)Reply_Ptr));
575 /*
576 * Send the Message out
577 */
578 if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
579 /*
580 * Wait for a response (Poll), timeouts are dangerous if
581 * the card is truly responsive. We assume response in 2s.
582 */
583 u_int8_t Delay = 200;
584
585 while ((*Reply_Ptr == 0) && (--Delay != 0)) {
586 DELAY (10000);
587 }
588 /*
589 * Re-enable the interrupts.
590 */
591 virt->Mask = Old;
592 ASSERT (*Reply_Ptr);
593 return (*Reply_Ptr);
594 }
595 ASSERT (Old != (U32)-1L);
596 return (0);
597} /* ASR_resetIOP */
598
599/*
600 * Get the current state of the adapter
601 */
602STATIC INLINE PI2O_EXEC_STATUS_GET_REPLY
603ASR_getStatus (
604 INOUT i2oRegs_t * virt,
605 INOUT U8 * fvirt,
606 OUT PI2O_EXEC_STATUS_GET_REPLY buffer)
607{
608 defAlignLong(I2O_EXEC_STATUS_GET_MESSAGE,Message);
609 PI2O_EXEC_STATUS_GET_MESSAGE Message_Ptr;
610 U32 Old;
611
612 /*
613 * Build up our copy of the Message.
614 */
615 Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(Message,
616 sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
617 I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
618 I2O_EXEC_STATUS_GET);
619 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
620 KVTOPHYS((void *)buffer));
621 /* This one is a Byte Count */
622 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
623 sizeof(I2O_EXEC_STATUS_GET_REPLY));
624 /*
625 * Reset the Reply Status
626 */
627 bzero ((void *)buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
628 /*
629 * Send the Message out
630 */
631 if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
632 /*
633 * Wait for a response (Poll), timeouts are dangerous if
634 * the card is truly responsive. We assume response in 255ms.
635 */
636 u_int8_t Delay = 255;
637
638 while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
639 if (--Delay == 0) {
640 buffer = (PI2O_EXEC_STATUS_GET_REPLY)NULL;
641 break;
642 }
643 DELAY (1000);
644 }
645 /*
646 * Re-enable the interrupts.
647 */
648 virt->Mask = Old;
649 return (buffer);
650 }
651 return ((PI2O_EXEC_STATUS_GET_REPLY)NULL);
652} /* ASR_getStatus */
653
654/*
655 * Check if the device is a SCSI I2O HBA, and add it to the list.
656 */
657
658/*
659 * Probe for ASR controller. If we find it, we will use it.
660 * virtual adapters.
661 */
662STATIC PROBE_RET
663asr_probe(PROBE_ARGS)
664{
665 PROBE_SET();
666 if ((id == 0xA5011044) || (id == 0xA5111044)) {
667 PROBE_RETURN ("Adaptec Caching SCSI RAID");
668 }
669 PROBE_RETURN (NULL);
670} /* asr_probe */
671
672/*
673 * Probe/Attach for DOMINO chipset.
674 */
675STATIC PROBE_RET
676domino_probe(PROBE_ARGS)
677{
678 PROBE_SET();
679 if (id == 0x10121044) {
680 PROBE_RETURN ("Adaptec Caching Memory Controller");
681 }
682 PROBE_RETURN (NULL);
683} /* domino_probe */
684
685STATIC ATTACH_RET
686domino_attach (ATTACH_ARGS)
687{
688 ATTACH_RETURN (0);
689} /* domino_attach */
690
691/*
692 * Probe/Attach for MODE0 adapters.
693 */
694STATIC PROBE_RET
695mode0_probe(PROBE_ARGS)
696{
697 PROBE_SET();
698
699 /*
700 * If/When we can get a business case to commit to a
701 * Mode0 driver here, we can make all these tests more
702 * specific and robust. Mode0 adapters have their processors
703 * turned off, thus the chips are in a raw state.
704 */
705
706 /* This is a PLX9054 */
707 if (id == 0x905410B5) {
708 PROBE_RETURN ("Adaptec Mode0 PM3757");
709 }
710 /* This is a PLX9080 */
711 if (id == 0x908010B5) {
712 PROBE_RETURN ("Adaptec Mode0 PM3754/PM3755");
713 }
714 /* This is a ZION 80303 */
715 if (id == 0x53098086) {
716 PROBE_RETURN ("Adaptec Mode0 3010S");
717 }
718 /* This is an i960RS */
719 if (id == 0x39628086) {
720 PROBE_RETURN ("Adaptec Mode0 2100S");
721 }
722 /* This is an i960RN */
723 if (id == 0x19648086) {
724 PROBE_RETURN ("Adaptec Mode0 PM2865/2400A/3200S/3400S");
725 }
726#if 0 /* this would match any generic i960 -- mjs */
727 /* This is an i960RP (typically also on Motherboards) */
728 if (id == 0x19608086) {
729 PROBE_RETURN ("Adaptec Mode0 PM2554/PM1554/PM2654");
730 }
731#endif
732 PROBE_RETURN (NULL);
733} /* mode0_probe */
734
735STATIC ATTACH_RET
736mode0_attach (ATTACH_ARGS)
737{
738 ATTACH_RETURN (0);
739} /* mode0_attach */
740
741STATIC INLINE union asr_ccb *
742asr_alloc_ccb (
743 IN Asr_softc_t * sc)
744{
745 OUT union asr_ccb * new_ccb;
746
747 new_ccb = (union asr_ccb *)kmalloc(sizeof(*new_ccb), M_DEVBUF,
748 M_WAITOK | M_ZERO);
749 new_ccb->ccb_h.pinfo.priority = 1;
750 new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
751 new_ccb->ccb_h.spriv_ptr0 = sc;
752 return (new_ccb);
753} /* asr_alloc_ccb */
754
755STATIC INLINE void
756asr_free_ccb (
757 IN union asr_ccb * free_ccb)
758{
efda3bd0 759 kfree(free_ccb, M_DEVBUF);
760} /* asr_free_ccb */
761
762/*
763 * Print inquiry data `carefully'
764 */
765STATIC void
766ASR_prstring (
767 u_int8_t * s,
768 int len)
769{
770 while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
e3869ec7 771 kprintf ("%c", *(s++));
772 }
773} /* ASR_prstring */
774
775/*
776 * Prototypes
777 */
5ca58d54 778STATIC INLINE int ASR_queue (
984263bc 779 IN Asr_softc_t * sc,
5ca58d54 780 IN PI2O_MESSAGE_FRAME Message);
781/*
782 * Send a message synchronously and without Interrupt to a ccb.
783 */
784STATIC int
785ASR_queue_s (
786 INOUT union asr_ccb * ccb,
787 IN PI2O_MESSAGE_FRAME Message)
788{
789 U32 Mask;
790 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
791
792 /*
793 * We do not need any (optional byteswapping) method access to
794 * the Initiator context field.
795 */
796 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
797
798 /* Prevent interrupt service */
7f2216bc 799 crit_enter();
800 sc->ha_Virt->Mask = (Mask = sc->ha_Virt->Mask)
801 | Mask_InterruptsDisabled;
802
803 if (ASR_queue (sc, Message) == EMPTY_QUEUE) {
804 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
805 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
806 }
807
808 /*
809 * Wait for this board to report a finished instruction.
810 */
811 while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
812 (void)asr_intr (sc);
813 }
814
815 /* Re-enable Interrupts */
816 sc->ha_Virt->Mask = Mask;
7f2216bc 817 crit_exit();
818
819 return (ccb->ccb_h.status);
820} /* ASR_queue_s */
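/*
 * ASR_queue_s() is the synchronous path: adapter interrupts are masked,
 * the frame is queued, and the driver then polls by calling asr_intr()
 * directly until the ccb status leaves CAM_REQ_INPROG, after which the
 * saved interrupt mask is restored.
 */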
821
822/*
823 * Send a message synchronously to a Asr_softc_t
824 */
825STATIC int
826ASR_queue_c (
827 IN Asr_softc_t * sc,
828 IN PI2O_MESSAGE_FRAME Message)
829{
830 union asr_ccb * ccb;
831 OUT int status;
832
2038fb68 833 if ((ccb = asr_alloc_ccb (sc)) == NULL) {
834 return (CAM_REQUEUE_REQ);
835 }
836
837 status = ASR_queue_s (ccb, Message);
838
839 asr_free_ccb(ccb);
840
841 return (status);
842} /* ASR_queue_c */
843
844/*
845 * Add the specified ccb to the active queue
846 */
847STATIC INLINE void
848ASR_ccbAdd (
849 IN Asr_softc_t * sc,
850 INOUT union asr_ccb * ccb)
851{
7f2216bc 852 crit_enter();
853 LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
854 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
855 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
856 /*
857 * RAID systems can take considerable time to
858 * complete some commands given the large cache
859 * flushes when switching from write back to write thru.
860 */
861 ccb->ccb_h.timeout = 6 * 60 * 1000;
862 }
863 callout_reset(&ccb->ccb_h.timeout_ch,
864 (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
984263bc 865 }
7f2216bc 866 crit_exit();
867} /* ASR_ccbAdd */
868
869/*
870 * Remove the specified ccb from the active queue.
871 */
872STATIC INLINE void
873ASR_ccbRemove (
874 IN Asr_softc_t * sc,
875 INOUT union asr_ccb * ccb)
876{
7f2216bc 877 crit_enter();
ddcafce9 878 callout_stop(&ccb->ccb_h.timeout_ch);
984263bc 879 LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
7f2216bc 880 crit_exit();
881} /* ASR_ccbRemove */
882
883/*
884 * Fail all the active commands, so they get re-issued by the operating
885 * system.
886 */
887STATIC INLINE void
888ASR_failActiveCommands (
889 IN Asr_softc_t * sc)
890{
891 struct ccb_hdr * ccb;
892
893#if 0 /* Currently handled by callers, unnecessary paranoia currently */
894 /* Left in for historical perspective. */
895 defAlignLong(I2O_EXEC_LCT_NOTIFY_MESSAGE,Message);
896 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
897
898 /* Send a blind LCT command to wait for the enableSys to complete */
899 Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)ASR_fillMessage(Message,
900 sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT));
901 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
902 I2O_EXEC_LCT_NOTIFY);
903 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
904 I2O_CLASS_MATCH_ANYCLASS);
905 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
906#endif
907
7f2216bc 908 crit_enter();
909 /*
910 * We do not need to inform the CAM layer that we had a bus
911 * reset since we manage it on our own, this also prevents the
912 * SCSI_DELAY settling that would be required on other systems.
913 * The `SCSI_DELAY' has already been handled by the card via the
914 * acquisition of the LCT table while we are at CAM priority level.
915 * for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
916 * xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
917 * }
918 */
2038fb68 919 while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != NULL) {
920 ASR_ccbRemove (sc, (union asr_ccb *)ccb);
921
922 ccb->status &= ~CAM_STATUS_MASK;
923 ccb->status |= CAM_REQUEUE_REQ;
924 /* Nothing Transferred */
925 ((struct ccb_scsiio *)ccb)->resid
926 = ((struct ccb_scsiio *)ccb)->dxfer_len;
927
928 if (ccb->path) {
929 xpt_done ((union ccb *)ccb);
930 } else {
931 wakeup ((caddr_t)ccb);
932 }
933 }
7f2216bc 934 crit_exit();
935} /* ASR_failActiveCommands */
936
937/*
938 * The following command causes the HBA to reset the specific bus
939 */
940STATIC INLINE void
941ASR_resetBus(
942 IN Asr_softc_t * sc,
943 IN int bus)
944{
945 defAlignLong(I2O_HBA_BUS_RESET_MESSAGE,Message);
946 I2O_HBA_BUS_RESET_MESSAGE * Message_Ptr;
947 PI2O_LCT_ENTRY Device;
948
949 Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(Message,
950 sizeof(I2O_HBA_BUS_RESET_MESSAGE));
951 I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
952 I2O_HBA_BUS_RESET);
953 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
954 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
955 ++Device) {
956 if (((Device->le_type & I2O_PORT) != 0)
957 && (Device->le_bus == bus)) {
958 I2O_MESSAGE_FRAME_setTargetAddress(
959 &Message_Ptr->StdMessageFrame,
960 I2O_LCT_ENTRY_getLocalTID(Device));
961 /* Asynchronous command, with no expectations */
962 (void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
963 break;
964 }
965 }
966} /* ASR_resetBus */
967
968STATIC INLINE int
969ASR_getBlinkLedCode (
970 IN Asr_softc_t * sc)
971{
972 if ((sc != NULL)
973 && (sc->ha_blinkLED != NULL)
974 && (sc->ha_blinkLED[1] == 0xBC)) {
975 return (sc->ha_blinkLED[0]);
976 }
977 return (0);
978} /* ASR_getBlinkLedCode */
979
980/*
981 * Determine the address of a TID lookup. Must be done at high priority
982 * since the address can be changed by other threads of execution.
983 *
984 * Returns NULL pointer if not indexable (but will attempt to generate
985 * an index if `new_entry' flag is set to TRUE).
986 *
987 * All addressable entries are guaranteed to be zero if never initialized.
988 */
989STATIC INLINE tid_t *
990ASR_getTidAddress(
991 INOUT Asr_softc_t * sc,
992 IN int bus,
993 IN int target,
994 IN int lun,
995 IN int new_entry)
996{
997 target2lun_t * bus_ptr;
998 lun2tid_t * target_ptr;
999 unsigned new_size;
1000
1001 /*
1002 * Validity checking of incoming parameters. More of a bound
1003 * expansion limit than an issue with the code dealing with the
1004 * values.
1005 *
1006 * sc must be valid before it gets here, so that check could be
1007 * dropped if speed is a critical issue.
1008 */
2038fb68 1009 if ((sc == NULL)
1010 || (bus > MAX_CHANNEL)
1011 || (target > sc->ha_MaxId)
1012 || (lun > sc->ha_MaxLun)) {
1013 debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
1014 (u_long)sc, bus, target, lun);
2038fb68 1015 return (NULL);
984263bc
MD
1016 }
1017 /*
1018 * See if there is an associated bus list.
1019 *
1020 * for performance, allocate in size of BUS_CHUNK chunks.
1021 * BUS_CHUNK must be a power of two. This is to reduce
1022 * fragmentation effects on the allocations.
1023 */
1024# define BUS_CHUNK 8
1025 new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
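	/*
	 * Worked example of the rounding above: with BUS_CHUNK = 8, a
	 * target id of 5 gives (5 + 7) & ~7 = 8, so the table grows in
	 * steps of eight entries to limit reallocation and fragmentation.
	 */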
2038fb68 1026 if ((bus_ptr = sc->ha_targets[bus]) == NULL) {
1027 /*
1028 * Allocate a new structure?
1029 * Since one element in structure, the +1
1030 * needed for size has been abstracted.
1031 */
1032 if ((new_entry == FALSE)
efda3bd0 1033 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)kmalloc (
1034 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
1035 M_TEMP, M_WAITOK))
2038fb68 1036 == NULL)) {
984263bc 1037 debug_asr_printf("failed to allocate bus list\n");
2038fb68 1038 return (NULL);
1039 }
1040 bzero (bus_ptr, sizeof(*bus_ptr)
1041 + (sizeof(bus_ptr->LUN) * new_size));
1042 bus_ptr->size = new_size + 1;
1043 } else if (bus_ptr->size <= new_size) {
1044 target2lun_t * new_bus_ptr;
1045
1046 /*
1047 * Reallocate a new structure?
1048 * Since one element in structure, the +1
1049 * needed for size has been abstracted.
1050 */
1051 if ((new_entry == FALSE)
efda3bd0 1052 || ((new_bus_ptr = (target2lun_t *)kmalloc (
1053 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
1054 M_TEMP, M_WAITOK))
2038fb68 1055 == NULL)) {
984263bc 1056 debug_asr_printf("failed to reallocate bus list\n");
2038fb68 1057 return (NULL);
1058 }
1059 /*
1060 * Zero and copy the whole thing, safer, simpler coding
1061 * and not really performance critical at this point.
1062 */
1063 bzero (new_bus_ptr, sizeof(*bus_ptr)
1064 + (sizeof(bus_ptr->LUN) * new_size));
1065 bcopy (bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
1066 + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
1067 sc->ha_targets[bus] = new_bus_ptr;
efda3bd0 1068 kfree (bus_ptr, M_TEMP);
1069 bus_ptr = new_bus_ptr;
1070 bus_ptr->size = new_size + 1;
1071 }
1072 /*
1073 * We now have the bus list, let's get to the target list.
1074 * Since most systems have only *one* lun, we do not allocate
1075 * in chunks as above, here we allow one, then in chunk sizes.
1076 * TARGET_CHUNK must be a power of two. This is to reduce
1077 * fragmentation effects on the allocations.
1078 */
1079# define TARGET_CHUNK 8
1080 if ((new_size = lun) != 0) {
1081 new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
1082 }
2038fb68 1083 if ((target_ptr = bus_ptr->LUN[target]) == NULL) {
1084 /*
1085 * Allocate a new structure?
1086 * Since one element in structure, the +1
1087 * needed for size has been abstracted.
1088 */
1089 if ((new_entry == FALSE)
efda3bd0 1090 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)kmalloc (
1091 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1092 M_TEMP, M_WAITOK))
2038fb68 1093 == NULL)) {
984263bc 1094 debug_asr_printf("failed to allocate target list\n");
2038fb68 1095 return (NULL);
1096 }
1097 bzero (target_ptr, sizeof(*target_ptr)
1098 + (sizeof(target_ptr->TID) * new_size));
1099 target_ptr->size = new_size + 1;
1100 } else if (target_ptr->size <= new_size) {
1101 lun2tid_t * new_target_ptr;
1102
1103 /*
1104 * Reallocate a new structure?
1105 * Since one element in structure, the +1
1106 * needed for size has been abstracted.
1107 */
1108 if ((new_entry == FALSE)
efda3bd0 1109 || ((new_target_ptr = (lun2tid_t *)kmalloc (
1110 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1111 M_TEMP, M_WAITOK))
2038fb68 1112 == NULL)) {
984263bc 1113 debug_asr_printf("failed to reallocate target list\n");
2038fb68 1114 return (NULL);
1115 }
1116 /*
1117 * Zero and copy the whole thing, safer, simpler coding
1118 * and not really performance critical at this point.
1119 */
1120 bzero (new_target_ptr, sizeof(*target_ptr)
1121 + (sizeof(target_ptr->TID) * new_size));
1122 bcopy (target_ptr, new_target_ptr,
1123 sizeof(*target_ptr)
1124 + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
1125 bus_ptr->LUN[target] = new_target_ptr;
efda3bd0 1126 kfree (target_ptr, M_TEMP);
1127 target_ptr = new_target_ptr;
1128 target_ptr->size = new_size + 1;
1129 }
1130 /*
1131 * Now, acquire the TID address from the LUN indexed list.
1132 */
1133 return (&(target_ptr->TID[lun]));
1134} /* ASR_getTidAddress */
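/*
 * The net result is a two-level sparse table per bus:
 * sc->ha_targets[bus] -> target2lun_t.LUN[target] -> lun2tid_t.TID[lun],
 * with each level grown on demand when `new_entry' is TRUE and a NULL
 * return when the path does not exist and may not be created.
 */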
1135
1136/*
1137 * Get a pre-existing TID relationship.
1138 *
1139 * If the TID was never set, return (tid_t)-1.
1140 *
1141 * should use mutex rather than spl.
1142 */
1143STATIC INLINE tid_t
1144ASR_getTid (
1145 IN Asr_softc_t * sc,
1146 IN int bus,
1147 IN int target,
1148 IN int lun)
1149{
1150 tid_t * tid_ptr;
1151 OUT tid_t retval;
1152
7f2216bc 1153 crit_enter();
984263bc 1154 if (((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, FALSE))
2038fb68 1155 == NULL)
1156 /* (tid_t)0 or (tid_t)-1 indicate no TID */
1157 || (*tid_ptr == (tid_t)0)) {
7f2216bc 1158 crit_exit();
1159 return ((tid_t)-1);
1160 }
1161 retval = *tid_ptr;
7f2216bc 1162 crit_exit();
1163 return (retval);
1164} /* ASR_getTid */
1165
1166/*
1167 * Set a TID relationship.
1168 *
1169 * If the TID was not set, return (tid_t)-1.
1170 *
1171 * should use mutex rather than spl.
1172 */
1173STATIC INLINE tid_t
1174ASR_setTid (
1175 INOUT Asr_softc_t * sc,
1176 IN int bus,
1177 IN int target,
1178 IN int lun,
1179 INOUT tid_t TID)
1180{
1181 tid_t * tid_ptr;
1182
1183 if (TID != (tid_t)-1) {
1184 if (TID == 0) {
1185 return ((tid_t)-1);
1186 }
7f2216bc 1187 crit_enter();
984263bc 1188 if ((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, TRUE))
2038fb68 1189 == NULL) {
7f2216bc 1190 crit_exit();
1191 return ((tid_t)-1);
1192 }
1193 *tid_ptr = TID;
7f2216bc 1194 crit_exit();
1195 }
1196 return (TID);
1197} /* ASR_setTid */
1198
1199/*-------------------------------------------------------------------------*/
1200/* Function ASR_rescan */
1201/*-------------------------------------------------------------------------*/
1202/* The Parameters Passed To This Function Are : */
1203/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1204/* */
1205/* This Function Will rescan the adapter and resynchronize any data */
1206/* */
1207/* Return : 0 For OK, Error Code Otherwise */
1208/*-------------------------------------------------------------------------*/
1209
1210STATIC INLINE int
1211ASR_rescan(
1212 IN Asr_softc_t * sc)
1213{
1214 int bus;
1215 OUT int error;
1216
1217 /*
1218 * Re-acquire the LCT table and synchronize us to the adapter.
1219 */
1220 if ((error = ASR_acquireLct(sc)) == 0) {
1221 error = ASR_acquireHrt(sc);
1222 }
1223
1224 if (error != 0) {
1225 return error;
1226 }
1227
1228 bus = sc->ha_MaxBus;
1229 /* Reset all existing cached TID lookups */
1230 do {
1231 int target, event = 0;
1232
1233 /*
1234 * Scan for all targets on this bus to see if they
1235 * got affected by the rescan.
1236 */
1237 for (target = 0; target <= sc->ha_MaxId; ++target) {
1238 int lun;
1239
1240 /* Stay away from the controller ID */
1241 if (target == sc->ha_adapter_target[bus]) {
1242 continue;
1243 }
1244 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
1245 PI2O_LCT_ENTRY Device;
1246 tid_t TID = (tid_t)-1;
1247 tid_t LastTID;
1248
1249 /*
1250 * See if the cached TID changed. Search for
1251 * the device in our new LCT.
1252 */
1253 for (Device = sc->ha_LCT->LCTEntry;
1254 Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
1255 + I2O_LCT_getTableSize(sc->ha_LCT));
1256 ++Device) {
1257 if ((Device->le_type != I2O_UNKNOWN)
1258 && (Device->le_bus == bus)
1259 && (Device->le_target == target)
1260 && (Device->le_lun == lun)
1261 && (I2O_LCT_ENTRY_getUserTID(Device)
1262 == 0xFFF)) {
1263 TID = I2O_LCT_ENTRY_getLocalTID(
1264 Device);
1265 break;
1266 }
1267 }
1268 /*
1269 * Indicate to the OS that the label needs
1270 * to be recalculated, or that the specific
1271 * open device is no longer valid (Merde)
1272 * because the cached TID changed.
1273 */
1274 LastTID = ASR_getTid (sc, bus, target, lun);
1275 if (LastTID != TID) {
1276 struct cam_path * path;
1277
1278 if (xpt_create_path(&path,
1279 /*periph*/NULL,
1280 cam_sim_path(sc->ha_sim[bus]),
1281 target, lun) != CAM_REQ_CMP) {
1282 if (TID == (tid_t)-1) {
1283 event |= AC_LOST_DEVICE;
1284 } else {
1285 event |= AC_INQ_CHANGED
1286 | AC_GETDEV_CHANGED;
1287 }
1288 } else {
1289 if (TID == (tid_t)-1) {
1290 xpt_async(
1291 AC_LOST_DEVICE,
1292 path, NULL);
1293 } else if (LastTID == (tid_t)-1) {
1294 struct ccb_getdev ccb;
1295
1296 xpt_setup_ccb(
1297 &(ccb.ccb_h),
1298 path, /*priority*/5);
1299 xpt_async(
1300 AC_FOUND_DEVICE,
1301 path,
1302 &ccb);
1303 } else {
1304 xpt_async(
1305 AC_INQ_CHANGED,
1306 path, NULL);
1307 xpt_async(
1308 AC_GETDEV_CHANGED,
1309 path, NULL);
1310 }
1311 }
1312 }
1313 /*
1314 * We have the option of clearing the
1315 * cached TID for it to be rescanned, or to
1316 * set it now even if the device never got
1317 * accessed. We chose the latter since we
1318 * currently do not use the condition that
1319 * the TID ever got cached.
1320 */
1321 ASR_setTid (sc, bus, target, lun, TID);
1322 }
1323 }
1324 /*
1325 * The xpt layer cannot handle multiple events in the
1326 * same call.
1327 */
1328 if (event & AC_LOST_DEVICE) {
1329 xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
1330 }
1331 if (event & AC_INQ_CHANGED) {
1332 xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
1333 }
1334 if (event & AC_GETDEV_CHANGED) {
1335 xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
1336 }
1337 } while (--bus >= 0);
1338 return (error);
1339} /* ASR_rescan */
1340
1341/*-------------------------------------------------------------------------*/
1342/* Function ASR_reset */
1343/*-------------------------------------------------------------------------*/
1344/* The Parameters Passed To This Function Are : */
1345/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1346/* */
1347/* This Function Will reset the adapter and resynchronize any data */
1348/* */
1349/* Return : None */
1350/*-------------------------------------------------------------------------*/
1351
1352STATIC INLINE int
1353ASR_reset(
1354 IN Asr_softc_t * sc)
1355{
7f2216bc 1356 int retVal;
984263bc 1357
7f2216bc 1358 crit_enter();
1359 if ((sc->ha_in_reset == HA_IN_RESET)
1360 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
7f2216bc 1361 crit_exit();
1362 return (EBUSY);
1363 }
1364 /*
1365 * Promotes HA_OPERATIONAL to HA_IN_RESET,
1366 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1367 */
1368 ++(sc->ha_in_reset);
1369 if (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0) {
1370 debug_asr_printf ("ASR_resetIOP failed\n");
1371 /*
1372 * We really need to take this card off-line; easier said
1373 * than done. Better to keep retrying for now since, if a
1374 * UART cable is connected and the adapter blinkLEDs, it is now in
1375 * a hard state requiring action from the monitor commands to
1376 * the HBA to continue. For debugging waiting forever is a
1377 * good thing. In a production system, however, one may wish
1378 * to instead take the card off-line ...
1379 */
1380# if 0 && (defined(HA_OFF_LINE))
1381 /*
1382 * Take adapter off-line.
1383 */
e3869ec7 1384 kprintf ("asr%d: Taking adapter off-line\n",
1385 sc->ha_path[0]
1386 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1387 : 0);
1388 sc->ha_in_reset = HA_OFF_LINE;
7f2216bc 1389 crit_exit();
1390 return (ENXIO);
1391# else
1392 /* Wait Forever */
1393 while (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0);
1394# endif
1395 }
1396 retVal = ASR_init (sc);
7f2216bc 1397 crit_exit();
1398 if (retVal != 0) {
1399 debug_asr_printf ("ASR_init failed\n");
1400 sc->ha_in_reset = HA_OFF_LINE;
1401 return (ENXIO);
1402 }
1403 if (ASR_rescan (sc) != 0) {
1404 debug_asr_printf ("ASR_rescan failed\n");
1405 }
1406 ASR_failActiveCommands (sc);
1407 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
e3869ec7 1408 kprintf ("asr%d: Bringing adapter back on-line\n",
1409 sc->ha_path[0]
1410 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1411 : 0);
1412 }
1413 sc->ha_in_reset = HA_OPERATIONAL;
1414 return (0);
1415} /* ASR_reset */
1416
1417/*
1418 * Device timeout handler.
1419 */
1420STATIC void
1421asr_timeout(
1422 INOUT void * arg)
1423{
1424 union asr_ccb * ccb = (union asr_ccb *)arg;
1425 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1426 int s;
1427
1428 debug_asr_print_path(ccb);
1429 debug_asr_printf("timed out");
1430
1431 /*
1432 * Check if the adapter has locked up?
1433 */
1434 if ((s = ASR_getBlinkLedCode(sc)) != 0) {
1435 /* Reset Adapter */
e3869ec7 1436 kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
1437 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
1438 if (ASR_reset (sc) == ENXIO) {
1439 /* Try again later */
1440 callout_reset(&ccb->ccb_h.timeout_ch,
1441 (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
1442 }
1443 return;
1444 }
1445 /*
1446 * Abort does not function on the ASR card!!! Walking away from
1447 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
1448 * our best bet, followed by a complete adapter reset if that fails.
1449 */
7f2216bc 1450 crit_enter();
1451 /* Check if we already timed out once to raise the issue */
1452 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
1453 debug_asr_printf (" AGAIN\nreinitializing adapter\n");
1454 if (ASR_reset (sc) == ENXIO) {
1455 callout_reset(&ccb->ccb_h.timeout_ch,
1456 (ccb->ccb_h.timeout * hz) / 1000, asr_timeout, ccb);
984263bc 1457 }
7f2216bc 1458 crit_exit();
1459 return;
1460 }
1461 debug_asr_printf ("\nresetting bus\n");
1462 /* If the BUS reset does not take, then an adapter reset is next! */
1463 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1464 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
1465 callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
1466 asr_timeout, ccb);
1467 ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
1468 xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
7f2216bc 1469 crit_exit();
1470} /* asr_timeout */
1471
1472/*
1473 * send a message asynchronously
1474 */
1475STATIC INLINE int
1476ASR_queue(
1477 IN Asr_softc_t * sc,
1478 IN PI2O_MESSAGE_FRAME Message)
1479{
1480 OUT U32 MessageOffset;
1481 union asr_ccb * ccb;
1482
1483 debug_asr_printf ("Host Command Dump:\n");
1484 debug_asr_dump_message (Message);
1485
1486 ccb = (union asr_ccb *)(long)
1487 I2O_MESSAGE_FRAME_getInitiatorContext64(Message);
1488
1489 if ((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE) {
1490 bcopy (Message, sc->ha_Fvirt + MessageOffset,
1491 I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
1492 if (ccb) {
1493 ASR_ccbAdd (sc, ccb);
1494 }
1495 /* Post the command */
1496 sc->ha_Virt->ToFIFO = MessageOffset;
1497 } else {
1498 if (ASR_getBlinkLedCode(sc)) {
1499 /*
1500 * Unlikely we can do anything if we can't grab a
1501 * message frame :-(, but let's give it a try.
1502 */
1503 (void)ASR_reset (sc);
1504 }
1505 }
1506 return (MessageOffset);
1507} /* ASR_queue */
1508
1509
1510/* Simple Scatter Gather elements */
1511#define SG(SGL,Index,Flags,Buffer,Size) \
1512 I2O_FLAGS_COUNT_setCount( \
1513 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1514 Size); \
1515 I2O_FLAGS_COUNT_setFlags( \
1516 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1517 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags)); \
1518 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress( \
1519 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]), \
3641b7ca 1520 (Buffer == NULL) ? 0 : KVTOPHYS(Buffer))
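/*
 * SG() fills simple scatter/gather element `Index' of a message's SGL: it
 * stores the byte count, ORs the supplied flags with
 * I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT, and records the physical address
 * of Buffer (0 when Buffer is NULL).
 */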
1521
1522/*
1523 * Retrieve Parameter Group.
1524 * Buffer must be allocated using defAlignLong macro.
1525 */
1526STATIC void *
1527ASR_getParams(
1528 IN Asr_softc_t * sc,
1529 IN tid_t TID,
1530 IN int Group,
1531 OUT void * Buffer,
1532 IN unsigned BufferSize)
1533{
1534 struct paramGetMessage {
1535 I2O_UTIL_PARAMS_GET_MESSAGE M;
1536 char F[
1537 sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
1538 struct Operations {
1539 I2O_PARAM_OPERATIONS_LIST_HEADER Header;
1540 I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
1541 } O;
1542 };
1543 defAlignLong(struct paramGetMessage, Message);
1544 struct Operations * Operations_Ptr;
1545 I2O_UTIL_PARAMS_GET_MESSAGE * Message_Ptr;
1546 struct ParamBuffer {
1547 I2O_PARAM_RESULTS_LIST_HEADER Header;
1548 I2O_PARAM_READ_OPERATION_RESULT Read;
1549 char Info[1];
1550 } * Buffer_Ptr;
1551
1552 Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(Message,
1553 sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1554 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1555 Operations_Ptr = (struct Operations *)((char *)Message_Ptr
1556 + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1557 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1558 bzero ((void *)Operations_Ptr, sizeof(struct Operations));
1559 I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
1560 &(Operations_Ptr->Header), 1);
1561 I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
1562 &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
1563 I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
1564 &(Operations_Ptr->Template[0]), 0xFFFF);
1565 I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
1566 &(Operations_Ptr->Template[0]), Group);
1567 bzero ((void *)(Buffer_Ptr = getAlignLong(struct ParamBuffer, Buffer)),
1568 BufferSize);
1569
1570 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1571 I2O_VERSION_11
1572 + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1573 / sizeof(U32)) << 4));
1574 I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
1575 TID);
1576 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
1577 I2O_UTIL_PARAMS_GET);
1578 /*
1579 * Set up the buffers as scatter gather elements.
1580 */
1581 SG(&(Message_Ptr->SGL), 0,
1582 I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
1583 Operations_Ptr, sizeof(struct Operations));
1584 SG(&(Message_Ptr->SGL), 1,
1585 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1586 Buffer_Ptr, BufferSize);
1587
1588 if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
1589 && (Buffer_Ptr->Header.ResultCount)) {
1590 return ((void *)(Buffer_Ptr->Info));
1591 }
3641b7ca 1592 return (NULL);
1593} /* ASR_getParams */
1594
1595/*
1596 * Acquire the LCT information.
1597 */
1598STATIC INLINE int
1599ASR_acquireLct (
1600 INOUT Asr_softc_t * sc)
1601{
1602 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
1603 PI2O_SGE_SIMPLE_ELEMENT sg;
1604 int MessageSizeInBytes;
1605 caddr_t v;
1606 int len;
1607 I2O_LCT Table;
1608 PI2O_LCT_ENTRY Entry;
1609
1610 /*
1611 * sc value assumed valid
1612 */
1613 MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE)
1614 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
1615 Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)kmalloc (
1616 MessageSizeInBytes, M_TEMP, M_WAITOK);
1617 (void)ASR_fillMessage((char *)Message_Ptr, MessageSizeInBytes);
1618 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1619 (I2O_VERSION_11 +
1620 (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1621 / sizeof(U32)) << 4)));
1622 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1623 I2O_EXEC_LCT_NOTIFY);
1624 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
1625 I2O_CLASS_MATCH_ANYCLASS);
1626 /*
1627 * Call the LCT table to determine the number of device entries
1628 * to reserve space for.
1629 */
1630 SG(&(Message_Ptr->SGL), 0,
1631 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
1632 sizeof(I2O_LCT));
1633 /*
1634 * since this code is reused in several systems, code efficiency
1635 * is greater by using a shift operation rather than a divide by
1636 * sizeof(u_int32_t).
1637 */
1638 I2O_LCT_setTableSize(&Table,
1639 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1640 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1641 /*
1642 * Determine the size of the LCT table.
1643 */
1644 if (sc->ha_LCT) {
efda3bd0 1645 kfree (sc->ha_LCT, M_TEMP);
1646 }
1647 /*
efda3bd0 1648 * kmalloc only generates contiguous memory when less than a
1649 * page is expected. We must break the request up into an SG list ...
1650 */
1651 if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
1652 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
1653 || (len > (128 * 1024))) { /* Arbitrary */
efda3bd0 1654 kfree (Message_Ptr, M_TEMP);
1655 return (EINVAL);
1656 }
978400d3 1657 sc->ha_LCT = (PI2O_LCT)kmalloc (len, M_TEMP, M_WAITOK);
1658 /*
1659 * since this code is reused in several systems, code efficiency
1660 * is greater by using a shift operation rather than a divide by
1661 * sizeof(u_int32_t).
1662 */
1663 I2O_LCT_setTableSize(sc->ha_LCT,
1664 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1665 /*
1666 * Convert the access to the LCT table into a SG list.
1667 */
1668 sg = Message_Ptr->SGL.u.Simple;
1669 v = (caddr_t)(sc->ha_LCT);
1670 for (;;) {
1671 int next, base, span;
1672
1673 span = 0;
1674 next = base = KVTOPHYS(v);
1675 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1676
1677 /* How far can we go contiguously */
1678 while ((len > 0) && (base == next)) {
1679 int size;
1680
1681 next = trunc_page(base) + PAGE_SIZE;
1682 size = next - base;
1683 if (size > len) {
1684 size = len;
1685 }
1686 span += size;
1687 v += size;
1688 len -= size;
1689 base = KVTOPHYS(v);
1690 }
1691
1692 /* Construct the Flags */
1693 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1694 {
1695 int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
1696 if (len <= 0) {
1697 rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
1698 | I2O_SGL_FLAGS_LAST_ELEMENT
1699 | I2O_SGL_FLAGS_END_OF_BUFFER);
1700 }
1701 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
1702 }
1703
1704 if (len <= 0) {
1705 break;
1706 }
1707
1708 /*
1709 * Incrementing requires resizing of the packet.
1710 */
1711 ++sg;
1712 MessageSizeInBytes += sizeof(*sg);
1713 I2O_MESSAGE_FRAME_setMessageSize(
1714 &(Message_Ptr->StdMessageFrame),
1715 I2O_MESSAGE_FRAME_getMessageSize(
1716 &(Message_Ptr->StdMessageFrame))
1717 + (sizeof(*sg) / sizeof(U32)));
1718 {
1719 PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;
1720
1721 NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
1722 kmalloc (MessageSizeInBytes, M_TEMP, M_WAITOK);
1723 span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
1724 bcopy ((caddr_t)Message_Ptr,
1725 (caddr_t)NewMessage_Ptr, span);
efda3bd0 1726 kfree (Message_Ptr, M_TEMP);
1727 sg = (PI2O_SGE_SIMPLE_ELEMENT)
1728 (((caddr_t)NewMessage_Ptr) + span);
1729 Message_Ptr = NewMessage_Ptr;
1730 }
1731 }
1732 { int retval;
1733
1734 retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
efda3bd0 1735 kfree (Message_Ptr, M_TEMP);
1736 if (retval != CAM_REQ_CMP) {
1737 return (ENODEV);
1738 }
1739 }
1740 /* If the LCT table grew, lets truncate accesses */
1741 if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
1742 I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
1743 }
1744 for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
1745 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1746 ++Entry) {
1747 Entry->le_type = I2O_UNKNOWN;
1748 switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {
1749
1750 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
1751 Entry->le_type = I2O_BSA;
1752 break;
1753
1754 case I2O_CLASS_SCSI_PERIPHERAL:
1755 Entry->le_type = I2O_SCSI;
1756 break;
1757
1758 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
1759 Entry->le_type = I2O_FCA;
1760 break;
1761
1762 case I2O_CLASS_BUS_ADAPTER_PORT:
1763 Entry->le_type = I2O_PORT | I2O_SCSI;
1764 /* FALLTHRU */
1765 case I2O_CLASS_FIBRE_CHANNEL_PORT:
1766 if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
1767 I2O_CLASS_FIBRE_CHANNEL_PORT) {
1768 Entry->le_type = I2O_PORT | I2O_FCA;
1769 }
1770 { struct ControllerInfo {
1771 I2O_PARAM_RESULTS_LIST_HEADER Header;
1772 I2O_PARAM_READ_OPERATION_RESULT Read;
1773 I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1774 };
1775 defAlignLong(struct ControllerInfo, Buffer);
1776 PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1777
1778 Entry->le_bus = 0xff;
1779 Entry->le_target = 0xff;
1780 Entry->le_lun = 0xff;
1781
1782 if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
1783 ASR_getParams(sc,
1784 I2O_LCT_ENTRY_getLocalTID(Entry),
1785 I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
1786 Buffer, sizeof(struct ControllerInfo)))
1787 == (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)NULL) {
1788 continue;
1789 }
1790 Entry->le_target
1791 = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
1792 Info);
1793 Entry->le_lun = 0;
1794 } /* FALLTHRU */
1795 default:
1796 continue;
1797 }
1798 { struct DeviceInfo {
1799 I2O_PARAM_RESULTS_LIST_HEADER Header;
1800 I2O_PARAM_READ_OPERATION_RESULT Read;
1801 I2O_DPT_DEVICE_INFO_SCALAR Info;
1802 };
1803 defAlignLong (struct DeviceInfo, Buffer);
1804 PI2O_DPT_DEVICE_INFO_SCALAR Info;
1805
1806 Entry->le_bus = 0xff;
1807 Entry->le_target = 0xff;
1808 Entry->le_lun = 0xff;
1809
1810 if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
1811 ASR_getParams(sc,
1812 I2O_LCT_ENTRY_getLocalTID(Entry),
1813 I2O_DPT_DEVICE_INFO_GROUP_NO,
1814 Buffer, sizeof(struct DeviceInfo)))
1815 == (PI2O_DPT_DEVICE_INFO_SCALAR)NULL) {
1816 continue;
1817 }
1818 Entry->le_type
1819 |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
1820 Entry->le_bus
1821 = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
1822 if ((Entry->le_bus > sc->ha_MaxBus)
1823 && (Entry->le_bus <= MAX_CHANNEL)) {
1824 sc->ha_MaxBus = Entry->le_bus;
1825 }
1826 Entry->le_target
1827 = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
1828 Entry->le_lun
1829 = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
1830 }
1831 }
1832 /*
1833 * A zero return value indicates success.
1834 */
1835 return (0);
1836} /* ASR_acquireLct */
1837
1838/*
1839 * Initialize a message frame.
1840 * We assume that the CDB has already been set up, so all we do here is
1841 * generate the Scatter Gather list.
1842 */
1843STATIC INLINE PI2O_MESSAGE_FRAME
1844ASR_init_message(
1845 IN union asr_ccb * ccb,
1846 OUT PI2O_MESSAGE_FRAME Message)
1847{
1848 int next, span, base, rw;
1849 OUT PI2O_MESSAGE_FRAME Message_Ptr;
1850 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1851 PI2O_SGE_SIMPLE_ELEMENT sg;
1852 caddr_t v;
1853 vm_size_t size, len;
1854 U32 MessageSize;
1855
1856 /* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
1857 bzero (Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message),
1858 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT)));
1859
1860 {
1861 int target = ccb->ccb_h.target_id;
1862 int lun = ccb->ccb_h.target_lun;
1863 int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1864 tid_t TID;
1865
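		/*
		 * Try the cached TID map first; on a miss, scan the LCT
		 * for a matching bus/target/lun entry whose UserTID is
		 * 0xFFF and remember the TID found there for later
		 * commands via ASR_setTid.
		 */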
1866 if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
1867 PI2O_LCT_ENTRY Device;
1868
1869 TID = (tid_t)0;
1870 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
1871 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1872 ++Device) {
1873 if ((Device->le_type != I2O_UNKNOWN)
1874 && (Device->le_bus == bus)
1875 && (Device->le_target == target)
1876 && (Device->le_lun == lun)
1877 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
1878 TID = I2O_LCT_ENTRY_getLocalTID(Device);
1879 ASR_setTid (sc, Device->le_bus,
1880 Device->le_target, Device->le_lun,
1881 TID);
1882 break;
1883 }
1884 }
1885 }
1886 if (TID == (tid_t)0) {
1887 return ((PI2O_MESSAGE_FRAME)NULL);
1888 }
1889 I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
1890 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
1891 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
1892 }
1893 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
1894 (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1895 / sizeof(U32)) << 4));
1896 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
1897 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1898 - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
1899 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
1900 I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
1901 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
1902 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
1903 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1904 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1905 I2O_SCB_FLAG_ENABLE_DISCONNECT
1906 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1907 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
1908 /*
1909 * We do not need any (optional byteswapping) method access to
1910 * the Initiator & Transaction context field.
1911 */
1912 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
1913
1914 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
1915 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
1916 /*
1917 * copy the cdb over
1918 */
1919 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
1920 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
1921 bcopy (&(ccb->csio.cdb_io),
1922 ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB, ccb->csio.cdb_len);
1923
1924 /*
1925 * Given a buffer describing a transfer, set up a scatter/gather map
1926 * in a ccb to map that SCSI transfer.
1927 */
1928
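	/*
	 * Derive the transfer direction from the CAM flags: CAM_DIR_IN
	 * leaves the SG direction bit clear (device to host), anything
	 * else sets I2O_SGL_FLAGS_DIR and selects the matching
	 * XFER_TO_DEVICE SCB flag below.
	 */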
1929 rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;
1930
1931 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1932 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1933 (ccb->csio.dxfer_len)
1934 ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
1935 | I2O_SCB_FLAG_ENABLE_DISCONNECT
1936 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1937 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
1938 : (I2O_SCB_FLAG_XFER_FROM_DEVICE
1939 | I2O_SCB_FLAG_ENABLE_DISCONNECT
1940 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1941 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
1942 : (I2O_SCB_FLAG_ENABLE_DISCONNECT
1943 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1944 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
1945
1946 /*
1947 * Given a transfer described by a `data', fill in the SG list.
1948 */
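	/*
	 * As with the LCT read above, each physically contiguous run of
	 * the data buffer becomes one simple SG element; here the list
	 * is capped at SG_SIZE elements since this frame is not resized.
	 */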
1949 sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];
1950
1951 len = ccb->csio.dxfer_len;
1952 v = ccb->csio.data_ptr;
1953 ASSERT (ccb->csio.dxfer_len >= 0);
1954 MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
1955 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
1956 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
1957 while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1958 Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
1959 span = 0;
1960 next = base = KVTOPHYS(v);
1961 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1962
1963 /* How far can we go contiguously */
1964 while ((len > 0) && (base == next)) {
1965 next = trunc_page(base) + PAGE_SIZE;
1966 size = next - base;
1967 if (size > len) {
1968 size = len;
1969 }
1970 span += size;
1971 v += size;
1972 len -= size;
1973 base = KVTOPHYS(v);
1974 }
1975
1976 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1977 if (len == 0) {
1978 rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
1979 }
1980 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
1981 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
1982 ++sg;
1983 MessageSize += sizeof(*sg) / sizeof(U32);
1984 }
1985 /* We always do the request sense ... */
1986 if ((span = ccb->csio.sense_len) == 0) {
1987 span = sizeof(ccb->csio.sense_data);
1988 }
1989 SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1990 &(ccb->csio.sense_data), span);
1991 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
1992 MessageSize + (sizeof(*sg) / sizeof(U32)));
1993 return (Message_Ptr);
1994} /* ASR_init_message */
1995
1996/*
1997 * Reset the adapter.
1998 */
1999STATIC INLINE U32
2000ASR_initOutBound (
2001 INOUT Asr_softc_t * sc)
2002{
2003 struct initOutBoundMessage {
2004 I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
2005 U32 R;
2006 };
2007 defAlignLong(struct initOutBoundMessage,Message);
2008 PI2O_EXEC_OUTBOUND_INIT_MESSAGE Message_Ptr;
2009 OUT U32 * volatile Reply_Ptr;
2010 U32 Old;
2011
2012 /*
2013 * Build up our copy of the Message.
2014 */
2015 Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(Message,
2016 sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
2017 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2018 I2O_EXEC_OUTBOUND_INIT);
2019 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
2020 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
2021 sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
2022 /*
2023 * Reset the Reply Status
2024 */
2025 *(Reply_Ptr = (U32 *)((char *)Message_Ptr
2026 + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
2027 SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
2028 sizeof(U32));
2029 /*
2030 * Send the Message out
2031 */
2032 if ((Old = ASR_initiateCp (sc->ha_Virt, sc->ha_Fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
2033 u_long size, addr;
2034
2035 /*
2036 * Wait for a response (Poll).
2037 */
2038 while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
2039 /*
2040 * Re-enable the interrupts.
2041 */
2042 sc->ha_Virt->Mask = Old;
2043 /*
2044 * Populate the outbound table.
2045 */
2046 if (sc->ha_Msgs == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
2047
2048 /* Allocate the reply frames */
2049 size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
2050 * sc->ha_Msgs_Count;
2051
2052 /*
2053 * contigmalloc only works reliably at
2054 * initialization time.
2055 */
2056 if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
e7b4468c 2057 contigmalloc (size, M_DEVBUF, M_WAITOK | M_ZERO, 0ul,
2058 0xFFFFFFFFul, (u_long)sizeof(U32), 0ul))
2059 != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
2060 sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
2061 }
2062 }
2063
2064 /* Initialize the outbound FIFO */
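		/*
		 * Writing each reply frame's physical address to FromFIFO
		 * hands the empty frames to the IOP, which later posts its
		 * completions into them.
		 */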
2065 if (sc->ha_Msgs != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL)
2066 for (size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
2067 size; --size) {
2068 sc->ha_Virt->FromFIFO = addr;
2069 addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
2070 }
2071 return (*Reply_Ptr);
2072 }
2073 return (0);
2074} /* ASR_initOutBound */
2075
2076/*
2077 * Set the system table
2078 */
2079STATIC INLINE int
2080ASR_setSysTab(
2081 IN Asr_softc_t * sc)
2082{
2083 PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
2084 PI2O_SET_SYSTAB_HEADER SystemTable;
2085 Asr_softc_t * ha;
2086 PI2O_SGE_SIMPLE_ELEMENT sg;
2087 int retVal;
2088
2089 SystemTable = (PI2O_SET_SYSTAB_HEADER)kmalloc (
2090 sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO);
2091 for (ha = Asr_softc; ha; ha = ha->ha_next) {
2092 ++SystemTable->NumberEntries;
2093 }
978400d3 2094 Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)kmalloc (
2095 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2096 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
978400d3 2097 M_TEMP, M_WAITOK);
2098 (void)ASR_fillMessage((char *)Message_Ptr,
2099 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2100 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
2101 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2102 (I2O_VERSION_11 +
2103 (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2104 / sizeof(U32)) << 4)));
2105 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2106 I2O_EXEC_SYS_TAB_SET);
2107 /*
 2108	 * Use the LCT table to determine the number of device entries
 2109	 * to reserve space for.
 2110	 * Since this code is reused in several systems, code efficiency
2111 * is greater by using a shift operation rather than a divide by
2112 * sizeof(u_int32_t).
2113 */
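	/*
	 * The SG list built below carries the SET_SYSTAB header first,
	 * then one IOP system-table entry per registered adapter, and is
	 * terminated by two zero-length elements (presumably the unused
	 * private memory and I/O space segments the ExecSysTabSet
	 * message expects).
	 */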
2114 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
2115 + ((I2O_MESSAGE_FRAME_getVersionOffset(
2116 &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
2117 SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
2118 ++sg;
2119 for (ha = Asr_softc; ha; ha = ha->ha_next) {
2120 SG(sg, 0,
2121 ((ha->ha_next)
2122 ? (I2O_SGL_FLAGS_DIR)
2123 : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
2124 &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
2125 ++sg;
2126 }
2127 SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2128 SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
2129 | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2130 retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2131 kfree (Message_Ptr, M_TEMP);
2132 kfree (SystemTable, M_TEMP);
2133 return (retVal);
2134} /* ASR_setSysTab */
2135
2136STATIC INLINE int
2137ASR_acquireHrt (
2138 INOUT Asr_softc_t * sc)
2139{
2140 defAlignLong(I2O_EXEC_HRT_GET_MESSAGE,Message);
2141 I2O_EXEC_HRT_GET_MESSAGE * Message_Ptr;
2142 struct {
2143 I2O_HRT Header;
2144 I2O_HRT_ENTRY Entry[MAX_CHANNEL];
2145 } Hrt;
2146 u_int8_t NumberOfEntries;
2147 PI2O_HRT_ENTRY Entry;
2148
2149 bzero ((void *)&Hrt, sizeof (Hrt));
2150 Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(Message,
2151 sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2152 + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2153 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2154 (I2O_VERSION_11
2155 + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2156 / sizeof(U32)) << 4)));
2157 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
2158 I2O_EXEC_HRT_GET);
2159
2160 /*
2161 * Set up the buffers as scatter gather elements.
2162 */
2163 SG(&(Message_Ptr->SGL), 0,
2164 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2165 &Hrt, sizeof(Hrt));
2166 if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
2167 return (ENODEV);
2168 }
2169 if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
2170 > (MAX_CHANNEL + 1)) {
2171 NumberOfEntries = MAX_CHANNEL + 1;
2172 }
2173 for (Entry = Hrt.Header.HRTEntry;
2174 NumberOfEntries != 0;
2175 ++Entry, --NumberOfEntries) {
2176 PI2O_LCT_ENTRY Device;
2177
2178 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2179 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
2180 ++Device) {
2181 if (I2O_LCT_ENTRY_getLocalTID(Device)
2182 == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
2183 Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
2184 Entry) >> 16;
2185 if ((Device->le_bus > sc->ha_MaxBus)
2186 && (Device->le_bus <= MAX_CHANNEL)) {
2187 sc->ha_MaxBus = Device->le_bus;
2188 }
2189 }
2190 }
2191 }
2192 return (0);
2193} /* ASR_acquireHrt */
2194
2195/*
2196 * Enable the adapter.
2197 */
2198STATIC INLINE int
2199ASR_enableSys (
2200 IN Asr_softc_t * sc)
2201{
2202 defAlignLong(I2O_EXEC_SYS_ENABLE_MESSAGE,Message);
2203 PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr;
2204
2205 Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(Message,
2206 sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2207 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2208 I2O_EXEC_SYS_ENABLE);
2209 return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2210} /* ASR_enableSys */
2211
2212/*
2213 * Perform the stages necessary to initialize the adapter
2214 */
2215STATIC int
2216ASR_init(
2217 IN Asr_softc_t * sc)
2218{
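	/* A non-zero return means one of the three stages failed. */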
2219 return ((ASR_initOutBound(sc) == 0)
2220 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2221 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2222} /* ASR_init */
2223
2224/*
2225 * Send a Synchronize Cache command to the target device.
2226 */
2227STATIC INLINE void
2228ASR_sync (
2229 IN Asr_softc_t * sc,
2230 IN int bus,
2231 IN int target,
2232 IN int lun)
2233{
2234 tid_t TID;
2235
2236 /*
2237 * We will not synchronize the device when there are outstanding
2238 * commands issued by the OS (this is due to a locked up device,
2239 * as the OS normally would flush all outstanding commands before
2240 * issuing a shutdown or an adapter reset).
2241 */
2242 if ((sc != NULL)
2243 && (LIST_FIRST(&(sc->ha_ccb)) != NULL)
2244 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
2245 && (TID != (tid_t)0)) {
2246 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
2247 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
2248
2249 bzero (Message_Ptr
2250 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
2251 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2252 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2253
2254 I2O_MESSAGE_FRAME_setVersionOffset(
2255 (PI2O_MESSAGE_FRAME)Message_Ptr,
2256 I2O_VERSION_11
2257 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2258 - sizeof(I2O_SG_ELEMENT))
2259 / sizeof(U32)) << 4));
2260 I2O_MESSAGE_FRAME_setMessageSize(
2261 (PI2O_MESSAGE_FRAME)Message_Ptr,
2262 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2263 - sizeof(I2O_SG_ELEMENT))
2264 / sizeof(U32));
2265 I2O_MESSAGE_FRAME_setInitiatorAddress (
2266 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2267 I2O_MESSAGE_FRAME_setFunction(
2268 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2269 I2O_MESSAGE_FRAME_setTargetAddress(
2270 (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
2271 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2272 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2273 I2O_SCSI_SCB_EXEC);
2274 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
2275 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2276 I2O_SCB_FLAG_ENABLE_DISCONNECT
2277 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2278 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2279 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2280 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2281 DPT_ORGANIZATION_ID);
2282 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
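		/*
		 * Build a 6-byte SYNCHRONIZE CACHE CDB by hand; byte 1
		 * carries the LUN in bits 5-7, per the legacy SCSI-2
		 * addressing convention.
		 */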
2283 Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
2284 Message_Ptr->CDB[1] = (lun << 5);
2285
2286 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2287 (I2O_SCB_FLAG_XFER_FROM_DEVICE
2288 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2289 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2290 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2291
2292 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2293
2294 }
2295}
2296
2297STATIC INLINE void
2298ASR_synchronize (
2299 IN Asr_softc_t * sc)
2300{
2301 int bus, target, lun;
2302
2303 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2304 for (target = 0; target <= sc->ha_MaxId; ++target) {
2305 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2306 ASR_sync(sc,bus,target,lun);
2307 }
2308 }
2309 }
2310}
2311
2312/*
2313 * Reset the HBA, targets and BUS.
2314 * Currently this resets *all* the SCSI busses.
2315 */
2316STATIC INLINE void
2317asr_hbareset(
2318 IN Asr_softc_t * sc)
2319{
2320 ASR_synchronize (sc);
2321 (void)ASR_reset (sc);
2322} /* asr_hbareset */
2323
2324/*
2325 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2326 * limit and a reduction in error checking (in the pre 4.0 case).
2327 */
2328STATIC int
2329asr_pci_map_mem (
984263bc 2330 IN device_t tag,
2331 IN Asr_softc_t * sc)
2332{
2333 int rid;
2334 u_int32_t p, l, s;
2335
2336 /*
2337 * I2O specification says we must find first *memory* mapped BAR
2338 */
2339 for (rid = PCIR_MAPS;
2340 rid < (PCIR_MAPS + 4 * sizeof(u_int32_t));
2341 rid += sizeof(u_int32_t)) {
2342 p = pci_read_config(tag, rid, sizeof(p));
2343 if ((p & 1) == 0) {
2344 break;
2345 }
2346 }
2347 /*
2348 * Give up?
2349 */
2350 if (rid >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2351 rid = PCIR_MAPS;
2352 }
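	/*
	 * Standard PCI BAR sizing handshake: save the BAR, write all
	 * ones, read back the size mask (with the low flag bits
	 * stripped), negate it to get the window length, then restore
	 * the original value.
	 */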
2353 p = pci_read_config(tag, rid, sizeof(p));
2354 pci_write_config(tag, rid, -1, sizeof(p));
2355 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2356 pci_write_config(tag, rid, p, sizeof(p));
2357 if (l > MAX_MAP) {
2358 l = MAX_MAP;
2359 }
2360 /*
 2361	 * The 2005S Zero Channel RAID solution is not a perfect PCI
 2362	 * citizen.  It asks for 4MB on BAR0 and 0MB on BAR1; once
 2363	 * enabled, it rewrites the size of BAR0 to 2MB, sets BAR1 to
 2364	 * BAR0+2MB and sets its size to 2MB.  The IOP registers are
 2365	 * accessible via BAR0 and the messaging registers via BAR1.
 2366	 * The subdevice code for these parts is 50 to 59 decimal.
2367 */
2368 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s));
2369 if (s != 0xA5111044) {
2370 s = pci_read_config(tag, PCIR_SUBVEND_0, sizeof(s));
2371 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2372 && (ADPTDOMINATOR_SUB_ID_START <= s)
2373 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2374 l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
2375 }
2376 }
2377 p &= ~15;
2378 sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2379 p, p + l, l, RF_ACTIVE);
2038fb68 2380 if (sc->ha_mem_res == NULL) {
2381 return (0);
2382 }
2383 sc->ha_Base = (void *)rman_get_start(sc->ha_mem_res);
3641b7ca 2384 if (sc->ha_Base == NULL) {
2385 return (0);
2386 }
2387 sc->ha_Virt = (i2oRegs_t *) rman_get_virtual(sc->ha_mem_res);
2388 if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
2389 if ((rid += sizeof(u_int32_t))
2390 >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2391 return (0);
2392 }
2393 p = pci_read_config(tag, rid, sizeof(p));
2394 pci_write_config(tag, rid, -1, sizeof(p));
2395 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2396 pci_write_config(tag, rid, p, sizeof(p));
2397 if (l > MAX_MAP) {
2398 l = MAX_MAP;
2399 }
2400 p &= ~15;
2401 sc->ha_mes_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2402 p, p + l, l, RF_ACTIVE);
2038fb68 2403 if (sc->ha_mes_res == NULL) {
2404 return (0);
2405 }
3641b7ca 2406 if ((void *)rman_get_start(sc->ha_mes_res) == NULL) {
2407 return (0);
2408 }
2409 sc->ha_Fvirt = (U8 *) rman_get_virtual(sc->ha_mes_res);
2410 } else {
2411 sc->ha_Fvirt = (U8 *)(sc->ha_Virt);
2412 }
2413 return (1);
2414} /* asr_pci_map_mem */
2415
2416/*
2417 * A simplified copy of the real pci_map_int with additional
2418 * registration requirements.
2419 */
2420STATIC int
2421asr_pci_map_int (
984263bc 2422 IN device_t tag,
2423 IN Asr_softc_t * sc)
2424{
2425 int rid = 0;
2426 int error;
2427
2428 sc->ha_irq_res = bus_alloc_resource(tag, SYS_RES_IRQ, &rid,
2429 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE);
2038fb68 2430 if (sc->ha_irq_res == NULL) {
2431 return (0);
2432 }
ee61f228 2433 error = bus_setup_intr(tag, sc->ha_irq_res, 0,
2434 (driver_intr_t *)asr_intr, (void *)sc,
2435 &(sc->ha_intr), NULL);
2436 if (error) {
2437 return (0);
2438 }
2439 sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char));
2440 return (1);
2441} /* asr_pci_map_int */
2442
2443/*
2444 * Attach the devices, and virtual devices to the driver list.
2445 */
2446STATIC ATTACH_RET
2447asr_attach (ATTACH_ARGS)
2448{
2449 Asr_softc_t * sc;
2450 struct scsi_inquiry_data * iq;
2451 ATTACH_SET();
2452
e7b4468c 2453 sc = kmalloc(sizeof(*sc), M_DEVBUF, M_INTWAIT | M_ZERO);
2038fb68 2454 if (Asr_softc == NULL) {
2455 /*
2456 * Fixup the OS revision as saved in the dptsig for the
2457 * engine (dptioctl.h) to pick up.
2458 */
2459 bcopy (osrelease, &ASR_sig.dsDescription[16], 5);
e3869ec7 2460 kprintf ("asr%d: major=%d\n", unit, asr_ops.head.maj);
2461 }
2462 /*
2463 * Initialize the software structure
2464 */
984263bc 2465 LIST_INIT(&(sc->ha_ccb));
2466 /* Link us into the HA list */
2467 {
2468 Asr_softc_t **ha;
2469
2470 for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next));
2471 *(ha) = sc;
2472 }
2473 {
2474 PI2O_EXEC_STATUS_GET_REPLY status;
2475 int size;
2476
2477 /*
2478 * This is the real McCoy!
2479 */
2480 if (!asr_pci_map_mem(tag, sc)) {
e3869ec7 2481 kprintf ("asr%d: could not map memory\n", unit);
2482 ATTACH_RETURN(ENXIO);
2483 }
2484 /* Enable if not formerly enabled */
2485 pci_write_config (tag, PCIR_COMMAND,
2486 pci_read_config (tag, PCIR_COMMAND, sizeof(char))
2487 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2488 /* Knowledge is power, responsibility is direct */
2489 {
2490 struct pci_devinfo {
2491 STAILQ_ENTRY(pci_devinfo) pci_links;
2492 struct resource_list resources;
2493 pcicfgregs cfg;
2494 } * dinfo = device_get_ivars(tag);
2495 sc->ha_pciBusNum = dinfo->cfg.bus;
2496 sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3)
2497 | dinfo->cfg.func;
2498 }
 2499		/* Check that the device is really there */
2500 if ((ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt) == 0)
efda3bd0 2501 || ((status = (PI2O_EXEC_STATUS_GET_REPLY)kmalloc (
2502 sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK))
2503 == (PI2O_EXEC_STATUS_GET_REPLY)NULL)
2504 || (ASR_getStatus(sc->ha_Virt, sc->ha_Fvirt, status) == NULL)) {
e3869ec7 2505 kprintf ("asr%d: could not initialize hardware\n", unit);
2506 ATTACH_RETURN(ENODEV); /* Get next, maybe better luck */
2507 }
2508 sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2509 sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2510 sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2511 sc->ha_SystemTable.IopState = status->IopState;
2512 sc->ha_SystemTable.MessengerType = status->MessengerType;
2513 sc->ha_SystemTable.InboundMessageFrameSize
2514 = status->InboundMFrameSize;
2515 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow
2516 = (U32)(sc->ha_Base) + (U32)(&(((i2oRegs_t *)NULL)->ToFIFO));
2517
2518 if (!asr_pci_map_int(tag, (void *)sc)) {
e3869ec7 2519 kprintf ("asr%d: could not map interrupt\n", unit);
2520 ATTACH_RETURN(ENXIO);
2521 }
2522
 2523		/* Adjust the maximum inbound count */
2524 if (((sc->ha_QueueSize
2525 = I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status))
2526 > MAX_INBOUND)
2527 || (sc->ha_QueueSize == 0)) {
2528 sc->ha_QueueSize = MAX_INBOUND;
2529 }
2530
2531 /* Adjust the maximum outbound count */
2532 if (((sc->ha_Msgs_Count
2533 = I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status))
2534 > MAX_OUTBOUND)
2535 || (sc->ha_Msgs_Count == 0)) {
2536 sc->ha_Msgs_Count = MAX_OUTBOUND;
2537 }
2538 if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2539 sc->ha_Msgs_Count = sc->ha_QueueSize;
2540 }
2541
 2542		/* Adjust the maximum SG size to what the adapter supports */
2543 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(
2544 status) << 2)) > MAX_INBOUND_SIZE) {
2545 size = MAX_INBOUND_SIZE;
2546 }
efda3bd0 2547 kfree (status, M_TEMP);
2548 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2549 + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2550 }
2551
2552 /*
 2553	 * Only do a bus/HBA reset on the first time through; on that
 2554	 * first pass we do not send a flush to the devices.
2555 */
2556 if (ASR_init(sc) == 0) {
2557 struct BufferInfo {
2558 I2O_PARAM_RESULTS_LIST_HEADER Header;
2559 I2O_PARAM_READ_OPERATION_RESULT Read;
2560 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2561 };
2562 defAlignLong (struct BufferInfo, Buffer);
2563 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2564# define FW_DEBUG_BLED_OFFSET 8
2565
2566 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2567 ASR_getParams(sc, 0,
2568 I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2569 Buffer, sizeof(struct BufferInfo)))
2570 != (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)NULL) {
2571 sc->ha_blinkLED = sc->ha_Fvirt
2572 + I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info)
2573 + FW_DEBUG_BLED_OFFSET;
2574 }
2575 if (ASR_acquireLct(sc) == 0) {
2576 (void)ASR_acquireHrt(sc);
2577 }
2578 } else {
e3869ec7 2579 kprintf ("asr%d: failed to initialize\n", unit);
2580 ATTACH_RETURN(ENXIO);
2581 }
2582 /*
2583 * Add in additional probe responses for more channels. We
2584 * are reusing the variable `target' for a channel loop counter.
 2585	 * Done here because we need both the acquireLct and
2586 * acquireHrt data.
2587 */
2588 { PI2O_LCT_ENTRY Device;
2589
2590 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2591 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
2592 ++Device) {
2593 if (Device->le_type == I2O_UNKNOWN) {
2594 continue;
2595 }
2596 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
2597 if (Device->le_target > sc->ha_MaxId) {
2598 sc->ha_MaxId = Device->le_target;
2599 }
2600 if (Device->le_lun > sc->ha_MaxLun) {
2601 sc->ha_MaxLun = Device->le_lun;
2602 }
2603 }
2604 if (((Device->le_type & I2O_PORT) != 0)
2605 && (Device->le_bus <= MAX_CHANNEL)) {
2606 /* Do not increase MaxId for efficiency */
2607 sc->ha_adapter_target[Device->le_bus]
2608 = Device->le_target;
2609 }
2610 }
2611 }
2612
2613
2614 /*
2615 * Print the HBA model number as inquired from the card.
2616 */
2617
e3869ec7 2618 kprintf ("asr%d:", unit);
984263bc 2619
2620 iq = (struct scsi_inquiry_data *)kmalloc (
2621 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO);
2622 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
2623 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
2624 int posted = 0;
984263bc 2625
2626 bzero (Message_Ptr
2627 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
2628 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2629 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
984263bc 2630
2631 I2O_MESSAGE_FRAME_setVersionOffset(
2632 (PI2O_MESSAGE_FRAME)Message_Ptr,
2633 I2O_VERSION_11
2634 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2635 - sizeof(I2O_SG_ELEMENT))
2636 / sizeof(U32)) << 4));
2637 I2O_MESSAGE_FRAME_setMessageSize(
2638 (PI2O_MESSAGE_FRAME)Message_Ptr,
2639 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2640 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT))
2641 / sizeof(U32));
2642 I2O_MESSAGE_FRAME_setInitiatorAddress (
2643 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2644 I2O_MESSAGE_FRAME_setFunction(
2645 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2646 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2647 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2648 I2O_SCSI_SCB_EXEC);
2649 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2650 I2O_SCB_FLAG_ENABLE_DISCONNECT
2651 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2652 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2653 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
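	/*
	 * The Interpret bit directs this INQUIRY at the IOP itself
	 * rather than at an attached target; that is how the HBA's own
	 * vendor, product and revision strings are obtained.
	 */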
2654 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2655 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2656 DPT_ORGANIZATION_ID);
2657 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2658 Message_Ptr->CDB[0] = INQUIRY;
2659 Message_Ptr->CDB[4] = (unsigned char)sizeof(struct scsi_inquiry_data);
2660 if (Message_Ptr->CDB[4] == 0) {
2661 Message_Ptr->CDB[4] = 255;
2662 }
984263bc 2663
2664 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2665 (I2O_SCB_FLAG_XFER_FROM_DEVICE
2666 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2667 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2668 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
984263bc 2669
2670 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2671 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2672 sizeof(struct scsi_inquiry_data));
2673 SG(&(Message_Ptr->SGL), 0,
2674 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2675 iq, sizeof(struct scsi_inquiry_data));
2676 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
984263bc 2677
2678 if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
2679 kprintf (" ");
2680 ASR_prstring (iq->vendor, 8);
2681 ++posted;
2682 }
2683 if (iq->product[0] && (iq->product[0] != ' ')) {
2684 kprintf (" ");
2685 ASR_prstring (iq->product, 16);
2686 ++posted;
2687 }
2688 if (iq->revision[0] && (iq->revision[0] != ' ')) {
2689 kprintf (" FW Rev. ");
2690 ASR_prstring (iq->revision, 4);
2691 ++posted;
2692 }
2693 kfree ((caddr_t)iq, M_TEMP);
2694 if (posted) {
2695 kprintf (",");
984263bc 2696 }
e3869ec7 2697 kprintf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
2698 (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
2699
2700 /*
2701 * fill in the prototype cam_path.
2702 */
2703 {
2704 int bus;
2705 union asr_ccb * ccb;
2706
2038fb68 2707 if ((ccb = asr_alloc_ccb (sc)) == NULL) {
e3869ec7 2708 kprintf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit);
2709 ATTACH_RETURN(ENOMEM);
2710 }
2711 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2712 int QueueSize = sc->ha_QueueSize;
2713
2714 if (QueueSize > MAX_INBOUND) {
2715 QueueSize = MAX_INBOUND;
2716 }
2717
2718 /*
2719 * Construct our first channel SIM entry
2720 */
2721 sc->ha_sim[bus] = cam_sim_alloc(
2722 asr_action, asr_poll, "asr", sc,
1c8b7a9a 2723 unit, &sim_mplock, 1, QueueSize, NULL);
521cf4d2 2724 if (sc->ha_sim[bus] == NULL)
984263bc 2725 continue;
2726
2727 if (xpt_bus_register(sc->ha_sim[bus], bus)
2728 != CAM_SUCCESS) {
521cf4d2 2729 cam_sim_free(sc->ha_sim[bus]);
2730 sc->ha_sim[bus] = NULL;
2731 continue;
2732 }
2733
2734 if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
2735 cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
2736 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2737 xpt_bus_deregister(
2738 cam_sim_path(sc->ha_sim[bus]));
521cf4d2 2739 cam_sim_free(sc->ha_sim[bus]);
2740 sc->ha_sim[bus] = NULL;
2741 continue;
2742 }
2743 }
2744 asr_free_ccb (ccb);
2745 }
2746 /*
2747 * Generate the device node information
2748 */
fef8985e 2749 make_dev(&asr_ops, unit, 0, 0, S_IRWXU, "rasr%d", unit);
2750 ATTACH_RETURN(0);
2751} /* asr_attach */
2752
2753STATIC void
2754asr_poll(
2755 IN struct cam_sim *sim)
2756{
2757 asr_intr(cam_sim_softc(sim));
2758} /* asr_poll */
2759
2760STATIC void
2761asr_action(
2762 IN struct cam_sim * sim,
2763 IN union ccb * ccb)
2764{
2765 struct Asr_softc * sc;
2766
2767 debug_asr_printf ("asr_action(%lx,%lx{%x})\n",
2768 (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code);
2769
2770 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));
2771
2772 ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);
2773
2774 switch (ccb->ccb_h.func_code) {
2775
2776 /* Common cases first */
2777 case XPT_SCSI_IO: /* Execute the requested I/O operation */
2778 {
2779 struct Message {
2780 char M[MAX_INBOUND_SIZE];
2781 };
2782 defAlignLong(struct Message,Message);
2783 PI2O_MESSAGE_FRAME Message_Ptr;
2784
2785 /* Reject incoming commands while we are resetting the card */
2786 if (sc->ha_in_reset != HA_OPERATIONAL) {
2787 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2788 if (sc->ha_in_reset >= HA_OFF_LINE) {
2789 /* HBA is now off-line */
2790 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2791 } else {
2792 /* HBA currently resetting, try again later. */
2793 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2794 }
2795 debug_asr_cmd_printf (" e\n");
2796 xpt_done(ccb);
2797 debug_asr_cmd_printf (" q\n");
2798 break;
2799 }
2800 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
e3869ec7 2801 kprintf(
2802 "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
2803 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
2804 ccb->csio.cdb_io.cdb_bytes[0],
2805 cam_sim_bus(sim),
2806 ccb->ccb_h.target_id,
2807 ccb->ccb_h.target_lun);
2808 }
2809 debug_asr_cmd_printf ("(%d,%d,%d,%d)",
2810 cam_sim_unit(sim),
2811 cam_sim_bus(sim),
2812 ccb->ccb_h.target_id,
2813 ccb->ccb_h.target_lun);
2814 debug_asr_cmd_dump_ccb(ccb);
2815
2816 if ((Message_Ptr = ASR_init_message ((union asr_ccb *)ccb,
2817 (PI2O_MESSAGE_FRAME)Message)) != (PI2O_MESSAGE_FRAME)NULL) {
2818 debug_asr_cmd2_printf ("TID=%x:\n",
2819 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
2820 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
2821 debug_asr_cmd2_dump_message(Message_Ptr);
2822 debug_asr_cmd1_printf (" q");
2823
2824 if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
2825 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2826 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2827 debug_asr_cmd_printf (" E\n");
2828 xpt_done(ccb);
2829 }
2830 debug_asr_cmd_printf (" Q\n");
2831 break;
2832 }
2833 /*
2834 * We will get here if there is no valid TID for the device
2835 * referenced in the scsi command packet.
2836 */
2837 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2838 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2839 debug_asr_cmd_printf (" B\n");
2840 xpt_done(ccb);
2841 break;
2842 }
2843
2844 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
 2845	/* Reset HBA device ... */
2846 asr_hbareset (sc);
2847 ccb->ccb_h.status = CAM_REQ_CMP;
2848 xpt_done(ccb);
2849 break;
2850
2851# if (defined(REPORT_LUNS))
2852 case REPORT_LUNS:
2853# endif
2854 case XPT_ABORT: /* Abort the specified CCB */
2855 /* XXX Implement */
2856 ccb->ccb_h.status = CAM_REQ_INVALID;
2857 xpt_done(ccb);
2858 break;
2859
2860 case XPT_SET_TRAN_SETTINGS:
2861 /* XXX Implement */
2862 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2863 xpt_done(ccb);
2864 break;
2865
2866 case XPT_GET_TRAN_SETTINGS:
2867 /* Get default/user set transfer settings for the target */
2868 {
f19fcfb0 2869 struct ccb_trans_settings *cts = &(ccb->cts);
2870 struct ccb_trans_settings_scsi *scsi =
2871 &cts->proto_specific.scsi;
2872 struct ccb_trans_settings_spi *spi =
2873 &cts->xport_specific.spi;
2874
2875 if (cts->type == CTS_TYPE_USER_SETTINGS) {
2876 cts->protocol = PROTO_SCSI;
2877 cts->protocol_version = SCSI_REV_2;
2878 cts->transport = XPORT_SPI;
2879 cts->transport_version = 2;
2880
2881 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2882 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2883 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2884 spi->sync_period = 6; /* 40MHz */
2885 spi->sync_offset = 15;
2886 spi->valid = CTS_SPI_VALID_SYNC_RATE
2887 | CTS_SPI_VALID_SYNC_OFFSET
2888 | CTS_SPI_VALID_BUS_WIDTH
2889 | CTS_SPI_VALID_DISC;
2890 scsi->valid = CTS_SCSI_VALID_TQ;
2891
2892 ccb->ccb_h.status = CAM_REQ_CMP;
2893 } else {
2894 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2895 }
2896 xpt_done(ccb);
2897 break;
2898 }
2899
2900 case XPT_CALC_GEOMETRY:
2901 {
2902 struct ccb_calc_geometry *ccg;
2903 u_int32_t size_mb;
2904 u_int32_t secs_per_cylinder;
2905
2906 ccg = &(ccb->ccg);
2907 size_mb = ccg->volume_size
2908 / ((1024L * 1024L) / ccg->block_size);
2909
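		/*
		 * Pick a geometry much like BIOS extended translation
		 * does: larger volumes get more heads and sectors per
		 * track so the resulting cylinder count stays small.
		 */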
2910 if (size_mb > 4096) {
2911 ccg->heads = 255;
2912 ccg->secs_per_track = 63;
2913 } else if (size_mb > 2048) {
2914 ccg->heads = 128;
2915 ccg->secs_per_track = 63;
2916 } else if (size_mb > 1024) {
2917 ccg->heads = 65;
2918 ccg->secs_per_track = 63;
2919 } else {
2920 ccg->heads = 64;
2921 ccg->secs_per_track = 32;
2922 }
2923 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2924 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2925 ccb->ccb_h.status = CAM_REQ_CMP;
2926 xpt_done(ccb);
2927 break;
2928 }
2929
2930 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
2931 ASR_resetBus (sc, cam_sim_bus(sim));
2932 ccb->ccb_h.status = CAM_REQ_CMP;
2933 xpt_done(ccb);
2934 break;
2935
2936 case XPT_TERM_IO: /* Terminate the I/O process */
2937 /* XXX Implement */
2938 ccb->ccb_h.status = CAM_REQ_INVALID;
2939 xpt_done(ccb);
2940 break;
2941
2942 case XPT_PATH_INQ: /* Path routing inquiry */
2943 {
2944 struct ccb_pathinq *cpi = &(ccb->cpi);
2945
2946 cpi->version_num = 1; /* XXX??? */
2947 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2948 cpi->target_sprt = 0;
2949 /* Not necessary to reset bus, done by HDM initialization */
2950 cpi->hba_misc = PIM_NOBUSRESET;
2951 cpi->hba_eng_cnt = 0;
2952 cpi->max_target = sc->ha_MaxId;
2953 cpi->max_lun = sc->ha_MaxLun;
2954 cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
2955 cpi->bus_id = cam_sim_bus(sim);
2956 cpi->base_transfer_speed = 3300;
2957 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2958 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
2959 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2960 cpi->unit_number = cam_sim_unit(sim);
2961 cpi->ccb_h.status = CAM_REQ_CMP;
2962 cpi->transport = XPORT_SPI;
2963 cpi->transport_version = 2;
2964 cpi->protocol = PROTO_SCSI;
2965 cpi->protocol_version = SCSI_REV_2;
2966 xpt_done(ccb);
2967 break;
2968 }
2969 default:
2970 ccb->ccb_h.status = CAM_REQ_INVALID;
2971 xpt_done(ccb);
2972 break;
2973 }
2974} /* asr_action */
2975
2976
2977/*
2978 * Handle processing of current CCB as pointed to by the Status.
2979 */
2980STATIC int
2981asr_intr (
2982 IN Asr_softc_t * sc)
2983{
2984 OUT int processed;
2985
2986 for (processed = 0;
2987 sc->ha_Virt->Status & Mask_InterruptsDisabled;
2988 processed = 1) {
2989 union asr_ccb * ccb;
2990 U32 ReplyOffset;
2991 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
2992
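		/*
		 * The outbound FIFO is read twice before giving up;
		 * apparently a single read can transiently report an
		 * empty queue even when a reply is pending.
		 */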
2993 if (((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)
2994 && ((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)) {
2995 break;
2996 }
2997 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
2998 - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
2999 /*
3000 * We do not need any (optional byteswapping) method access to
3001 * the Initiator context field.
3002 */
3003 ccb = (union asr_ccb *)(long)
3004 I2O_MESSAGE_FRAME_getInitiatorContext64(
3005 &(Reply->StdReplyFrame.StdMessageFrame));
3006 if (I2O_MESSAGE_FRAME_getMsgFlags(
3007 &(Reply->StdReplyFrame.StdMessageFrame))
3008 & I2O_MESSAGE_FLAGS_FAIL) {
3009 defAlignLong(I2O_UTIL_NOP_MESSAGE,Message);
3010 PI2O_UTIL_NOP_MESSAGE Message_Ptr;
3011 U32 MessageOffset;
3012
3013 MessageOffset = (u_long)
3014 I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
3015 (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
3016 /*
3017 * Get the Original Message Frame's address, and get
 3018			 * its Transaction Context into our space. (Currently
3019 * unused at original authorship, but better to be
3020 * safe than sorry). Straight copy means that we
3021 * need not concern ourselves with the (optional
3022 * byteswapping) method access.
3023 */
3024 Reply->StdReplyFrame.TransactionContext
3025 = ((PI2O_SINGLE_REPLY_MESSAGE_FRAME)
3026 (sc->ha_Fvirt + MessageOffset))->TransactionContext;
3027 /*
3028 * For 64 bit machines, we need to reconstruct the
3029 * 64 bit context.
3030 */
3031 ccb = (union asr_ccb *)(long)
3032 I2O_MESSAGE_FRAME_getInitiatorContext64(
3033 &(Reply->StdReplyFrame.StdMessageFrame));
3034 /*
3035 * Unique error code for command failure.
3036 */
3037 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3038 &(Reply->StdReplyFrame), (u_int16_t)-2);
3039 /*
3040 * Modify the message frame to contain a NOP and
3041 * re-issue it to the controller.
3042 */
3043 Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
3044 Message, sizeof(I2O_UTIL_NOP_MESSAGE));
3045# if (I2O_UTIL_NOP != 0)
3046 I2O_MESSAGE_FRAME_setFunction (
3047 &(Message_Ptr->StdMessageFrame),
3048 I2O_UTIL_NOP);
3049# endif
3050 /*
3051 * Copy the packet out to the Original Message
3052 */
3053 bcopy ((caddr_t)Message_Ptr,
3054 sc->ha_Fvirt + MessageOffset,
3055 sizeof(I2O_UTIL_NOP_MESSAGE));
3056 /*
3057 * Issue the NOP
3058 */
3059 sc->ha_Virt->ToFIFO = MessageOffset;
3060 }
3061
3062 /*
3063 * Asynchronous command with no return requirements,
3064 * and a generic handler for immunity against odd error
3065 * returns from the adapter.
3066 */
2038fb68 3067 if (ccb == NULL) {
3068 /*
3069 * Return Reply so that it can be used for the
3070 * next command
3071 */
3072 sc->ha_Virt->FromFIFO = ReplyOffset;
3073 continue;
3074 }
3075
3076 /* Welease Wadjah! (and stop timeouts) */
3077 ASR_ccbRemove (sc, ccb);
3078
3079 switch (
3080 I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
3081 &(Reply->StdReplyFrame))) {
3082
3083 case I2O_SCSI_DSC_SUCCESS:
3084 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3085 ccb->ccb_h.status |= CAM_REQ_CMP;
3086 break;
3087
3088 case I2O_SCSI_DSC_CHECK_CONDITION:
3089 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3090 ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID;
3091 break;
3092
3093 case I2O_SCSI_DSC_BUSY:
3094 /* FALLTHRU */
3095 case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
3096 /* FALLTHRU */
3097 case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
3098 /* FALLTHRU */
3099 case I2O_SCSI_HBA_DSC_BUS_BUSY:
3100 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3101 ccb->ccb_h.status |= CAM_SCSI_BUSY;
3102 break;
3103
3104 case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
3105 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3106 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3107 break;
3108
3109 case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
3110 /* FALLTHRU */
3111 case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
3112 /* FALLTHRU */
3113 case I2O_SCSI_HBA_DSC_LUN_INVALID:
3114 /* FALLTHRU */
3115 case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
3116 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3117 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
3118 break;
3119
3120 case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
3121 /* FALLTHRU */
3122 case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
3123 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3124 ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
3125 break;
3126
3127 default:
3128 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3129 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3130 break;
3131 }
3132 if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
3133 ccb->csio.resid -=
3134 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
3135 Reply);
3136 }
3137
3138 /* Sense data in reply packet */
3139 if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
3140 u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);
3141
3142 if (size) {
3143 if (size > sizeof(ccb->csio.sense_data)) {
3144 size = sizeof(ccb->csio.sense_data);
3145 }
3146 if (size > I2O_SCSI_SENSE_DATA_SZ) {
3147 size = I2O_SCSI_SENSE_DATA_SZ;
3148 }
3149 if ((ccb->csio.sense_len)
3150 && (size > ccb->csio.sense_len)) {
3151 size = ccb->csio.sense_len;
3152 }
3153 bcopy ((caddr_t)Reply->SenseData,
3154 (caddr_t)&(ccb->csio.sense_data), size);
3155 }
3156 }
3157
3158 /*
3159 * Return Reply so that it can be used for the next command
3160 * since we have no more need for it now
3161 */
3162 sc->ha_Virt->FromFIFO = ReplyOffset;
3163
3164 if (ccb->ccb_h.path) {
3165 xpt_done ((union ccb *)ccb);
3166 } else {
3167 wakeup ((caddr_t)ccb);
3168 }
3169 }
3170 return (processed);
3171} /* asr_intr */
3172
3173#undef QueueSize /* Grrrr */
3174#undef SG_Size /* Grrrr */
3175
3176/*
3177 * Meant to be included at the bottom of asr.c !!!
3178 */
3179
3180/*
3181 * Included here as hard coded. Done because other necessary include
3182 * files utilize C++ comment structures which make them a nuisance to
3183 * included here just to pick up these three typedefs.
3184 */
3185typedef U32 DPT_TAG_T;
3186typedef U32 DPT_MSG_T;
3187typedef U32 DPT_RTN_T;
3188
 3189#undef SCSI_RESET	/* Conflicts with "scsi/scsiconf.h" definition */
1f2de5d4 3190#include "osd_unix.h"
3191
3192#define asr_unit(dev) minor(dev)
3193
3194STATIC INLINE Asr_softc_t *
3195ASR_get_sc (
b13267a5 3196 IN cdev_t dev)
3197{
3198 int unit = asr_unit(dev);
3199 OUT Asr_softc_t * sc = Asr_softc;
3200
3201 while (sc && sc->ha_sim[0] && (cam_sim_unit(sc->ha_sim[0]) != unit)) {
3202 sc = sc->ha_next;
3203 }
3204 return (sc);
3205} /* ASR_get_sc */
3206
3207STATIC u_int8_t ASR_ctlr_held;
3208#if (!defined(UNREFERENCED_PARAMETER))
3209# define UNREFERENCED_PARAMETER(x) (void)(x)
3210#endif
3211
3212STATIC int
fef8985e 3213asr_open(struct dev_open_args *ap)
984263bc 3214{
b13267a5 3215 cdev_t dev = ap->a_head.a_dev;
fef8985e 3216 OUT int error;
984263bc 3217
2038fb68 3218 if (ASR_get_sc (dev) == NULL) {
3219 return (ENODEV);
3220 }
7f2216bc 3221 crit_enter();
3222 if (ASR_ctlr_held) {
3223 error = EBUSY;
895c1f85 3224 } else if ((error = priv_check_cred(ap->a_cred, PRIV_ROOT, 0)) == 0) {
3225 ++ASR_ctlr_held;
3226 }
7f2216bc 3227 crit_exit();
3228 return (error);
3229} /* asr_open */
3230
3231STATIC int
fef8985e 3232asr_close(struct dev_close_args *ap)
984263bc 3233{
3234 ASR_ctlr_held = 0;
3235 return (0);
3236} /* asr_close */
3237
3238
3239/*-------------------------------------------------------------------------*/
3240/* Function ASR_queue_i */
3241/*-------------------------------------------------------------------------*/
3242/* The Parameters Passed To This Function Are : */
3243/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
3244/* PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command */
3245/* I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure */
3246/* */
3247/* This Function Will Take The User Request Packet And Convert It To An */
3248/* I2O MSG And Send It Off To The Adapter. */
3249/* */
3250/* Return : 0 For OK, Error Code Otherwise */
3251/*-------------------------------------------------------------------------*/
3252STATIC INLINE int
3253ASR_queue_i(
3254 IN Asr_softc_t * sc,
3255 INOUT PI2O_MESSAGE_FRAME Packet)
3256{
3257 union asr_ccb * ccb;
3258 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
3259 PI2O_MESSAGE_FRAME Message_Ptr;
3260 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply_Ptr;
3261 int MessageSizeInBytes;
3262 int ReplySizeInBytes;
3263 int error;
3264 int s;
3265 /* Scatter Gather buffer list */
3266 struct ioctlSgList_S {
3267 SLIST_ENTRY(ioctlSgList_S) link;
3268 caddr_t UserSpace;
3269 I2O_FLAGS_COUNT FlagsCount;
3270 char KernelSpace[sizeof(long)];
3271 } * elm;
3272 /* Generates a `first' entry */
3273 SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;
3274
3275 if (ASR_getBlinkLedCode(sc)) {
3276 debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
3277 ASR_getBlinkLedCode(sc));
3278 return (EIO);
3279 }
3280 /* Copy in the message into a local allocation */
3281 Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (
3282 sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK);
3283 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3284 sizeof(I2O_MESSAGE_FRAME))) != 0) {
efda3bd0 3285 kfree (Message_Ptr, M_TEMP);
3286 debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
3287 return (error);
3288 }
3289 /* Acquire information to determine type of packet */
3290 MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
3291 /* The offset of the reply information within the user packet */
3292 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
3293 + MessageSizeInBytes);
3294
3295 /* Check if the message is a synchronous initialization command */
3296 s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
efda3bd0 3297 kfree (Message_Ptr, M_TEMP);
3298 switch (s) {
3299
3300 case I2O_EXEC_IOP_RESET:
3301 { U32 status;
3302
3303 status = ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt);
3304 ReplySizeInBytes = sizeof(status);
3305 debug_usr_cmd_printf ("resetIOP done\n");
3306 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3307 ReplySizeInBytes));
3308 }
3309
3310 case I2O_EXEC_STATUS_GET:
3311 { I2O_EXEC_STATUS_GET_REPLY status;
3312
3313 if (ASR_getStatus (sc->ha_Virt, sc->ha_Fvirt, &status)
3314 == (PI2O_EXEC_STATUS_GET_REPLY)NULL) {
3315 debug_usr_cmd_printf ("getStatus failed\n");
3316 return (ENXIO);
3317 }
3318 ReplySizeInBytes = sizeof(status);
3319 debug_usr_cmd_printf ("getStatus done\n");
3320 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3321 ReplySizeInBytes));
3322 }
3323
3324 case I2O_EXEC_OUTBOUND_INIT:
3325 { U32 status;
3326
3327 status = ASR_initOutBound(sc);
3328 ReplySizeInBytes = sizeof(status);
 3329		debug_usr_cmd_printf ("initOutBound done\n");
3330 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3331 ReplySizeInBytes));
3332 }
3333 }
3334
3335 /* Determine if the message size is valid */
3336 if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
3337 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
3338 debug_usr_cmd_printf ("Packet size %d incorrect\n",
3339 MessageSizeInBytes);
3340 return (EINVAL);
3341 }
3342
3343 Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (MessageSizeInBytes,
3344 M_TEMP, M_WAITOK);
3345 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3346 MessageSizeInBytes)) != 0) {
efda3bd0 3347 kfree (Message_Ptr, M_TEMP);
3348 debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
3349 MessageSizeInBytes, error);
3350 return (error);
3351 }
3352
3353 /* Check the size of the reply frame, and start constructing */
3354
3355 Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
3356 sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK);
3357 if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
3358 sizeof(I2O_MESSAGE_FRAME))) != 0) {
3359 kfree (Reply_Ptr, M_TEMP);
3360 kfree (Message_Ptr, M_TEMP);
3361 debug_usr_cmd_printf (
3362 "Failed to copy in reply frame, errno=%d\n",
3363 error);
3364 return (error);
3365 }
3366 ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
3367 &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
efda3bd0 3368 kfree (Reply_Ptr, M_TEMP);
984263bc 3369 if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
efda3bd0 3370 kfree (Message_Ptr, M_TEMP);
3371 debug_usr_cmd_printf (
3372 "Failed to copy in reply frame[%d], errno=%d\n",
3373 ReplySizeInBytes, error);
3374 return (EINVAL);
3375 }
3376
978400d3 3377 Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
3378 ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
3379 ? ReplySizeInBytes
3380 : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
978400d3 3381 M_TEMP, M_WAITOK);
3382 (void)ASR_fillMessage ((char *)Reply_Ptr, ReplySizeInBytes);
3383 Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
3384 = Message_Ptr->InitiatorContext;
3385 Reply_Ptr->StdReplyFrame.TransactionContext
3386 = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
3387 I2O_MESSAGE_FRAME_setMsgFlags(
3388 &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3389 I2O_MESSAGE_FRAME_getMsgFlags(
3390 &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
3391 | I2O_MESSAGE_FLAGS_REPLY);
3392
3393 /* Check if the message is a special case command */
3394 switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
3395 case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
3396 if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
3397 Message_Ptr) & 0xF0) >> 2)) {
efda3bd0 3398 kfree (Message_Ptr, M_TEMP);
3399 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3400 &(Reply_Ptr->StdReplyFrame),
3401 (ASR_setSysTab(sc) != CAM_REQ_CMP));
3402 I2O_MESSAGE_FRAME_setMessageSize(
3403 &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3404 sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
3405 error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
3406 ReplySizeInBytes);
efda3bd0 3407 kfree (Reply_Ptr, M_TEMP);
3408 return (error);
3409 }
3410 }
3411
3412 /* Deal in the general case */
3413 /* First allocate and optionally copy in each scatter gather element */
3414 SLIST_INIT(&sgList);
3415 if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
3416 PI2O_SGE_SIMPLE_ELEMENT sg;
3417
3418 /*
 3419		 * Since this code is reused in several systems, code
3420 * efficiency is greater by using a shift operation rather
3421 * than a divide by sizeof(u_int32_t).
3422 */
3423 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3424 + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
3425 >> 2));
3426 while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
3427 + MessageSizeInBytes)) {
3428 caddr_t v;
3429 int len;
3430
3431 if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3432 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
3433 error = EINVAL;
3434 break;
3435 }
3436 len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
3437 debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
3438 sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3439 + ((I2O_MESSAGE_FRAME_getVersionOffset(
3440 Message_Ptr) & 0xF0) >> 2)),
3441 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);
3442
978400d3 3443 elm = (struct ioctlSgList_S *)kmalloc (
984263bc 3444 sizeof(*elm) - sizeof(elm->KernelSpace) + len,
978400d3 3445 M_TEMP, M_WAITOK);
3446 SLIST_INSERT_HEAD(&sgList, elm, link);
3447 elm->FlagsCount = sg->FlagsCount;
3448 elm->UserSpace = (caddr_t)
3449 (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
3450 v = elm->KernelSpace;
3451 /* Copy in outgoing data (DIR bit could be invalid) */
3452 if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
3453 != 0) {
3454 break;
3455 }
3456 /*
3457 * If the buffer is not contiguous, lets
3458 * break up the scatter/gather entries.
3459 */
3460 while ((len > 0)
3461 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
3462 (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
3463 int next, base, span;
3464
3465 span = 0;
3466 next = base = KVTOPHYS(v);
3467 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
3468 base);
3469
3470 /* How far can we go physically contiguously */
3471 while ((len > 0) && (base == next)) {
3472 int size;
3473
3474 next = trunc_page(base) + PAGE_SIZE;
3475 size = next - base;
3476 if (size > len) {
3477 size = len;
3478 }
3479 span += size;
3480 v += size;
3481 len -= size;
3482 base = KVTOPHYS(v);
3483 }
3484
3485 /* Construct the Flags */
3486 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
3487 span);
3488 {
3489 int flags = I2O_FLAGS_COUNT_getFlags(
3490 &(elm->FlagsCount));
3491 /* Any remaining length? */
3492 if (len > 0) {
3493 flags &=
3494 ~(I2O_SGL_FLAGS_END_OF_BUFFER
3495 | I2O_SGL_FLAGS_LAST_ELEMENT);
3496 }
3497 I2O_FLAGS_COUNT_setFlags(
3498 &(sg->FlagsCount), flags);
3499 }
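				/*
				 * END_OF_BUFFER/LAST_ELEMENT are cleared only
				 * while part of this user buffer remains, as
				 * the remainder is described by the element
				 * appended on the next pass.
				 */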
3500
3501 debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
3502 sg - (PI2O_SGE_SIMPLE_ELEMENT)
3503 ((char *)Message_Ptr
3504 + ((I2O_MESSAGE_FRAME_getVersionOffset(
3505 Message_Ptr) & 0xF0) >> 2)),
3506 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
3507 span);
3508 if (len <= 0) {
3509 break;
3510 }
3511
3512 /*
3513 * Incrementing requires resizing of the
3514 * packet, and moving up the existing SG
3515 * elements.
3516 */
3517 ++sg;
3518 MessageSizeInBytes += sizeof(*sg);
3519 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
3520 I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
3521 + (sizeof(*sg) / sizeof(U32)));
3522 {
3523 PI2O_MESSAGE_FRAME NewMessage_Ptr;
3524
978400d3 3525 NewMessage_Ptr
984263bc 3526 = (PI2O_MESSAGE_FRAME)
efda3bd0 3527 kmalloc (MessageSizeInBytes,
978400d3 3528 M_TEMP, M_WAITOK);
3529 span = ((caddr_t)sg)
3530 - (caddr_t)Message_Ptr;
3531 bcopy ((caddr_t)Message_Ptr,
3532 (caddr_t)NewMessage_Ptr, span);
3533 bcopy ((caddr_t)(sg-1),
3534 ((caddr_t)NewMessage_Ptr) + span,
3535 MessageSizeInBytes - span);
efda3bd0 3536 kfree (Message_Ptr, M_TEMP);
3537 sg = (PI2O_SGE_SIMPLE_ELEMENT)
3538 (((caddr_t)NewMessage_Ptr) + span);
3539 Message_Ptr = NewMessage_Ptr;
3540 }
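				/*
				 * The frame has grown by one element: bytes up
				 * to the new slot were copied unchanged, and
				 * the copy starting at (sg-1) shifts the
				 * remaining elements up by sizeof(*sg); the
				 * stale duplicate now at sg is overwritten on
				 * the next pass of this loop.
				 */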
3541 }
3542 if ((error)
3543 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3544 & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
3545 break;
3546 }
3547 ++sg;
3548 }
3549 if (error) {
3550 while ((elm = SLIST_FIRST(&sgList))
2038fb68 3551 != NULL) {
984263bc 3552 SLIST_REMOVE_HEAD(&sgList, link);
efda3bd0 3553 kfree (elm, M_TEMP);
984263bc 3554 }
3555 kfree (Reply_Ptr, M_TEMP);
3556 kfree (Message_Ptr, M_TEMP);
3557 return (error);
3558 }
3559 }
3560
3561 debug_usr_cmd_printf ("Inbound: ");
3562 debug_usr_cmd_dump_message(Message_Ptr);
3563
3564 /* Send the command */
2038fb68 3565 if ((ccb = asr_alloc_ccb (sc)) == NULL) {
3566 /* Free up in-kernel buffers */
3567 while ((elm = SLIST_FIRST(&sgList))
2038fb68 3568 != NULL) {
984263bc 3569 SLIST_REMOVE_HEAD(&sgList, link);
efda3bd0 3570 kfree (elm, M_TEMP);
984263bc 3571 }
3572 kfree (Reply_Ptr, M_TEMP);
3573 kfree (Message_Ptr, M_TEMP);
3574 return (ENOMEM);
3575 }
3576
3577 /*
3578 * We do not need any (optional byteswapping) method access to
3579 * the Initiator context field.
3580 */
3581 I2O_MESSAGE_FRAME_setInitiatorContext64(
3582 (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);
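	/*
	 * The ccb pointer is carried in the 64-bit initiator context so the
	 * completion (interrupt) path can match the reply to this request.
	 */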
3583
3584 (void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
3585
efda3bd0 3586 kfree (Message_Ptr, M_TEMP);
3587
3588 /*
3589 * Wait for the board to report a finished instruction.
3590 */
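	/*
	 * The loop below re-checks the ccb status roughly once a second;
	 * the completion path updates ccb_h.status when the reply arrives,
	 * while a non-zero blink-LED code indicates a hung adapter and
	 * forces a reset.
	 */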
7f2216bc 3591 crit_enter();
3592 while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
3593 if (ASR_getBlinkLedCode(sc)) {
3594 /* Reset Adapter */
e3869ec7 3595 kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
3596 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
3597 ASR_getBlinkLedCode(sc));
3598 if (ASR_reset (sc) == ENXIO) {
3599 /* Command Cleanup */
3600 ASR_ccbRemove(sc, ccb);
3601 }
7f2216bc 3602 crit_exit();
3603 /* Free up in-kernel buffers */
3604 while ((elm = SLIST_FIRST(&sgList))
2038fb68 3605 != NULL) {
984263bc 3606 SLIST_REMOVE_HEAD(&sgList, link);
efda3bd0 3607 kfree (elm, M_TEMP);
984263bc 3608 }
efda3bd0 3609 kfree (Reply_Ptr, M_TEMP);
3610 asr_free_ccb(ccb);
3611 return (EIO);
3612 }
3613 /* Check every second for BlinkLed */
377d4740 3614 tsleep((caddr_t)ccb, 0, "asr", hz);
984263bc 3615 }
7f2216bc 3616 crit_exit();
3617
3618 debug_usr_cmd_printf ("Outbound: ");
3619 debug_usr_cmd_dump_message(Reply_Ptr);
3620
3621 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3622 &(Reply_Ptr->StdReplyFrame),
3623 (ccb->ccb_h.status != CAM_REQ_CMP));
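	/*
	 * The detailed status code is reduced to a boolean here: 0 when the
	 * CAM status is CAM_REQ_CMP, 1 for any failure.
	 */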
3624
3625 if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3626 - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
3627 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
3628 ccb->csio.dxfer_len - ccb->csio.resid);
3629 }
3630 if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
3631 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3632 - I2O_SCSI_SENSE_DATA_SZ))) {
3633 int size = ReplySizeInBytes
3634 - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3635 - I2O_SCSI_SENSE_DATA_SZ;
3636
3637 if (size > sizeof(ccb->csio.sense_data)) {
3638 size = sizeof(ccb->csio.sense_data);
3639 }
3640 bcopy ((caddr_t)&(ccb->csio.sense_data), (caddr_t)Reply_Ptr->SenseData,
3641 size);
3642 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
3643 Reply_Ptr, size);
3644 }
3645
3646 /* Free up in-kernel buffers */
2038fb68 3647 while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3648 /* Copy out as necessary */
3649 if ((error == 0)
3650 /* DIR bit considered `