Initial import from FreeBSD RELENG_4:
[dragonfly.git] / sys / dev / raid / asr / asr.c
CommitLineData
984263bc
MD
1/* $FreeBSD: src/sys/dev/asr/asr.c,v 1.3.2.2 2001/08/23 05:21:29 scottl Exp $ */
2/*
3 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
4 * Copyright (c) 2000-2001 Adaptec Corporation
5 * All rights reserved.
6 *
7 * TERMS AND CONDITIONS OF USE
8 *
9 * Redistribution and use in source form, with or without modification, are
10 * permitted provided that redistributions of source code must retain the
11 * above copyright notice, this list of conditions and the following disclaimer.
12 *
13 * This software is provided `as is' by Adaptec and any express or implied
14 * warranties, including, but not limited to, the implied warranties of
15 * merchantability and fitness for a particular purpose, are disclaimed. In no
16 * event shall Adaptec be liable for any direct, indirect, incidental, special,
17 * exemplary or consequential damages (including, but not limited to,
18 * procurement of substitute goods or services; loss of use, data, or profits;
19 * or business interruptions) however caused and on any theory of liability,
20 * whether in contract, strict liability, or tort (including negligence or
21 * otherwise) arising in any way out of the use of this driver software, even
22 * if advised of the possibility of such damage.
23 *
24 * SCSI I2O host adapter driver
25 *
26 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
27 * - The 2000S and 2005S do not initialize on some machines,
28 * increased timeout to 255ms from 50ms for the StatusGet
29 * command.
30 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
31 * - I knew this one was too good to be true. The error return
32 * on ioctl commands needs to be compared to CAM_REQ_CMP, not
33 * to the bit masked status.
34 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
35 * - The 2005S that was supported is affectionately called the
36 * Conjoined BAR Firmware. In order to support RAID-5 in a
37 * 16MB low-cost configuration, Firmware was forced to go
38 * to a Split BAR Firmware. This requires a separate IOP and
39 * Messaging base address.
40 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
41 * - Handle support for 2005S Zero Channel RAID solution.
42 * - System locked up if the Adapter locked up. Do not try
43 * to send other commands if the resetIOP command fails. The
44 * fail outstanding command discovery loop was flawed as the
45 * removal of the command from the list prevented discovering
46 * all the commands.
47 * - Comment changes to clarify driver.
48 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
49 * - We do not use the AC_FOUND_DEV event because of I2O.
50 * Removed asr_async.
51 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
52 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
53 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0
54 * mode as this is confused with competitor adapters in run
55 * mode.
56 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove
57 * to prevent operating system panic.
58 * - moved default major number to 154 from 97.
59 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
60 * - The controller is not actually an ASR (Adaptec SCSI RAID)
61 * series that is visible, it's more of an internal code name.
62 * remove any visible references within reason for now.
63 * - bus_ptr->LUN was not correctly zeroed when initially
64 * allocated causing a possible panic of the operating system
65 * during boot.
66 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
67 * - Code always fails for ASR_getTid affecting performance.
68 * - initiated a set of changes that resulted from a formal
69 * code inspection by Mark_Salyzyn@adaptec.com,
70 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
71 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
72 * Their findings were focussed on the LCT & TID handler, and
73 * all resulting changes were to improve code readability,
74 * consistency or have a positive effect on performance.
75 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
76 * - Passthrough returned an incorrect error.
77 * - Passthrough did not migrate the intrinsic scsi layer wakeup
78 * on command completion.
79 * - generate control device nodes using make_dev and delete_dev.
80 * - Performance affected by TID caching reallocing.
81 * - Made suggested changes by Justin_Gibbs@adaptec.com
82 * - use splcam instead of splbio.
83 * - use cam_imask instead of bio_imask.
84 * - use u_int8_t instead of u_char.
85 * - use u_int16_t instead of u_short.
86 * - use u_int32_t instead of u_long where appropriate.
87 * - use 64 bit context handler instead of 32 bit.
88 * - create_ccb should only allocate the worst case
89 * requirements for the driver since CAM may evolve
90 * making union ccb much larger than needed here.
91 * renamed create_ccb to asr_alloc_ccb.
92 * - go nutz justifying all debug prints as macros
93 * defined at the top and remove unsightly ifdefs.
94 * - INLINE STATIC viewed as confusing. Historically
95 * utilized to affect code performance and debug
96 * issues in OS, Compiler or OEM specific situations.
97 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
98 * - Ported from FreeBSD 2.2.X DPT I2O driver.
99 * changed struct scsi_xfer to union ccb/struct ccb_hdr
100 * changed variable name xs to ccb
101 * changed struct scsi_link to struct cam_path
102 * changed struct scsibus_data to struct cam_sim
103 * stopped using fordriver for holding on to the TID
104 * use proprietary packet creation instead of scsi_inquire
105 * CAM layer sends synchronize commands.
106 */
107
108#define ASR_VERSION 1
109#define ASR_REVISION '0'
110#define ASR_SUBREVISION '8'
111#define ASR_MONTH 8
112#define ASR_DAY 21
113#define ASR_YEAR 2001 - 1980
114
115/*
116 * Debug macros to reduce the unsightly ifdefs
117 */
118#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
119# define debug_asr_message(message) \
120 { \
121 u_int32_t * pointer = (u_int32_t *)message; \
122 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);\
123 u_int32_t counter = 0; \
124 \
125 while (length--) { \
126 printf ("%08lx%c", (u_long)*(pointer++), \
127 (((++counter & 7) == 0) || (length == 0)) \
128 ? '\n' \
129 : ' '); \
130 } \
131 }
132#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
133
134#if (defined(DEBUG_ASR))
135 /* Breaks on none STDC based compilers :-( */
136# define debug_asr_printf(fmt,args...) printf(fmt, ##args)
137# define debug_asr_dump_message(message) debug_asr_message(message)
138# define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path);
139 /* None fatal version of the ASSERT macro */
140# if (defined(__STDC__))
141# define ASSERT(phrase) if(!(phrase))printf(#phrase " at line %d file %s\n",__LINE__,__FILE__)
142# else
143# define ASSERT(phrase) if(!(phrase))printf("phrase" " at line %d file %s\n",__LINE__,__FILE__)
144# endif
145#else /* DEBUG_ASR */
146# define debug_asr_printf(fmt,args...)
147# define debug_asr_dump_message(message)
148# define debug_asr_print_path(ccb)
149# define ASSERT(x)
150#endif /* DEBUG_ASR */
151
152/*
153 * If DEBUG_ASR_CMD is defined:
154 * 0 - Display incoming SCSI commands
155 * 1 - add in a quick character before queueing.
156 * 2 - add in outgoing message frames.
157 */
158#if (defined(DEBUG_ASR_CMD))
159# define debug_asr_cmd_printf(fmt,args...) printf(fmt,##args)
160# define debug_asr_dump_ccb(ccb) \
161 { \
162 u_int8_t * cp = (unsigned char *)&(ccb->csio.cdb_io); \
163 int len = ccb->csio.cdb_len; \
164 \
165 while (len) { \
166 debug_asr_cmd_printf (" %02x", *(cp++)); \
167 --len; \
168 } \
169 }
170# if (DEBUG_ASR_CMD > 0)
171# define debug_asr_cmd1_printf debug_asr_cmd_printf
172# else
173# define debug_asr_cmd1_printf(fmt,args...)
174# endif
175# if (DEBUG_ASR_CMD > 1)
176# define debug_asr_cmd2_printf debug_asr_cmd_printf
177# define debug_asr_cmd2_dump_message(message) debug_asr_message(message)
178# else
179# define debug_asr_cmd2_printf(fmt,args...)
180# define debug_asr_cmd2_dump_message(message)
181# endif
182#else /* DEBUG_ASR_CMD */
183# define debug_asr_cmd_printf(fmt,args...)
184# define debug_asr_cmd_dump_ccb(ccb)
185# define debug_asr_cmd1_printf(fmt,args...)
186# define debug_asr_cmd2_printf(fmt,args...)
187# define debug_asr_cmd2_dump_message(message)
188#endif /* DEBUG_ASR_CMD */
189
190#if (defined(DEBUG_ASR_USR_CMD))
191# define debug_usr_cmd_printf(fmt,args...) printf(fmt,##args)
192# define debug_usr_cmd_dump_message(message) debug_usr_message(message)
193#else /* DEBUG_ASR_USR_CMD */
194# define debug_usr_cmd_printf(fmt,args...)
195# define debug_usr_cmd_dump_message(message)
196#endif /* DEBUG_ASR_USR_CMD */
197
198#define dsDescription_size 46 /* Snug as a bug in a rug */
199#include "dev/asr/dptsig.h"
200
201static dpt_sig_S ASR_sig = {
202 { 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
203 PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
204 OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL,
205 ADF_ALL_SC5,
206 0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
207 ASR_MONTH, ASR_DAY, ASR_YEAR,
208/* 01234567890123456789012345678901234567890123456789 < 50 chars */
209 "Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
210 /* ^^^^^ asr_attach alters these to match OS */
211};
212
213#include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */
214#include <sys/kernel.h>
215#include <sys/systm.h>
216#include <sys/malloc.h>
217#include <sys/proc.h>
218#include <sys/conf.h>
219#include <sys/disklabel.h>
220#include <sys/bus.h>
221#include <machine/resource.h>
222#include <machine/bus.h>
223#include <sys/rman.h>
224#include <sys/stat.h>
225
226#include <cam/cam.h>
227#include <cam/cam_ccb.h>
228#include <cam/cam_sim.h>
229#include <cam/cam_xpt_sim.h>
230#include <cam/cam_xpt_periph.h>
231
232#include <cam/scsi/scsi_all.h>
233#include <cam/scsi/scsi_message.h>
234
235#include <vm/vm.h>
236#include <vm/pmap.h>
237#include <machine/cputypes.h>
238#include <machine/clock.h>
239#include <i386/include/vmparam.h>
240
241#include <pci/pcivar.h>
242#include <pci/pcireg.h>
243
244#define STATIC static
245#define INLINE
246
247#if (defined(DEBUG_ASR) && (DEBUG_ASR > 0))
248# undef STATIC
249# define STATIC
250# undef INLINE
251# define INLINE
252#endif
253#define IN
254#define OUT
255#define INOUT
256
257#define osdSwap4(x) ((u_long)ntohl((u_long)(x)))
258#define KVTOPHYS(x) vtophys(x)
259#include "dev/asr/dptalign.h"
260#include "dev/asr/i2oexec.h"
261#include "dev/asr/i2obscsi.h"
262#include "dev/asr/i2odpt.h"
263#include "dev/asr/i2oadptr.h"
264#include "opt_asr.h"
265
266#include "dev/asr/sys_info.h"
267
268/* Configuration Definitions */
269
270#define SG_SIZE 58 /* Scatter Gather list Size */
271#define MAX_TARGET_ID 126 /* Maximum Target ID supported */
272#define MAX_LUN 255 /* Maximum LUN Supported */
273#define MAX_CHANNEL 7 /* Maximum Channel # Supported by driver */
274#define MAX_INBOUND 2000 /* Max CCBs, Also Max Queue Size */
275#define MAX_OUTBOUND 256 /* Maximum outbound frames/adapter */
276#define MAX_INBOUND_SIZE 512 /* Maximum inbound frame size */
277#define MAX_MAP 4194304L /* Maximum mapping size of IOP */
278 /* Also serves as the minimum map for */
279 /* the 2005S zero channel RAID product */
280
281/**************************************************************************
282** ASR Host Adapter structure - One Structure For Each Host Adapter That **
283** Is Configured Into The System. The Structure Supplies Configuration **
284** Information, Status Info, Queue Info And An Active CCB List Pointer. **
285***************************************************************************/
286
/* I2O register set */
typedef struct {
	U8	     Address[0x30];	/* leading registers not used by this driver */
	volatile U32 Status;		/* IOP interrupt status */
	volatile U32 Mask;		/* IOP interrupt mask */
#	define Mask_InterruptsDisabled	0x08
	U32	     x[2];		/* pad up to the FIFO registers */
	volatile U32 ToFIFO;	/* In Bound FIFO */
	volatile U32 FromFIFO;	/* Out Bound FIFO */
} i2oRegs_t;

/*
 * A MIX of performance and space considerations for TID lookups
 */
typedef u_int16_t tid_t;

/* Per-target map of LUN -> TID; TID[] is variable length (size entries). */
typedef struct {
	u_int32_t size;		/* up to MAX_LUN */
	tid_t	  TID[1];	/* variable-length tail */
} lun2tid_t;

/* Per-bus map of target -> lun2tid_t; LUN[] is variable length (size entries). */
typedef struct {
	u_int32_t   size;	/* up to MAX_TARGET */
	lun2tid_t * LUN[1];	/* variable-length tail */
} target2lun_t;

/*
 * To ensure that we only allocate and use the worst case ccb here, lets
 * make our own local ccb union. If asr_alloc_ccb is utilized for another
 * ccb type, ensure that you add the additional structures into our local
 * ccb union. To ensure strict type checking, we will utilize the local
 * ccb definition wherever possible.
 */
union asr_ccb {
	struct ccb_hdr	    ccb_h;	/* For convenience */
	struct ccb_scsiio   csio;
	struct ccb_setasync csa;
};
325
326typedef struct Asr_softc {
327 u_int16_t ha_irq;
328 void * ha_Base; /* base port for each board */
329 u_int8_t * volatile ha_blinkLED;
330 i2oRegs_t * ha_Virt; /* Base address of IOP */
331 U8 * ha_Fvirt; /* Base address of Frames */
332 I2O_IOP_ENTRY ha_SystemTable;
333 LIST_HEAD(,ccb_hdr) ha_ccb; /* ccbs in use */
334 struct cam_path * ha_path[MAX_CHANNEL+1];
335 struct cam_sim * ha_sim[MAX_CHANNEL+1];
336#if __FreeBSD_version >= 400000
337 struct resource * ha_mem_res;
338 struct resource * ha_mes_res;
339 struct resource * ha_irq_res;
340 void * ha_intr;
341#endif
342 PI2O_LCT ha_LCT; /* Complete list of devices */
343# define le_type IdentityTag[0]
344# define I2O_BSA 0x20
345# define I2O_FCA 0x40
346# define I2O_SCSI 0x00
347# define I2O_PORT 0x80
348# define I2O_UNKNOWN 0x7F
349# define le_bus IdentityTag[1]
350# define le_target IdentityTag[2]
351# define le_lun IdentityTag[3]
352 target2lun_t * ha_targets[MAX_CHANNEL+1];
353 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
354 u_long ha_Msgs_Phys;
355
356 u_int8_t ha_in_reset;
357# define HA_OPERATIONAL 0
358# define HA_IN_RESET 1
359# define HA_OFF_LINE 2
360# define HA_OFF_LINE_RECOVERY 3
361 /* Configuration information */
362 /* The target id maximums we take */
363 u_int8_t ha_MaxBus; /* Maximum bus */
364 u_int8_t ha_MaxId; /* Maximum target ID */
365 u_int8_t ha_MaxLun; /* Maximum target LUN */
366 u_int8_t ha_SgSize; /* Max SG elements */
367 u_int8_t ha_pciBusNum;
368 u_int8_t ha_pciDeviceNum;
369 u_int8_t ha_adapter_target[MAX_CHANNEL+1];
370 u_int16_t ha_QueueSize; /* Max outstanding commands */
371 u_int16_t ha_Msgs_Count;
372
373 /* Links into other parents and HBAs */
374 struct Asr_softc * ha_next; /* HBA list */
375
376#ifdef ASR_MEASURE_PERFORMANCE
377#define MAX_TIMEQ_SIZE 256 // assumes MAX 256 scsi commands sent
378 asr_perf_t ha_performance;
379 u_int32_t ha_submitted_ccbs_count;
380
381 // Queueing macros for a circular queue
382#define TIMEQ_FREE_LIST_EMPTY(head, tail) (-1 == (head) && -1 == (tail))
383#define TIMEQ_FREE_LIST_FULL(head, tail) ((((tail) + 1) % MAX_TIMEQ_SIZE) == (head))
384#define ENQ_TIMEQ_FREE_LIST(item, Q, head, tail) \
385 if (!TIMEQ_FREE_LIST_FULL((head), (tail))) { \
386 if TIMEQ_FREE_LIST_EMPTY((head),(tail)) { \
387 (head) = (tail) = 0; \
388 } \
389 else (tail) = ((tail) + 1) % MAX_TIMEQ_SIZE; \
390 Q[(tail)] = (item); \
391 } \
392 else { \
393 debug_asr_printf("asr: Enqueueing when TimeQ Free List is full... This should not happen!\n"); \
394 }
395#define DEQ_TIMEQ_FREE_LIST(item, Q, head, tail) \
396 if (!TIMEQ_FREE_LIST_EMPTY((head), (tail))) { \
397 item = Q[(head)]; \
398 if ((head) == (tail)) { (head) = (tail) = -1; } \
399 else (head) = ((head) + 1) % MAX_TIMEQ_SIZE; \
400 } \
401 else { \
402 (item) = -1; \
403 debug_asr_printf("asr: Dequeueing when TimeQ Free List is empty... This should not happen!\n"); \
404 }
405
406 // Circular queue of time stamps
407 struct timeval ha_timeQ[MAX_TIMEQ_SIZE];
408 u_int32_t ha_timeQFreeList[MAX_TIMEQ_SIZE];
409 int ha_timeQFreeHead;
410 int ha_timeQFreeTail;
411#endif
412} Asr_softc_t;
413
414STATIC Asr_softc_t * Asr_softc;
415
416/*
417 * Prototypes of the routines we have in this object.
418 */
419
420/* Externally callable routines */
421#if __FreeBSD_version >= 400000
422#define PROBE_ARGS IN device_t tag
423#define PROBE_RET int
424#define PROBE_SET() u_long id = (pci_get_device(tag)<<16)|pci_get_vendor(tag)
425#define PROBE_RETURN(retval) if(retval){device_set_desc(tag,retval);return(0);}else{return(ENXIO);}
426#define ATTACH_ARGS IN device_t tag
427#define ATTACH_RET int
428#define ATTACH_SET() int unit = device_get_unit(tag)
429#define ATTACH_RETURN(retval) return(retval)
430#else
431#define PROBE_ARGS IN pcici_t tag, IN pcidi_t id
432#define PROBE_RET const char *
433#define PROBE_SET()
434#define PROBE_RETURN(retval) return(retval)
435#define ATTACH_ARGS IN pcici_t tag, IN int unit
436#define ATTACH_RET void
437#define ATTACH_SET()
438#define ATTACH_RETURN(retval) return
439#endif
440/* I2O HDM interface */
441STATIC PROBE_RET asr_probe __P((PROBE_ARGS));
442STATIC ATTACH_RET asr_attach __P((ATTACH_ARGS));
443/* DOMINO placeholder */
444STATIC PROBE_RET domino_probe __P((PROBE_ARGS));
445STATIC ATTACH_RET domino_attach __P((ATTACH_ARGS));
446/* MODE0 adapter placeholder */
447STATIC PROBE_RET mode0_probe __P((PROBE_ARGS));
448STATIC ATTACH_RET mode0_attach __P((ATTACH_ARGS));
449
450STATIC Asr_softc_t * ASR_get_sc __P((
451 IN dev_t dev));
452STATIC int asr_ioctl __P((
453 IN dev_t dev,
454 IN u_long cmd,
455 INOUT caddr_t data,
456 int flag,
457 struct proc * proc));
458STATIC int asr_open __P((
459 IN dev_t dev,
460 int32_t flags,
461 int32_t ifmt,
462 IN struct proc * proc));
463STATIC int asr_close __P((
464 dev_t dev,
465 int flags,
466 int ifmt,
467 struct proc * proc));
468STATIC int asr_intr __P((
469 IN Asr_softc_t * sc));
470STATIC void asr_timeout __P((
471 INOUT void * arg));
472STATIC int ASR_init __P((
473 IN Asr_softc_t * sc));
474STATIC INLINE int ASR_acquireLct __P((
475 INOUT Asr_softc_t * sc));
476STATIC INLINE int ASR_acquireHrt __P((
477 INOUT Asr_softc_t * sc));
478STATIC void asr_action __P((
479 IN struct cam_sim * sim,
480 IN union ccb * ccb));
481STATIC void asr_poll __P((
482 IN struct cam_sim * sim));
483
484/*
485 * Here is the auto-probe structure used to nest our tests appropriately
486 * during the startup phase of the operating system.
487 */
488#if __FreeBSD_version >= 400000
489STATIC device_method_t asr_methods[] = {
490 DEVMETHOD(device_probe, asr_probe),
491 DEVMETHOD(device_attach, asr_attach),
492 { 0, 0 }
493};
494
495STATIC driver_t asr_driver = {
496 "asr",
497 asr_methods,
498 sizeof(Asr_softc_t)
499};
500
501STATIC devclass_t asr_devclass;
502
503DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);
504
505STATIC device_method_t domino_methods[] = {
506 DEVMETHOD(device_probe, domino_probe),
507 DEVMETHOD(device_attach, domino_attach),
508 { 0, 0 }
509};
510
511STATIC driver_t domino_driver = {
512 "domino",
513 domino_methods,
514 0
515};
516
517STATIC devclass_t domino_devclass;
518
519DRIVER_MODULE(domino, pci, domino_driver, domino_devclass, 0, 0);
520
521STATIC device_method_t mode0_methods[] = {
522 DEVMETHOD(device_probe, mode0_probe),
523 DEVMETHOD(device_attach, mode0_attach),
524 { 0, 0 }
525};
526
527STATIC driver_t mode0_driver = {
528 "mode0",
529 mode0_methods,
530 0
531};
532
533STATIC devclass_t mode0_devclass;
534
535DRIVER_MODULE(mode0, pci, mode0_driver, mode0_devclass, 0, 0);
536#else
537STATIC u_long asr_pcicount = 0;
538STATIC struct pci_device asr_pcidev = {
539 "asr",
540 asr_probe,
541 asr_attach,
542 &asr_pcicount,
543 NULL
544};
545DATA_SET (asr_pciset, asr_pcidev);
546
547STATIC u_long domino_pcicount = 0;
548STATIC struct pci_device domino_pcidev = {
549 "domino",
550 domino_probe,
551 domino_attach,
552 &domino_pcicount,
553 NULL
554};
555DATA_SET (domino_pciset, domino_pcidev);
556
557STATIC u_long mode0_pcicount = 0;
558STATIC struct pci_device mode0_pcidev = {
559 "mode0",
560 mode0_probe,
561 mode0_attach,
562 &mode0_pcicount,
563 NULL
564};
565DATA_SET (mode0_pciset, mode0_pcidev);
566#endif
567
/*
 * devsw for asr hba driver
 *
 * only ioctl is used. the sd driver provides all other access.
 */
#define CDEV_MAJOR 154	 /* preferred default character major */
STATIC struct cdevsw asr_cdevsw = {
	asr_open,	/* open     */
	asr_close,	/* close    */
	noread,		/* read     */
	nowrite,	/* write    */
	asr_ioctl,	/* ioctl    */
	nopoll,		/* poll     */
	nommap,		/* mmap     */
	nostrategy,	/* strategy */
	"asr",		/* name     */
	CDEV_MAJOR,	/* maj      */
	nodump,		/* dump     */
	nopsize,	/* psize    */
	0,		/* flags    */
	-1		/* bmaj     */
};
590
591#ifdef ASR_MEASURE_PERFORMANCE
592STATIC u_int32_t asr_time_delta __P((IN struct timeval start,
593 IN struct timeval end));
594#endif
595
/*
 * Initialize the dynamic cdevsw hooks.
 *
 * Runs once (guarded by asr_devsw_installed): locates a free character
 * major number starting at CDEV_MAJOR, registers asr_cdevsw under it and
 * removes any stale control node for minor 0 so the adapter scan can
 * recreate the nodes.
 */
STATIC void
asr_drvinit (
	void * unused)
{
	static int asr_devsw_installed = 0;

	if (asr_devsw_installed) {
		return;
	}
	asr_devsw_installed++;
	/*
	 * Find a free spot (the report during driver load used by
	 * osd layer in engine to generate the controlling nodes).
	 */
	while ((asr_cdevsw.d_maj < NUMCDEVSW)
	 && (devsw(makedev(asr_cdevsw.d_maj,0)) != (struct cdevsw *)NULL)) {
		++asr_cdevsw.d_maj;
	}
	/* Nothing free at or above CDEV_MAJOR: rescan from 0 up to CDEV_MAJOR. */
	if (asr_cdevsw.d_maj >= NUMCDEVSW) for (
	  asr_cdevsw.d_maj = 0;
	  (asr_cdevsw.d_maj < CDEV_MAJOR)
	   && (devsw(makedev(asr_cdevsw.d_maj,0)) != (struct cdevsw *)NULL);
	  ++asr_cdevsw.d_maj);
	/*
	 * Come to papa
	 */
	cdevsw_add(&asr_cdevsw);
	/*
	 * delete any nodes that would attach to the primary adapter,
	 * let the adapter scans add them.
	 */
	destroy_dev(makedev(asr_cdevsw.d_maj,0));
} /* asr_drvinit */
632
633/* Must initialize before CAM layer picks up our HBA driver */
634SYSINIT(asrdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,asr_drvinit,NULL)
635
636/* I2O support routines */
637#define defAlignLong(STRUCT,NAME) char NAME[sizeof(STRUCT)]
638#define getAlignLong(STRUCT,NAME) ((STRUCT *)(NAME))
639
/*
 * Fill message with default.
 *
 * Zeroes the caller-supplied `size' byte buffer, stamps in the I2O
 * version/offset word, the frame size in 32 bit words (rounded up) and
 * an initiator address of 1, then returns the buffer cast (via
 * getAlignLong) as a message frame pointer for further field fill-in.
 */
STATIC PI2O_MESSAGE_FRAME
ASR_fillMessage (
	IN char * Message,
	IN u_int16_t size)
{
	OUT PI2O_MESSAGE_FRAME Message_Ptr;

	Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message);
	bzero ((void *)Message_Ptr, size);
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
	/* Frame size is expressed in U32 units, rounded up. */
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  (size + sizeof(U32) - 1) >> 2);
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	return (Message_Ptr);
} /* ASR_fillMessage */
658
659#define EMPTY_QUEUE ((U32)-1L)
660
/*
 * Pop a free inbound message frame offset from the adapter's To-FIFO.
 * Returns EMPTY_QUEUE when none is available.
 */
STATIC INLINE U32
ASR_getMessage(
	IN i2oRegs_t * virt)
{
	OUT U32	MessageOffset;

	/*
	 * If the first (volatile) register read reports empty, retry once
	 * before giving up -- NOTE(review): presumably covers a transient
	 * empty indication from the IOP; confirm against the I2O spec.
	 */
	if ((MessageOffset = virt->ToFIFO) == EMPTY_QUEUE) {
		MessageOffset = virt->ToFIFO;
	}
	return (MessageOffset);
} /* ASR_getMessage */
672
/*
 * Issue a polled command.
 *
 * Acquires an inbound frame (polling up to 1500 * 10ms), copies the
 * message into adapter frame memory, disables adapter interrupts and
 * posts the frame.  Returns the *previous* interrupt mask so the caller
 * can restore it once the reply has been polled, or (U32)-1L if no
 * inbound frame could be acquired.
 */
STATIC U32
ASR_initiateCp (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt,
	IN PI2O_MESSAGE_FRAME Message)
{
	OUT U32	Mask = -1L;
	U32	MessageOffset;
	u_int	Delay = 1500;

	/*
	 * ASR_initiateCp is only used for synchronous commands and will
	 * be made more resilient to adapter delays since commands like
	 * resetIOP can cause the adapter to be deaf for a little time.
	 */
	while (((MessageOffset = ASR_getMessage(virt)) == EMPTY_QUEUE)
	 && (--Delay != 0)) {
		DELAY (10000);
	}
	if (MessageOffset != EMPTY_QUEUE) {
		/* Copy the frame into adapter memory before posting it. */
		bcopy (Message, fvirt + MessageOffset,
		  I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
		/*
		 * Disable the Interrupts
		 */
		virt->Mask = (Mask = virt->Mask) | Mask_InterruptsDisabled;
		virt->ToFIFO = MessageOffset;
	}
	return (Mask);
} /* ASR_initiateCp */
704
/*
 * Reset the adapter.
 *
 * Builds an ExecIopReset frame with a one-word reply buffer appended to
 * it, posts the frame via ASR_initiateCp, then polls the reply word
 * (which the IOP writes by DMA to its physical address) for up to
 * 200 * 10ms.  Returns the reply status word, or 0 if no inbound frame
 * could be acquired.
 */
STATIC U32
ASR_resetIOP (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt)
{
	struct resetMessage {
		I2O_EXEC_IOP_RESET_MESSAGE M;
		U32			   R;	/* reply status word lands here */
	};
	defAlignLong(struct resetMessage,Message);
	PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
	OUT U32 * volatile Reply_Ptr;
	U32	Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
	/*
	 * Reset the Reply Status
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0;
	/* Tell the IOP the physical address to DMA the status word to. */
	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
	  KVTOPHYS((void *)Reply_Ptr));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 2s.
		 */
		u_int8_t Delay = 200;

		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
			DELAY (10000);
		}
		/*
		 * Re-enable the interrupts.
		 */
		virt->Mask = Old;
		ASSERT (*Reply_Ptr);
		return (*Reply_Ptr);
	}
	ASSERT (Old != (U32)-1L);
	return (0);
} /* ASR_resetIOP */
758
/*
 * Get the current state of the adapter.
 *
 * Posts an ExecStatusGet frame pointing at the caller's reply buffer
 * (the IOP fills it by DMA) and polls the buffer's SyncByte for up to
 * 255 * 1ms.  Returns `buffer' on success, NULL if the frame could not
 * be posted or the IOP did not respond in time.  Always restores the
 * interrupt mask saved by ASR_initiateCp.
 */
STATIC INLINE PI2O_EXEC_STATUS_GET_REPLY
ASR_getStatus (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt,
	OUT PI2O_EXEC_STATUS_GET_REPLY buffer)
{
	defAlignLong(I2O_EXEC_STATUS_GET_MESSAGE,Message);
	PI2O_EXEC_STATUS_GET_MESSAGE Message_Ptr;
	U32	Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
	  I2O_EXEC_STATUS_GET);
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
	  KVTOPHYS((void *)buffer));
	/* This one is a Byte Count */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
	  sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Reset the Reply Status
	 */
	bzero ((void *)buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll).  Timeouts are dangerous if
		 * the card is truly responsive.  We allow up to 255ms here
		 * (raised from 50ms, see the V1.08 changelog: some 2000S
		 * and 2005S boards need the longer window).
		 */
		u_int8_t Delay = 255;

		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
			if (--Delay == 0) {
				/* Timed out: report failure to the caller. */
				buffer = (PI2O_EXEC_STATUS_GET_REPLY)NULL;
				break;
			}
			DELAY (1000);
		}
		/*
		 * Re-enable the interrupts.
		 */
		virt->Mask = Old;
		return (buffer);
	}
	return ((PI2O_EXEC_STATUS_GET_REPLY)NULL);
} /* ASR_getStatus */
813
814/*
815 * Check if the device is a SCSI I2O HBA, and add it to the list.
816 */
817
818/*
819 * Probe for ASR controller. If we find it, we will use it.
820 * virtual adapters.
821 */
822STATIC PROBE_RET
823asr_probe(PROBE_ARGS)
824{
825 PROBE_SET();
826 if ((id == 0xA5011044) || (id == 0xA5111044)) {
827 PROBE_RETURN ("Adaptec Caching SCSI RAID");
828 }
829 PROBE_RETURN (NULL);
830} /* asr_probe */
831
832/*
833 * Probe/Attach for DOMINO chipset.
834 */
835STATIC PROBE_RET
836domino_probe(PROBE_ARGS)
837{
838 PROBE_SET();
839 if (id == 0x10121044) {
840 PROBE_RETURN ("Adaptec Caching Memory Controller");
841 }
842 PROBE_RETURN (NULL);
843} /* domino_probe */
844
/* Placeholder attach: claim the DOMINO device, no initialization needed. */
STATIC ATTACH_RET
domino_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* domino_attach */
850
851/*
852 * Probe/Attach for MODE0 adapters.
853 */
854STATIC PROBE_RET
855mode0_probe(PROBE_ARGS)
856{
857 PROBE_SET();
858
859 /*
860 * If/When we can get a business case to commit to a
861 * Mode0 driver here, we can make all these tests more
862 * specific and robust. Mode0 adapters have their processors
863 * turned off, this the chips are in a raw state.
864 */
865
866 /* This is a PLX9054 */
867 if (id == 0x905410B5) {
868 PROBE_RETURN ("Adaptec Mode0 PM3757");
869 }
870 /* This is a PLX9080 */
871 if (id == 0x908010B5) {
872 PROBE_RETURN ("Adaptec Mode0 PM3754/PM3755");
873 }
874 /* This is a ZION 80303 */
875 if (id == 0x53098086) {
876 PROBE_RETURN ("Adaptec Mode0 3010S");
877 }
878 /* This is an i960RS */
879 if (id == 0x39628086) {
880 PROBE_RETURN ("Adaptec Mode0 2100S");
881 }
882 /* This is an i960RN */
883 if (id == 0x19648086) {
884 PROBE_RETURN ("Adaptec Mode0 PM2865/2400A/3200S/3400S");
885 }
886#if 0 /* this would match any generic i960 -- mjs */
887 /* This is an i960RP (typically also on Motherboards) */
888 if (id == 0x19608086) {
889 PROBE_RETURN ("Adaptec Mode0 PM2554/PM1554/PM2654");
890 }
891#endif
892 PROBE_RETURN (NULL);
893} /* mode0_probe */
894
/* Placeholder attach: claim the Mode0 device, no initialization needed. */
STATIC ATTACH_RET
mode0_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* mode0_attach */
900
901STATIC INLINE union asr_ccb *
902asr_alloc_ccb (
903 IN Asr_softc_t * sc)
904{
905 OUT union asr_ccb * new_ccb;
906
907 if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb),
908 M_DEVBUF, M_WAITOK)) != (union asr_ccb *)NULL) {
909 bzero (new_ccb, sizeof(*new_ccb));
910 new_ccb->ccb_h.pinfo.priority = 1;
911 new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
912 new_ccb->ccb_h.spriv_ptr0 = sc;
913 }
914 return (new_ccb);
915} /* asr_alloc_ccb */
916
/* Release a ccb obtained from asr_alloc_ccb. */
STATIC INLINE void
asr_free_ccb (
	IN union asr_ccb * free_ccb)
{
	free(free_ccb, M_DEVBUF);
} /* asr_free_ccb */
923
924/*
925 * Print inquiry data `carefully'
926 */
927STATIC void
928ASR_prstring (
929 u_int8_t * s,
930 int len)
931{
932 while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
933 printf ("%c", *(s++));
934 }
935} /* ASR_prstring */
936
937/*
938 * Prototypes
939 */
940STATIC INLINE int ASR_queue __P((
941 IN Asr_softc_t * sc,
942 IN PI2O_MESSAGE_FRAME Message));
/*
 * Send a message synchronously and without Interrupt to a ccb.
 *
 * Adapter interrupts are masked and completion is busy-polled by calling
 * asr_intr() directly until the ccb leaves CAM_REQ_INPROG; the previous
 * interrupt mask and spl level are restored before returning.  Returns
 * the final ccb status (CAM_REQUEUE_REQ when no inbound frame could be
 * acquired).
 */
STATIC int
ASR_queue_s (
	INOUT union asr_ccb * ccb,
	IN PI2O_MESSAGE_FRAME Message)
{
	int s;
	U32 Mask;
	Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	/* Prevent interrupt service */
	s = splcam ();
	/* Save the current mask while disabling adapter interrupts. */
	sc->ha_Virt->Mask = (Mask = sc->ha_Virt->Mask)
	  | Mask_InterruptsDisabled;

	if (ASR_queue (sc, Message) == EMPTY_QUEUE) {
		/* Could not post: ask CAM to re-queue the request. */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	}

	/*
	 * Wait for this board to report a finished instruction.
	 */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		(void)asr_intr (sc);
	}

	/* Re-enable Interrupts */
	sc->ha_Virt->Mask = Mask;
	splx(s);

	return (ccb->ccb_h.status);
} /* ASR_queue_s */
984
985/*
986 * Send a message synchronously to a Asr_softc_t
987 */
988STATIC int
989ASR_queue_c (
990 IN Asr_softc_t * sc,
991 IN PI2O_MESSAGE_FRAME Message)
992{
993 union asr_ccb * ccb;
994 OUT int status;
995
996 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
997 return (CAM_REQUEUE_REQ);
998 }
999
1000 status = ASR_queue_s (ccb, Message);
1001
1002 asr_free_ccb(ccb);
1003
1004 return (status);
1005} /* ASR_queue_c */
1006
/*
 * Add the specified ccb to the active queue and arm its watchdog.
 *
 * Runs at splcam: the active list is also manipulated from timeout and
 * interrupt context (critical locking added in V1.04 to prevent panics).
 */
STATIC INLINE void
ASR_ccbAdd (
	IN Asr_softc_t * sc,
	INOUT union asr_ccb * ccb)
{
	int s;

	s = splcam();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flashes switching from write back to write thru.
			 */
			ccb->ccb_h.timeout = 6 * 60 * 1000;
		}
		/* ccb_h.timeout is in milliseconds; convert to ticks. */
		ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
		  (ccb->ccb_h.timeout * hz) / 1000);
	}
	splx(s);
} /* ASR_ccbAdd */
1033
/*
 * Remove the specified ccb from the active queue.
 *
 * Cancels the watchdog armed by ASR_ccbAdd and unlinks the ccb, all
 * under splcam for the same locking reasons as ASR_ccbAdd.
 */
STATIC INLINE void
ASR_ccbRemove (
	IN Asr_softc_t * sc,
	INOUT union asr_ccb * ccb)
{
	int s;

	s = splcam();
	untimeout(asr_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
	LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
	splx(s);
} /* ASR_ccbRemove */
1049
1050/*
1051 * Fail all the active commands, so they get re-issued by the operating
1052 * system.
1053 */
1054STATIC INLINE void
1055ASR_failActiveCommands (
1056 IN Asr_softc_t * sc)
1057{
1058 struct ccb_hdr * ccb;
1059 int s;
1060
1061#if 0 /* Currently handled by callers, unnecessary paranoia currently */
1062 /* Left in for historical perspective. */
1063 defAlignLong(I2O_EXEC_LCT_NOTIFY_MESSAGE,Message);
1064 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
1065
1066 /* Send a blind LCT command to wait for the enableSys to complete */
1067 Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)ASR_fillMessage(Message,
1068 sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT));
1069 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1070 I2O_EXEC_LCT_NOTIFY);
1071 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
1072 I2O_CLASS_MATCH_ANYCLASS);
1073 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1074#endif
1075
1076 s = splcam();
1077 /*
1078 * We do not need to inform the CAM layer that we had a bus
1079 * reset since we manage it on our own, this also prevents the
1080 * SCSI_DELAY settling that would be required on other systems.
1081 * The `SCSI_DELAY' has already been handled by the card via the
1082 * acquisition of the LCT table while we are at CAM priority level.
1083 * for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
1084 * xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
1085 * }
1086 */
1087 while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != (struct ccb_hdr *)NULL) {
1088 ASR_ccbRemove (sc, (union asr_ccb *)ccb);
1089
1090 ccb->status &= ~CAM_STATUS_MASK;
1091 ccb->status |= CAM_REQUEUE_REQ;
1092 /* Nothing Transfered */
1093 ((struct ccb_scsiio *)ccb)->resid
1094 = ((struct ccb_scsiio *)ccb)->dxfer_len;
1095
1096 if (ccb->path) {
1097 xpt_done ((union ccb *)ccb);
1098 } else {
1099 wakeup ((caddr_t)ccb);
1100 }
1101 }
1102 splx(s);
1103} /* ASR_failActiveCommands */
1104
1105/*
1106 * The following command causes the HBA to reset the specific bus
1107 */
1108STATIC INLINE void
1109ASR_resetBus(
1110 IN Asr_softc_t * sc,
1111 IN int bus)
1112{
1113 defAlignLong(I2O_HBA_BUS_RESET_MESSAGE,Message);
1114 I2O_HBA_BUS_RESET_MESSAGE * Message_Ptr;
1115 PI2O_LCT_ENTRY Device;
1116
1117 Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(Message,
1118 sizeof(I2O_HBA_BUS_RESET_MESSAGE));
1119 I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
1120 I2O_HBA_BUS_RESET);
1121 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
1122 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1123 ++Device) {
1124 if (((Device->le_type & I2O_PORT) != 0)
1125 && (Device->le_bus == bus)) {
1126 I2O_MESSAGE_FRAME_setTargetAddress(
1127 &Message_Ptr->StdMessageFrame,
1128 I2O_LCT_ENTRY_getLocalTID(Device));
1129 /* Asynchronous command, with no expectations */
1130 (void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1131 break;
1132 }
1133 }
1134} /* ASR_resetBus */
1135
1136STATIC INLINE int
1137ASR_getBlinkLedCode (
1138 IN Asr_softc_t * sc)
1139{
1140 if ((sc != (Asr_softc_t *)NULL)
1141 && (sc->ha_blinkLED != (u_int8_t *)NULL)
1142 && (sc->ha_blinkLED[1] == 0xBC)) {
1143 return (sc->ha_blinkLED[0]);
1144 }
1145 return (0);
1146} /* ASR_getBlinkCode */
1147
1148/*
1149 * Determine the address of an TID lookup. Must be done at high priority
1150 * since the address can be changed by other threads of execution.
1151 *
1152 * Returns NULL pointer if not indexible (but will attempt to generate
1153 * an index if `new_entry' flag is set to TRUE).
1154 *
1155 * All addressible entries are to be guaranteed zero if never initialized.
1156 */
1157STATIC INLINE tid_t *
1158ASR_getTidAddress(
1159 INOUT Asr_softc_t * sc,
1160 IN int bus,
1161 IN int target,
1162 IN int lun,
1163 IN int new_entry)
1164{
1165 target2lun_t * bus_ptr;
1166 lun2tid_t * target_ptr;
1167 unsigned new_size;
1168
1169 /*
1170 * Validity checking of incoming parameters. More of a bound
1171 * expansion limit than an issue with the code dealing with the
1172 * values.
1173 *
1174 * sc must be valid before it gets here, so that check could be
1175 * dropped if speed a critical issue.
1176 */
1177 if ((sc == (Asr_softc_t *)NULL)
1178 || (bus > MAX_CHANNEL)
1179 || (target > sc->ha_MaxId)
1180 || (lun > sc->ha_MaxLun)) {
1181 debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
1182 (u_long)sc, bus, target, lun);
1183 return ((tid_t *)NULL);
1184 }
1185 /*
1186 * See if there is an associated bus list.
1187 *
1188 * for performance, allocate in size of BUS_CHUNK chunks.
1189 * BUS_CHUNK must be a power of two. This is to reduce
1190 * fragmentation effects on the allocations.
1191 */
1192# define BUS_CHUNK 8
1193 new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
1194 if ((bus_ptr = sc->ha_targets[bus]) == (target2lun_t *)NULL) {
1195 /*
1196 * Allocate a new structure?
1197 * Since one element in structure, the +1
1198 * needed for size has been abstracted.
1199 */
1200 if ((new_entry == FALSE)
1201 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc (
1202 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
1203 M_TEMP, M_WAITOK))
1204 == (target2lun_t *)NULL)) {
1205 debug_asr_printf("failed to allocate bus list\n");
1206 return ((tid_t *)NULL);
1207 }
1208 bzero (bus_ptr, sizeof(*bus_ptr)
1209 + (sizeof(bus_ptr->LUN) * new_size));
1210 bus_ptr->size = new_size + 1;
1211 } else if (bus_ptr->size <= new_size) {
1212 target2lun_t * new_bus_ptr;
1213
1214 /*
1215 * Reallocate a new structure?
1216 * Since one element in structure, the +1
1217 * needed for size has been abstracted.
1218 */
1219 if ((new_entry == FALSE)
1220 || ((new_bus_ptr = (target2lun_t *)malloc (
1221 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
1222 M_TEMP, M_WAITOK))
1223 == (target2lun_t *)NULL)) {
1224 debug_asr_printf("failed to reallocate bus list\n");
1225 return ((tid_t *)NULL);
1226 }
1227 /*
1228 * Zero and copy the whole thing, safer, simpler coding
1229 * and not really performance critical at this point.
1230 */
1231 bzero (new_bus_ptr, sizeof(*bus_ptr)
1232 + (sizeof(bus_ptr->LUN) * new_size));
1233 bcopy (bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
1234 + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
1235 sc->ha_targets[bus] = new_bus_ptr;
1236 free (bus_ptr, M_TEMP);
1237 bus_ptr = new_bus_ptr;
1238 bus_ptr->size = new_size + 1;
1239 }
1240 /*
1241 * We now have the bus list, lets get to the target list.
1242 * Since most systems have only *one* lun, we do not allocate
1243 * in chunks as above, here we allow one, then in chunk sizes.
1244 * TARGET_CHUNK must be a power of two. This is to reduce
1245 * fragmentation effects on the allocations.
1246 */
1247# define TARGET_CHUNK 8
1248 if ((new_size = lun) != 0) {
1249 new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
1250 }
1251 if ((target_ptr = bus_ptr->LUN[target]) == (lun2tid_t *)NULL) {
1252 /*
1253 * Allocate a new structure?
1254 * Since one element in structure, the +1
1255 * needed for size has been abstracted.
1256 */
1257 if ((new_entry == FALSE)
1258 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc (
1259 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1260 M_TEMP, M_WAITOK))
1261 == (lun2tid_t *)NULL)) {
1262 debug_asr_printf("failed to allocate target list\n");
1263 return ((tid_t *)NULL);
1264 }
1265 bzero (target_ptr, sizeof(*target_ptr)
1266 + (sizeof(target_ptr->TID) * new_size));
1267 target_ptr->size = new_size + 1;
1268 } else if (target_ptr->size <= new_size) {
1269 lun2tid_t * new_target_ptr;
1270
1271 /*
1272 * Reallocate a new structure?
1273 * Since one element in structure, the +1
1274 * needed for size has been abstracted.
1275 */
1276 if ((new_entry == FALSE)
1277 || ((new_target_ptr = (lun2tid_t *)malloc (
1278 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1279 M_TEMP, M_WAITOK))
1280 == (lun2tid_t *)NULL)) {
1281 debug_asr_printf("failed to reallocate target list\n");
1282 return ((tid_t *)NULL);
1283 }
1284 /*
1285 * Zero and copy the whole thing, safer, simpler coding
1286 * and not really performance critical at this point.
1287 */
1288 bzero (new_target_ptr, sizeof(*target_ptr)
1289 + (sizeof(target_ptr->TID) * new_size));
1290 bcopy (target_ptr, new_target_ptr,
1291 sizeof(*target_ptr)
1292 + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
1293 bus_ptr->LUN[target] = new_target_ptr;
1294 free (target_ptr, M_TEMP);
1295 target_ptr = new_target_ptr;
1296 target_ptr->size = new_size + 1;
1297 }
1298 /*
1299 * Now, acquire the TID address from the LUN indexed list.
1300 */
1301 return (&(target_ptr->TID[lun]));
1302} /* ASR_getTidAddress */
1303
1304/*
1305 * Get a pre-existing TID relationship.
1306 *
1307 * If the TID was never set, return (tid_t)-1.
1308 *
1309 * should use mutex rather than spl.
1310 */
1311STATIC INLINE tid_t
1312ASR_getTid (
1313 IN Asr_softc_t * sc,
1314 IN int bus,
1315 IN int target,
1316 IN int lun)
1317{
1318 tid_t * tid_ptr;
1319 int s;
1320 OUT tid_t retval;
1321
1322 s = splcam();
1323 if (((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, FALSE))
1324 == (tid_t *)NULL)
1325 /* (tid_t)0 or (tid_t)-1 indicate no TID */
1326 || (*tid_ptr == (tid_t)0)) {
1327 splx(s);
1328 return ((tid_t)-1);
1329 }
1330 retval = *tid_ptr;
1331 splx(s);
1332 return (retval);
1333} /* ASR_getTid */
1334
1335/*
1336 * Set a TID relationship.
1337 *
1338 * If the TID was not set, return (tid_t)-1.
1339 *
1340 * should use mutex rather than spl.
1341 */
1342STATIC INLINE tid_t
1343ASR_setTid (
1344 INOUT Asr_softc_t * sc,
1345 IN int bus,
1346 IN int target,
1347 IN int lun,
1348 INOUT tid_t TID)
1349{
1350 tid_t * tid_ptr;
1351 int s;
1352
1353 if (TID != (tid_t)-1) {
1354 if (TID == 0) {
1355 return ((tid_t)-1);
1356 }
1357 s = splcam();
1358 if ((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, TRUE))
1359 == (tid_t *)NULL) {
1360 splx(s);
1361 return ((tid_t)-1);
1362 }
1363 *tid_ptr = TID;
1364 splx(s);
1365 }
1366 return (TID);
1367} /* ASR_setTid */
1368
1369/*-------------------------------------------------------------------------*/
1370/* Function ASR_rescan */
1371/*-------------------------------------------------------------------------*/
1372/* The Parameters Passed To This Function Are : */
1373/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1374/* */
1375/* This Function Will rescan the adapter and resynchronize any data */
1376/* */
1377/* Return : 0 For OK, Error Code Otherwise */
1378/*-------------------------------------------------------------------------*/
1379
1380STATIC INLINE int
1381ASR_rescan(
1382 IN Asr_softc_t * sc)
1383{
1384 int bus;
1385 OUT int error;
1386
1387 /*
1388 * Re-acquire the LCT table and synchronize us to the adapter.
1389 */
1390 if ((error = ASR_acquireLct(sc)) == 0) {
1391 error = ASR_acquireHrt(sc);
1392 }
1393
1394 if (error != 0) {
1395 return error;
1396 }
1397
1398 bus = sc->ha_MaxBus;
1399 /* Reset all existing cached TID lookups */
1400 do {
1401 int target, event = 0;
1402
1403 /*
1404 * Scan for all targets on this bus to see if they
1405 * got affected by the rescan.
1406 */
1407 for (target = 0; target <= sc->ha_MaxId; ++target) {
1408 int lun;
1409
1410 /* Stay away from the controller ID */
1411 if (target == sc->ha_adapter_target[bus]) {
1412 continue;
1413 }
1414 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
1415 PI2O_LCT_ENTRY Device;
1416 tid_t TID = (tid_t)-1;
1417 tid_t LastTID;
1418
1419 /*
1420 * See if the cached TID changed. Search for
1421 * the device in our new LCT.
1422 */
1423 for (Device = sc->ha_LCT->LCTEntry;
1424 Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
1425 + I2O_LCT_getTableSize(sc->ha_LCT));
1426 ++Device) {
1427 if ((Device->le_type != I2O_UNKNOWN)
1428 && (Device->le_bus == bus)
1429 && (Device->le_target == target)
1430 && (Device->le_lun == lun)
1431 && (I2O_LCT_ENTRY_getUserTID(Device)
1432 == 0xFFF)) {
1433 TID = I2O_LCT_ENTRY_getLocalTID(
1434 Device);
1435 break;
1436 }
1437 }
1438 /*
1439 * Indicate to the OS that the label needs
1440 * to be recalculated, or that the specific
1441 * open device is no longer valid (Merde)
1442 * because the cached TID changed.
1443 */
1444 LastTID = ASR_getTid (sc, bus, target, lun);
1445 if (LastTID != TID) {
1446 struct cam_path * path;
1447
1448 if (xpt_create_path(&path,
1449 /*periph*/NULL,
1450 cam_sim_path(sc->ha_sim[bus]),
1451 target, lun) != CAM_REQ_CMP) {
1452 if (TID == (tid_t)-1) {
1453 event |= AC_LOST_DEVICE;
1454 } else {
1455 event |= AC_INQ_CHANGED
1456 | AC_GETDEV_CHANGED;
1457 }
1458 } else {
1459 if (TID == (tid_t)-1) {
1460 xpt_async(
1461 AC_LOST_DEVICE,
1462 path, NULL);
1463 } else if (LastTID == (tid_t)-1) {
1464 struct ccb_getdev ccb;
1465
1466 xpt_setup_ccb(
1467 &(ccb.ccb_h),
1468 path, /*priority*/5);
1469 xpt_async(
1470 AC_FOUND_DEVICE,
1471 path,
1472 &ccb);
1473 } else {
1474 xpt_async(
1475 AC_INQ_CHANGED,
1476 path, NULL);
1477 xpt_async(
1478 AC_GETDEV_CHANGED,
1479 path, NULL);
1480 }
1481 }
1482 }
1483 /*
1484 * We have the option of clearing the
1485 * cached TID for it to be rescanned, or to
1486 * set it now even if the device never got
1487 * accessed. We chose the later since we
1488 * currently do not use the condition that
1489 * the TID ever got cached.
1490 */
1491 ASR_setTid (sc, bus, target, lun, TID);
1492 }
1493 }
1494 /*
1495 * The xpt layer can not handle multiple events at the
1496 * same call.
1497 */
1498 if (event & AC_LOST_DEVICE) {
1499 xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
1500 }
1501 if (event & AC_INQ_CHANGED) {
1502 xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
1503 }
1504 if (event & AC_GETDEV_CHANGED) {
1505 xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
1506 }
1507 } while (--bus >= 0);
1508 return (error);
1509} /* ASR_rescan */
1510
1511/*-------------------------------------------------------------------------*/
1512/* Function ASR_reset */
1513/*-------------------------------------------------------------------------*/
1514/* The Parameters Passed To This Function Are : */
1515/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1516/* */
1517/* This Function Will reset the adapter and resynchronize any data */
1518/* */
1519/* Return : None */
1520/*-------------------------------------------------------------------------*/
1521
1522STATIC INLINE int
1523ASR_reset(
1524 IN Asr_softc_t * sc)
1525{
1526 int s, retVal;
1527
1528 s = splcam();
1529 if ((sc->ha_in_reset == HA_IN_RESET)
1530 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1531 splx (s);
1532 return (EBUSY);
1533 }
1534 /*
1535 * Promotes HA_OPERATIONAL to HA_IN_RESET,
1536 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1537 */
1538 ++(sc->ha_in_reset);
1539 if (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0) {
1540 debug_asr_printf ("ASR_resetIOP failed\n");
1541 /*
1542 * We really need to take this card off-line, easier said
1543 * than make sense. Better to keep retrying for now since if a
1544 * UART cable is connected the blinkLEDs the adapter is now in
1545 * a hard state requiring action from the monitor commands to
1546 * the HBA to continue. For debugging waiting forever is a
1547 * good thing. In a production system, however, one may wish
1548 * to instead take the card off-line ...
1549 */
1550# if 0 && (defined(HA_OFF_LINE))
1551 /*
1552 * Take adapter off-line.
1553 */
1554 printf ("asr%d: Taking adapter off-line\n",
1555 sc->ha_path[0]
1556 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1557 : 0);
1558 sc->ha_in_reset = HA_OFF_LINE;
1559 splx (s);
1560 return (ENXIO);
1561# else
1562 /* Wait Forever */
1563 while (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0);
1564# endif
1565 }
1566 retVal = ASR_init (sc);
1567 splx (s);
1568 if (retVal != 0) {
1569 debug_asr_printf ("ASR_init failed\n");
1570 sc->ha_in_reset = HA_OFF_LINE;
1571 return (ENXIO);
1572 }
1573 if (ASR_rescan (sc) != 0) {
1574 debug_asr_printf ("ASR_rescan failed\n");
1575 }
1576 ASR_failActiveCommands (sc);
1577 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1578 printf ("asr%d: Brining adapter back on-line\n",
1579 sc->ha_path[0]
1580 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1581 : 0);
1582 }
1583 sc->ha_in_reset = HA_OPERATIONAL;
1584 return (0);
1585} /* ASR_reset */
1586
1587/*
1588 * Device timeout handler.
1589 */
1590STATIC void
1591asr_timeout(
1592 INOUT void * arg)
1593{
1594 union asr_ccb * ccb = (union asr_ccb *)arg;
1595 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1596 int s;
1597
1598 debug_asr_print_path(ccb);
1599 debug_asr_printf("timed out");
1600
1601 /*
1602 * Check if the adapter has locked up?
1603 */
1604 if ((s = ASR_getBlinkLedCode(sc)) != 0) {
1605 /* Reset Adapter */
1606 printf ("asr%d: Blink LED 0x%x resetting adapter\n",
1607 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
1608 if (ASR_reset (sc) == ENXIO) {
1609 /* Try again later */
1610 ccb->ccb_h.timeout_ch = timeout(asr_timeout,
1611 (caddr_t)ccb,
1612 (ccb->ccb_h.timeout * hz) / 1000);
1613 }
1614 return;
1615 }
1616 /*
1617 * Abort does not function on the ASR card!!! Walking away from
1618 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
1619 * our best bet, followed by a complete adapter reset if that fails.
1620 */
1621 s = splcam();
1622 /* Check if we already timed out once to raise the issue */
1623 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
1624 debug_asr_printf (" AGAIN\nreinitializing adapter\n");
1625 if (ASR_reset (sc) == ENXIO) {
1626 ccb->ccb_h.timeout_ch = timeout(asr_timeout,
1627 (caddr_t)ccb,
1628 (ccb->ccb_h.timeout * hz) / 1000);
1629 }
1630 splx(s);
1631 return;
1632 }
1633 debug_asr_printf ("\nresetting bus\n");
1634 /* If the BUS reset does not take, then an adapter reset is next! */
1635 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1636 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
1637 ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
1638 (ccb->ccb_h.timeout * hz) / 1000);
1639 ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
1640 xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
1641 splx(s);
1642} /* asr_timeout */
1643
1644/*
1645 * send a message asynchronously
1646 */
1647STATIC INLINE int
1648ASR_queue(
1649 IN Asr_softc_t * sc,
1650 IN PI2O_MESSAGE_FRAME Message)
1651{
1652 OUT U32 MessageOffset;
1653 union asr_ccb * ccb;
1654
1655 debug_asr_printf ("Host Command Dump:\n");
1656 debug_asr_dump_message (Message);
1657
1658 ccb = (union asr_ccb *)(long)
1659 I2O_MESSAGE_FRAME_getInitiatorContext64(Message);
1660
1661 if ((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE) {
1662#ifdef ASR_MEASURE_PERFORMANCE
1663 int startTimeIndex;
1664
1665 if (ccb) {
1666 ++sc->ha_performance.command_count[
1667 (int) ccb->csio.cdb_io.cdb_bytes[0]];
1668 DEQ_TIMEQ_FREE_LIST(startTimeIndex,
1669 sc->ha_timeQFreeList,
1670 sc->ha_timeQFreeHead,
1671 sc->ha_timeQFreeTail);
1672 if (-1 != startTimeIndex) {
1673 microtime(&(sc->ha_timeQ[startTimeIndex]));
1674 }
1675 /* Time stamp the command before we send it out */
1676 ((PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *) Message)->
1677 PrivateMessageFrame.TransactionContext
1678 = (I2O_TRANSACTION_CONTEXT) startTimeIndex;
1679
1680 ++sc->ha_submitted_ccbs_count;
1681 if (sc->ha_performance.max_submit_count
1682 < sc->ha_submitted_ccbs_count) {
1683 sc->ha_performance.max_submit_count
1684 = sc->ha_submitted_ccbs_count;
1685 }
1686 }
1687#endif
1688 bcopy (Message, sc->ha_Fvirt + MessageOffset,
1689 I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
1690 if (ccb) {
1691 ASR_ccbAdd (sc, ccb);
1692 }
1693 /* Post the command */
1694 sc->ha_Virt->ToFIFO = MessageOffset;
1695 } else {
1696 if (ASR_getBlinkLedCode(sc)) {
1697 /*
1698 * Unlikely we can do anything if we can't grab a
1699 * message frame :-(, but lets give it a try.
1700 */
1701 (void)ASR_reset (sc);
1702 }
1703 }
1704 return (MessageOffset);
1705} /* ASR_queue */
1706
1707
/*
 * Simple Scatter Gather elements.
 *
 * SG(SGL, Index, Flags, Buffer, Size) fills simple SG element `Index'
 * of list `SGL': the element count is set to Size, the flags to
 * I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT or'd with the caller's Flags,
 * and the physical address to KVTOPHYS(Buffer) (NULL for a NULL
 * Buffer). NOTE(review): Buffer is evaluated twice - do not pass an
 * expression with side effects.
 */
#define SG(SGL,Index,Flags,Buffer,Size)			   \
	I2O_FLAGS_COUNT_setCount(			   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  Size);					   \
	I2O_FLAGS_COUNT_setFlags(			   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags)); \
	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(	   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),	   \
	  (Buffer == NULL) ? NULL : KVTOPHYS(Buffer))
1719
1720/*
1721 * Retrieve Parameter Group.
1722 * Buffer must be allocated using defAlignLong macro.
1723 */
1724STATIC void *
1725ASR_getParams(
1726 IN Asr_softc_t * sc,
1727 IN tid_t TID,
1728 IN int Group,
1729 OUT void * Buffer,
1730 IN unsigned BufferSize)
1731{
1732 struct paramGetMessage {
1733 I2O_UTIL_PARAMS_GET_MESSAGE M;
1734 char F[
1735 sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
1736 struct Operations {
1737 I2O_PARAM_OPERATIONS_LIST_HEADER Header;
1738 I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
1739 } O;
1740 };
1741 defAlignLong(struct paramGetMessage, Message);
1742 struct Operations * Operations_Ptr;
1743 I2O_UTIL_PARAMS_GET_MESSAGE * Message_Ptr;
1744 struct ParamBuffer {
1745 I2O_PARAM_RESULTS_LIST_HEADER Header;
1746 I2O_PARAM_READ_OPERATION_RESULT Read;
1747 char Info[1];
1748 } * Buffer_Ptr;
1749
1750 Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(Message,
1751 sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1752 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1753 Operations_Ptr = (struct Operations *)((char *)Message_Ptr
1754 + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1755 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1756 bzero ((void *)Operations_Ptr, sizeof(struct Operations));
1757 I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
1758 &(Operations_Ptr->Header), 1);
1759 I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
1760 &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
1761 I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
1762 &(Operations_Ptr->Template[0]), 0xFFFF);
1763 I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
1764 &(Operations_Ptr->Template[0]), Group);
1765 bzero ((void *)(Buffer_Ptr = getAlignLong(struct ParamBuffer, Buffer)),
1766 BufferSize);
1767
1768 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1769 I2O_VERSION_11
1770 + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1771 / sizeof(U32)) << 4));
1772 I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
1773 TID);
1774 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
1775 I2O_UTIL_PARAMS_GET);
1776 /*
1777 * Set up the buffers as scatter gather elements.
1778 */
1779 SG(&(Message_Ptr->SGL), 0,
1780 I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
1781 Operations_Ptr, sizeof(struct Operations));
1782 SG(&(Message_Ptr->SGL), 1,
1783 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1784 Buffer_Ptr, BufferSize);
1785
1786 if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
1787 && (Buffer_Ptr->Header.ResultCount)) {
1788 return ((void *)(Buffer_Ptr->Info));
1789 }
1790 return ((void *)NULL);
1791} /* ASR_getParams */
1792
1793/*
1794 * Acquire the LCT information.
1795 */
1796STATIC INLINE int
1797ASR_acquireLct (
1798 INOUT Asr_softc_t * sc)
1799{
1800 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
1801 PI2O_SGE_SIMPLE_ELEMENT sg;
1802 int MessageSizeInBytes;
1803 caddr_t v;
1804 int len;
1805 I2O_LCT Table;
1806 PI2O_LCT_ENTRY Entry;
1807
1808 /*
1809 * sc value assumed valid
1810 */
1811 MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE)
1812 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
1813 if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc (
1814 MessageSizeInBytes, M_TEMP, M_WAITOK))
1815 == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
1816 return (ENOMEM);
1817 }
1818 (void)ASR_fillMessage((char *)Message_Ptr, MessageSizeInBytes);
1819 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1820 (I2O_VERSION_11 +
1821 (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1822 / sizeof(U32)) << 4)));
1823 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1824 I2O_EXEC_LCT_NOTIFY);
1825 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
1826 I2O_CLASS_MATCH_ANYCLASS);
1827 /*
1828 * Call the LCT table to determine the number of device entries
1829 * to reserve space for.
1830 */
1831 SG(&(Message_Ptr->SGL), 0,
1832 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
1833 sizeof(I2O_LCT));
1834 /*
1835 * since this code is reused in several systems, code efficiency
1836 * is greater by using a shift operation rather than a divide by
1837 * sizeof(u_int32_t).
1838 */
1839 I2O_LCT_setTableSize(&Table,
1840 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1841 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1842 /*
1843 * Determine the size of the LCT table.
1844 */
1845 if (sc->ha_LCT) {
1846 free (sc->ha_LCT, M_TEMP);
1847 }
1848 /*
1849 * malloc only generates contiguous memory when less than a
1850 * page is expected. We must break the request up into an SG list ...
1851 */
1852 if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
1853 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
1854 || (len > (128 * 1024))) { /* Arbitrary */
1855 free (Message_Ptr, M_TEMP);
1856 return (EINVAL);
1857 }
1858 if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK))
1859 == (PI2O_LCT)NULL) {
1860 free (Message_Ptr, M_TEMP);
1861 return (ENOMEM);
1862 }
1863 /*
1864 * since this code is reused in several systems, code efficiency
1865 * is greater by using a shift operation rather than a divide by
1866 * sizeof(u_int32_t).
1867 */
1868 I2O_LCT_setTableSize(sc->ha_LCT,
1869 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1870 /*
1871 * Convert the access to the LCT table into a SG list.
1872 */
1873 sg = Message_Ptr->SGL.u.Simple;
1874 v = (caddr_t)(sc->ha_LCT);
1875 for (;;) {
1876 int next, base, span;
1877
1878 span = 0;
1879 next = base = KVTOPHYS(v);
1880 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1881
1882 /* How far can we go contiguously */
1883 while ((len > 0) && (base == next)) {
1884 int size;
1885
1886 next = trunc_page(base) + PAGE_SIZE;
1887 size = next - base;
1888 if (size > len) {
1889 size = len;
1890 }
1891 span += size;
1892 v += size;
1893 len -= size;
1894 base = KVTOPHYS(v);
1895 }
1896
1897 /* Construct the Flags */
1898 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1899 {
1900 int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
1901 if (len <= 0) {
1902 rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
1903 | I2O_SGL_FLAGS_LAST_ELEMENT
1904 | I2O_SGL_FLAGS_END_OF_BUFFER);
1905 }
1906 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
1907 }
1908
1909 if (len <= 0) {
1910 break;
1911 }
1912
1913 /*
1914 * Incrementing requires resizing of the packet.
1915 */
1916 ++sg;
1917 MessageSizeInBytes += sizeof(*sg);
1918 I2O_MESSAGE_FRAME_setMessageSize(
1919 &(Message_Ptr->StdMessageFrame),
1920 I2O_MESSAGE_FRAME_getMessageSize(
1921 &(Message_Ptr->StdMessageFrame))
1922 + (sizeof(*sg) / sizeof(U32)));
1923 {
1924 PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;
1925
1926 if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
1927 malloc (MessageSizeInBytes, M_TEMP, M_WAITOK))
1928 == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
1929 free (sc->ha_LCT, M_TEMP);
1930 sc->ha_LCT = (PI2O_LCT)NULL;
1931 free (Message_Ptr, M_TEMP);
1932 return (ENOMEM);
1933 }
1934 span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
1935 bcopy ((caddr_t)Message_Ptr,
1936 (caddr_t)NewMessage_Ptr, span);
1937 free (Message_Ptr, M_TEMP);
1938 sg = (PI2O_SGE_SIMPLE_ELEMENT)
1939 (((caddr_t)NewMessage_Ptr) + span);
1940 Message_Ptr = NewMessage_Ptr;
1941 }
1942 }
1943 { int retval;
1944
1945 retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1946 free (Message_Ptr, M_TEMP);
1947 if (retval != CAM_REQ_CMP) {
1948 return (ENODEV);
1949 }
1950 }
1951 /* If the LCT table grew, lets truncate accesses */
1952 if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
1953 I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
1954 }
1955 for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
1956 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1957 ++Entry) {
1958 Entry->le_type = I2O_UNKNOWN;
1959 switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {
1960
1961 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
1962 Entry->le_type = I2O_BSA;
1963 break;
1964
1965 case I2O_CLASS_SCSI_PERIPHERAL:
1966 Entry->le_type = I2O_SCSI;
1967 break;
1968
1969 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
1970 Entry->le_type = I2O_FCA;
1971 break;
1972
1973 case I2O_CLASS_BUS_ADAPTER_PORT:
1974 Entry->le_type = I2O_PORT | I2O_SCSI;
1975 /* FALLTHRU */
1976 case I2O_CLASS_FIBRE_CHANNEL_PORT:
1977 if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
1978 I2O_CLASS_FIBRE_CHANNEL_PORT) {
1979 Entry->le_type = I2O_PORT | I2O_FCA;
1980 }
1981 { struct ControllerInfo {
1982 I2O_PARAM_RESULTS_LIST_HEADER Header;
1983 I2O_PARAM_READ_OPERATION_RESULT Read;
1984 I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1985 };
1986 defAlignLong(struct ControllerInfo, Buffer);
1987 PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1988
1989 Entry->le_bus = 0xff;
1990 Entry->le_target = 0xff;
1991 Entry->le_lun = 0xff;
1992
1993 if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
1994 ASR_getParams(sc,
1995 I2O_LCT_ENTRY_getLocalTID(Entry),
1996 I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
1997 Buffer, sizeof(struct ControllerInfo)))
1998 == (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)NULL) {
1999 continue;
2000 }
2001 Entry->le_target
2002 = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
2003 Info);
2004 Entry->le_lun = 0;
2005 } /* FALLTHRU */
2006 default:
2007 continue;
2008 }
2009 { struct DeviceInfo {
2010 I2O_PARAM_RESULTS_LIST_HEADER Header;
2011 I2O_PARAM_READ_OPERATION_RESULT Read;
2012 I2O_DPT_DEVICE_INFO_SCALAR Info;
2013 };
2014 defAlignLong (struct DeviceInfo, Buffer);
2015 PI2O_DPT_DEVICE_INFO_SCALAR Info;
2016
2017 Entry->le_bus = 0xff;
2018 Entry->le_target = 0xff;
2019 Entry->le_lun = 0xff;
2020
2021 if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
2022 ASR_getParams(sc,
2023 I2O_LCT_ENTRY_getLocalTID(Entry),
2024 I2O_DPT_DEVICE_INFO_GROUP_NO,
2025 Buffer, sizeof(struct DeviceInfo)))
2026 == (PI2O_DPT_DEVICE_INFO_SCALAR)NULL) {
2027 continue;
2028 }
2029 Entry->le_type
2030 |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
2031 Entry->le_bus
2032 = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
2033 if ((Entry->le_bus > sc->ha_MaxBus)
2034 && (Entry->le_bus <= MAX_CHANNEL)) {
2035 sc->ha_MaxBus = Entry->le_bus;
2036 }
2037 Entry->le_target
2038 = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
2039 Entry->le_lun
2040 = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
2041 }
2042 }
2043 /*
2044 * A zero return value indicates success.
2045 */
2046 return (0);
2047} /* ASR_acquireLct */
2048
/*
 * Initialize a message frame.
 * We assume that the CDB has already been set up, so all we do here is
 * generate the Scatter Gather list.
 *
 * Resolves the I2O TID for the ccb's bus/target/lun (consulting the
 * cached LCT on a miss), fills in the PRIVATE_SCSI_SCB_EXECUTE frame
 * header, copies the CDB over, and builds the SG list including a
 * trailing element for the REQUEST SENSE buffer.
 *
 * Returns the aligned message frame, or NULL when the addressed device
 * has no TID (not present in the LCT).
 */
STATIC INLINE PI2O_MESSAGE_FRAME
ASR_init_message(
	IN union asr_ccb * ccb,
	OUT PI2O_MESSAGE_FRAME Message)
{
	int next, span, base, rw;
	OUT PI2O_MESSAGE_FRAME Message_Ptr;
	Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	PI2O_SGE_SIMPLE_ELEMENT sg;
	caddr_t v;
	vm_size_t size, len;
	U32 MessageSize;

	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
	bzero (Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message),
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT)));

	{
		int target = ccb->ccb_h.target_id;
		int lun = ccb->ccb_h.target_lun;
		int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
		tid_t TID;

		/* TID cache miss: scan the cached LCT for a matching entry */
		if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
			PI2O_LCT_ENTRY Device;

			TID = (tid_t)0;
			for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
			  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
			  ++Device) {
				if ((Device->le_type != I2O_UNKNOWN)
				 && (Device->le_bus == bus)
				 && (Device->le_target == target)
				 && (Device->le_lun == lun)
				 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
					TID = I2O_LCT_ENTRY_getLocalTID(Device);
					/* Cache the mapping for subsequent commands */
					ASR_setTid (sc, Device->le_bus,
					  Device->le_target, Device->le_lun,
					  TID);
					break;
				}
			}
		}
		if (TID == (tid_t)0) {
			/* Device unknown to the IOP: fail the request */
			return ((PI2O_MESSAGE_FRAME)NULL);
		}
		I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
		  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
	}
	/* Header length (in U32s) is encoded in the upper nibble of the offset */
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
	  (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		/ sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	    I2O_SCB_FLAG_ENABLE_DISCONNECT
	  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator & Transaction context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
	/*
	 * copy the cdb over
	 */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
	bcopy (&(ccb->csio.cdb_io),
	  ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB, ccb->csio.cdb_len);

	/*
	 * Given a buffer describing a transfer, set up a scatter/gather map
	 * in a ccb to map that SCSI transfer.
	 */

	/*
	 * NOTE(review): this tests the CAM_DIR_IN bit rather than comparing
	 * the masked direction field; CAM_DIR_NONE requests also match, but
	 * they carry dxfer_len == 0 so no data SG elements are emitted for
	 * them — confirm against cam_ccb.h flag encoding.
	 */
	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;

	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	  (ccb->csio.dxfer_len)
	    ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
	            : (I2O_SCB_FLAG_XFER_FROM_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
	    : (I2O_SCB_FLAG_ENABLE_DISCONNECT
	     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

	/*
	 * Given a transfer described by a `data', fill in the SG list.
	 */
	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];

	len = ccb->csio.dxfer_len;
	v = ccb->csio.data_ptr;
	ASSERT (ccb->csio.dxfer_len >= 0);
	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
	/* One SG element per physically contiguous run, up to SG_SIZE elements */
	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		if (len == 0) {
			/* Final data element: tag it so the IOP knows */
			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
		}
		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
		++sg;
		MessageSize += sizeof(*sg) / sizeof(U32);
	}
	/* We always do the request sense ... */
	if ((span = ccb->csio.sense_len) == 0) {
		span = sizeof(ccb->csio.sense_data);
	}
	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &(ccb->csio.sense_data), span);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  MessageSize + (sizeof(*sg) / sizeof(U32)));
	return (Message_Ptr);
} /* ASR_init_message */
2206
/*
 * Reset the adapter.
 *
 * Sends an ExecOutboundInit message, polls for its completion, then
 * (first time through) contigmalloc's the reply-frame pool and primes
 * the outbound FIFO with the physical address of each frame.
 *
 * Returns the final poll status (non-zero on progress past REJECTED),
 * or 0 when the message could not be initiated.
 */
STATIC INLINE U32
ASR_initOutBound (
	INOUT Asr_softc_t * sc)
{
	struct initOutBoundMessage {
		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
		U32 R;	/* reply-status word polled below, target of the SGL */
	};
	defAlignLong(struct initOutBoundMessage,Message);
	PI2O_EXEC_OUTBOUND_INIT_MESSAGE Message_Ptr;
	/*
	 * NOTE(review): declared `U32 * volatile' (volatile POINTER to plain
	 * U32), not `volatile U32 *'; the poll loop below dereferences it and
	 * relies on the store from the IOP being observed — confirm the
	 * qualifier placement is intentional.
	 */
	OUT U32 * volatile Reply_Ptr;
	U32 Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_OUTBOUND_INIT);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
	  sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
	/*
	 * Reset the Reply Status
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
	  sizeof(U32));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (sc->ha_Virt, sc->ha_Fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		u_long size, addr;

		/*
		 * Wait for a response (Poll).
		 * NOTE(review): no timeout — spins forever if the IOP never
		 * writes the status word.
		 */
		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
		/*
		 * Re-enable the interrupts.
		 */
		sc->ha_Virt->Mask = Old;
		/*
		 * Populate the outbound table.
		 */
		if (sc->ha_Msgs == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {

			/* Allocate the reply frames */
			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  * sc->ha_Msgs_Count;

			/*
			 * contigmalloc only works reliably at
			 * initialization time.
			 */
			if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
			    0xFFFFFFFFul, (u_long)sizeof(U32), 0ul))
			  != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
				(void)bzero ((char *)sc->ha_Msgs, size);
				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
			}
		}

		/* Initialize the outbound FIFO */
		if (sc->ha_Msgs != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL)
		for (size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
		  size; --size) {
			/* Hand each reply frame's physical address to the IOP */
			sc->ha_Virt->FromFIFO = addr;
			addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
		}
		return (*Reply_Ptr);
	}
	return (0);
} /* ASR_initOutBound */
2287
/*
 * Set the system table
 *
 * Builds an ExecSysTabSet message whose SG list carries: the SysTab
 * header (entry count over all attached controllers), one entry per
 * controller softc, plus two empty elements (memory/IO space tables).
 * Returns the ASR_queue_c() status, or ENOMEM on allocation failure.
 */
STATIC INLINE int
ASR_setSysTab(
	IN Asr_softc_t * sc)
{
	PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
	PI2O_SET_SYSTAB_HEADER SystemTable;
	Asr_softc_t * ha;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	int retVal;

	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc (
	  sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK))
	  == (PI2O_SET_SYSTAB_HEADER)NULL) {
		return (ENOMEM);
	}
	bzero (SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
	/* Count every controller on the global softc list */
	for (ha = Asr_softc; ha; ha = ha->ha_next) {
		++SystemTable->NumberEntries;
	}
	/* 3 extra SG elements: header + two empty mem/IO space elements */
	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc (
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
	  M_TEMP, M_WAITOK)) == (PI2O_EXEC_SYS_TAB_SET_MESSAGE)NULL) {
		free (SystemTable, M_TEMP);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((char *)Message_Ptr,
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		/ sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_SYS_TAB_SET);
	/*
	 * Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	/* The SGL starts at the offset encoded in the VersionOffset nibble */
	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
	  + ((I2O_MESSAGE_FRAME_getVersionOffset(
	      &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
	++sg;
	for (ha = Asr_softc; ha; ha = ha->ha_next) {
		/* Last controller's element terminates this buffer */
		SG(sg, 0,
		  ((ha->ha_next)
		    ? (I2O_SGL_FLAGS_DIR)
		    : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
		  &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
		++sg;
	}
	/* Two zero-length trailing buffers close out the SG list */
	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
	 | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	free (Message_Ptr, M_TEMP);
	free (SystemTable, M_TEMP);
	return (retVal);
} /* ASR_setSysTab */
2354
/*
 * Fetch the Hardware Resource Table from the IOP and use its adapter
 * IDs to fix up the bus number (and ha_MaxBus) of each cached LCT
 * entry.  Returns 0 on success, ENODEV when the HRT_GET fails.
 */
STATIC INLINE int
ASR_acquireHrt (
	INOUT Asr_softc_t * sc)
{
	defAlignLong(I2O_EXEC_HRT_GET_MESSAGE,Message);
	I2O_EXEC_HRT_GET_MESSAGE * Message_Ptr;
	struct {
		I2O_HRT	      Header;
		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
	} Hrt;
	u_int8_t NumberOfEntries;
	PI2O_HRT_ENTRY Entry;

	bzero ((void *)&Hrt, sizeof (Hrt));
	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11
	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		/ sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_HRT_GET);

	/*
	 * Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &Hrt, sizeof(Hrt));
	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
		return (ENODEV);
	}
	/*
	 * Clamp to what the stack buffer can hold.
	 * NOTE(review): the clamp of MAX_CHANNEL + 1 against an
	 * Entry[MAX_CHANNEL] array presumably assumes I2O_HRT itself embeds
	 * the first entry — confirm against the I2O header definitions.
	 */
	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
	  > (MAX_CHANNEL + 1)) {
		NumberOfEntries = MAX_CHANNEL + 1;
	}
	for (Entry = Hrt.Header.HRTEntry;
	  NumberOfEntries != 0;
	  ++Entry, --NumberOfEntries) {
		PI2O_LCT_ENTRY Device;

		/* Match LCT entries to this HRT entry by TID (low 12 bits) */
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		  ++Device) {
			if (I2O_LCT_ENTRY_getLocalTID(Device)
			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
				/* Bus number lives in AdapterID's upper 16 bits */
				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
				  Entry) >> 16;
				if ((Device->le_bus > sc->ha_MaxBus)
				 && (Device->le_bus <= MAX_CHANNEL)) {
					sc->ha_MaxBus = Device->le_bus;
				}
			}
		}
	}
	return (0);
} /* ASR_acquireHrt */
2413
2414/*
2415 * Enable the adapter.
2416 */
2417STATIC INLINE int
2418ASR_enableSys (
2419 IN Asr_softc_t * sc)
2420{
2421 defAlignLong(I2O_EXEC_SYS_ENABLE_MESSAGE,Message);
2422 PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr;
2423
2424 Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(Message,
2425 sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2426 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2427 I2O_EXEC_SYS_ENABLE);
2428 return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2429} /* ASR_enableSys */
2430
/*
 * Perform the stages necessary to initialize the adapter
 *
 * Returns zero on success, non-zero if any stage fails.
 * NOTE(review): ASR_initOutBound() is success when non-zero, while the
 * other two stages are compared against CAM_REQ_CMP; ASR_enableSys()
 * returns a 0/1 boolean, so the CAM_REQ_CMP comparison relies on the
 * numeric value of CAM_REQ_CMP — confirm against cam_ccb.h.
 */
STATIC int
ASR_init(
	IN Asr_softc_t * sc)
{
	return ((ASR_initOutBound(sc) == 0)
	 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
	 || (ASR_enableSys(sc) != CAM_REQ_CMP));
} /* ASR_init */
2442
/*
 * Send a Synchronize Cache command to the target device.
 *
 * Silently does nothing when the controller pointer is NULL, when the
 * OS still has commands on the ccb list, or when no valid TID is known
 * for the addressed bus/target/lun.  The completion status of the
 * queued command is deliberately ignored (best effort before reset).
 */
STATIC INLINE void
ASR_sync (
	IN Asr_softc_t * sc,
	IN int bus,
	IN int target,
	IN int lun)
{
	tid_t TID;

	/*
	 * We will not synchronize the device when there are outstanding
	 * commands issued by the OS (this is due to a locked up device,
	 * as the OS normally would flush all outstanding commands before
	 * issuing a shutdown or an adapter reset).
	 */
	if ((sc != (Asr_softc_t *)NULL)
	 && (LIST_FIRST(&(sc->ha_ccb)) != (struct ccb_hdr *)NULL)
	 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
	 && (TID != (tid_t)0)) {
		defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;

		/* Build a private SCSI-execute frame on the stack */
		bzero (Message_Ptr
		  = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
		  sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));

		I2O_MESSAGE_FRAME_setVersionOffset(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  I2O_VERSION_11
		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4));
		I2O_MESSAGE_FRAME_setMessageSize(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32));
		I2O_MESSAGE_FRAME_setInitiatorAddress (
		  (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
		I2O_MESSAGE_FRAME_setFunction(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
		I2O_MESSAGE_FRAME_setTargetAddress(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  I2O_SCSI_SCB_EXEC);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		    I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  DPT_ORGANIZATION_ID);
		/* 6-byte SYNCHRONIZE CACHE CDB; LUN encoded in CDB byte 1 */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
		Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
		Message_Ptr->CDB[1] = (lun << 5);

		/* SCB flags set again, now including XFER_FROM_DEVICE */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
		  | I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

		/* Queue synchronously; completion status ignored */
		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	}
}
2515
2516STATIC INLINE void
2517ASR_synchronize (
2518 IN Asr_softc_t * sc)
2519{
2520 int bus, target, lun;
2521
2522 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2523 for (target = 0; target <= sc->ha_MaxId; ++target) {
2524 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2525 ASR_sync(sc,bus,target,lun);
2526 }
2527 }
2528 }
2529}
2530
/*
 * Reset the HBA, targets and BUS.
 * Currently this resets *all* the SCSI busses.
 *
 * Devices are cache-synchronized first so no dirty data is lost by the
 * subsequent adapter reset; the reset status itself is ignored.
 */
STATIC INLINE void
asr_hbareset(
	IN Asr_softc_t * sc)
{
	ASR_synchronize (sc);
	(void)ASR_reset (sc);
} /* asr_hbareset */
2542
2543/*
2544 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2545 * limit and a reduction in error checking (in the pre 4.0 case).
2546 */
2547STATIC int
2548asr_pci_map_mem (
2549#if __FreeBSD_version >= 400000
2550 IN device_t tag,
2551#else
2552 IN pcici_t tag,
2553#endif
2554 IN Asr_softc_t * sc)
2555{
2556 int rid;
2557 u_int32_t p, l, s;
2558
2559#if __FreeBSD_version >= 400000
2560 /*
2561 * I2O specification says we must find first *memory* mapped BAR
2562 */
2563 for (rid = PCIR_MAPS;
2564 rid < (PCIR_MAPS + 4 * sizeof(u_int32_t));
2565 rid += sizeof(u_int32_t)) {
2566 p = pci_read_config(tag, rid, sizeof(p));
2567 if ((p & 1) == 0) {
2568 break;
2569 }
2570 }
2571 /*
2572 * Give up?
2573 */
2574 if (rid >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2575 rid = PCIR_MAPS;
2576 }
2577 p = pci_read_config(tag, rid, sizeof(p));
2578 pci_write_config(tag, rid, -1, sizeof(p));
2579 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2580 pci_write_config(tag, rid, p, sizeof(p));
2581 if (l > MAX_MAP) {
2582 l = MAX_MAP;
2583 }
2584 /*
2585 * The 2005S Zero Channel RAID solution is not a perfect PCI
2586 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
2587 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
2588 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
2589 * accessible via BAR0, the messaging registers are accessible
2590 * via BAR1. If the subdevice code is 50 to 59 decimal.
2591 */
2592 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s));
2593 if (s != 0xA5111044) {
2594 s = pci_read_config(tag, PCIR_SUBVEND_0, sizeof(s));
2595 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2596 && (ADPTDOMINATOR_SUB_ID_START <= s)
2597 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2598 l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
2599 }
2600 }
2601 p &= ~15;
2602 sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2603 p, p + l, l, RF_ACTIVE);
2604 if (sc->ha_mem_res == (struct resource *)NULL) {
2605 return (0);
2606 }
2607 sc->ha_Base = (void *)rman_get_start(sc->ha_mem_res);
2608 if (sc->ha_Base == (void *)NULL) {
2609 return (0);
2610 }
2611 sc->ha_Virt = (i2oRegs_t *) rman_get_virtual(sc->ha_mem_res);
2612 if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
2613 if ((rid += sizeof(u_int32_t))
2614 >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2615 return (0);
2616 }
2617 p = pci_read_config(tag, rid, sizeof(p));
2618 pci_write_config(tag, rid, -1, sizeof(p));
2619 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2620 pci_write_config(tag, rid, p, sizeof(p));
2621 if (l > MAX_MAP) {
2622 l = MAX_MAP;
2623 }
2624 p &= ~15;
2625 sc->ha_mes_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2626 p, p + l, l, RF_ACTIVE);
2627 if (sc->ha_mes_res == (struct resource *)NULL) {
2628 return (0);
2629 }
2630 if ((void *)rman_get_start(sc->ha_mes_res) == (void *)NULL) {
2631 return (0);
2632 }
2633 sc->ha_Fvirt = (U8 *) rman_get_virtual(sc->ha_mes_res);
2634 } else {
2635 sc->ha_Fvirt = (U8 *)(sc->ha_Virt);
2636 }
2637#else
2638 vm_size_t psize, poffs;
2639
2640 /*
2641 * I2O specification says we must find first *memory* mapped BAR
2642 */
2643 for (rid = PCI_MAP_REG_START;
2644 rid < (PCI_MAP_REG_START + 4 * sizeof(u_int32_t));
2645 rid += sizeof(u_int32_t)) {
2646 p = pci_conf_read (tag, rid);
2647 if ((p & 1) == 0) {
2648 break;
2649 }
2650 }
2651 if (rid >= (PCI_MAP_REG_START + 4 * sizeof(u_int32_t))) {
2652 rid = PCI_MAP_REG_START;
2653 }
2654 /*
2655 ** save old mapping, get size and type of memory
2656 **
2657 ** type is in the lowest four bits.
2658 ** If device requires 2^n bytes, the next
2659 ** n-4 bits are read as 0.
2660 */
2661
2662 sc->ha_Base = (void *)((p = pci_conf_read (tag, rid))
2663 & PCI_MAP_MEMORY_ADDRESS_MASK);
2664 pci_conf_write (tag, rid, 0xfffffffful);
2665 l = pci_conf_read (tag, rid);
2666 pci_conf_write (tag, rid, p);
2667
2668 /*
2669 ** check the type
2670 */
2671
2672 if (!((l & PCI_MAP_MEMORY_TYPE_MASK) == PCI_MAP_MEMORY_TYPE_32BIT_1M
2673 && ((u_long)sc->ha_Base & ~0xfffff) == 0)
2674 && ((l & PCI_MAP_MEMORY_TYPE_MASK) != PCI_MAP_MEMORY_TYPE_32BIT)) {
2675 debug_asr_printf (
2676 "asr_pci_map_mem failed: bad memory type=0x%x\n",
2677 (unsigned) l);
2678 return (0);
2679 };
2680
2681 /*
2682 ** get the size.
2683 */
2684
2685 psize = -(l & PCI_MAP_MEMORY_ADDRESS_MASK);
2686 if (psize > MAX_MAP) {
2687 psize = MAX_MAP;
2688 }
2689 /*
2690 * The 2005S Zero Channel RAID solution is not a perfect PCI
2691 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
2692 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
2693 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
2694 * accessible via BAR0, the messaging registers are accessible
2695 * via BAR1. If the subdevice code is 50 to 59 decimal.
2696 */
2697 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s));
2698 if (s != 0xA5111044) {
2699 s = pci_conf_read (tag, PCIR_SUBVEND_0)
2700 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2701 && (ADPTDOMINATOR_SUB_ID_START <= s)
2702 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2703 psize = MAX_MAP;
2704 }
2705 }
2706
2707 if ((sc->ha_Base == (void *)NULL)
2708 || (sc->ha_Base == (void *)PCI_MAP_MEMORY_ADDRESS_MASK)) {
2709 debug_asr_printf ("asr_pci_map_mem: not configured by bios.\n");
2710 return (0);
2711 };
2712
2713 /*
2714 ** Truncate sc->ha_Base to page boundary.
2715 ** (Or does pmap_mapdev the job?)
2716 */
2717
2718 poffs = (u_long)sc->ha_Base - trunc_page ((u_long)sc->ha_Base);
2719 sc->ha_Virt = (i2oRegs_t *)pmap_mapdev ((u_long)sc->ha_Base - poffs,
2720 psize + poffs);
2721
2722 if (sc->ha_Virt == (i2oRegs_t *)NULL) {
2723 return (0);
2724 }
2725
2726 sc->ha_Virt = (i2oRegs_t *)((u_long)sc->ha_Virt + poffs);
2727 if (s == 0xA5111044) {
2728 if ((rid += sizeof(u_int32_t))
2729 >= (PCI_MAP_REG_START + 4 * sizeof(u_int32_t))) {
2730 return (0);
2731 }
2732
2733 /*
2734 ** save old mapping, get size and type of memory
2735 **
2736 ** type is in the lowest four bits.
2737 ** If device requires 2^n bytes, the next
2738 ** n-4 bits are read as 0.
2739 */
2740
2741 if ((((p = pci_conf_read (tag, rid))
2742 & PCI_MAP_MEMORY_ADDRESS_MASK) == 0L)
2743 || ((p & PCI_MAP_MEMORY_ADDRESS_MASK)
2744 == PCI_MAP_MEMORY_ADDRESS_MASK)) {
2745 debug_asr_printf ("asr_pci_map_mem: not configured by bios.\n");
2746 }
2747 pci_conf_write (tag, rid, 0xfffffffful);
2748 l = pci_conf_read (tag, rid);
2749 pci_conf_write (tag, rid, p);
2750 p &= PCI_MAP_MEMORY_TYPE_MASK;
2751
2752 /*
2753 ** check the type
2754 */
2755
2756 if (!((l & PCI_MAP_MEMORY_TYPE_MASK)
2757 == PCI_MAP_MEMORY_TYPE_32BIT_1M
2758 && (p & ~0xfffff) == 0)
2759 && ((l & PCI_MAP_MEMORY_TYPE_MASK)
2760 != PCI_MAP_MEMORY_TYPE_32BIT)) {
2761 debug_asr_printf (
2762 "asr_pci_map_mem failed: bad memory type=0x%x\n",
2763 (unsigned) l);
2764 return (0);
2765 };
2766
2767 /*
2768 ** get the size.
2769 */
2770
2771 psize = -(l & PCI_MAP_MEMORY_ADDRESS_MASK);
2772 if (psize > MAX_MAP) {
2773 psize = MAX_MAP;
2774 }
2775
2776 /*
2777 ** Truncate p to page boundary.
2778 ** (Or does pmap_mapdev the job?)
2779 */
2780
2781 poffs = p - trunc_page (p);
2782 sc->ha_Fvirt = (U8 *)pmap_mapdev (p - poffs, psize + poffs);
2783
2784 if (sc->ha_Fvirt == (U8 *)NULL) {
2785 return (0);
2786 }
2787
2788 sc->ha_Fvirt = (U8 *)((u_long)sc->ha_Fvirt + poffs);
2789 } else {
2790 sc->ha_Fvirt = (U8 *)(sc->ha_Virt);
2791 }
2792#endif
2793 return (1);
2794} /* asr_pci_map_mem */
2795
/*
 * A simplified copy of the real pci_map_int with additional
 * registration requirements.
 *
 * Allocates a shareable IRQ resource, hooks asr_intr as the handler,
 * and records the interrupt line in sc->ha_irq.  Returns 1 on success,
 * 0 on any failure.
 */
STATIC int
asr_pci_map_int (
#if __FreeBSD_version >= 400000
	IN device_t tag,
#else
	IN pcici_t tag,
#endif
	IN Asr_softc_t * sc)
{
#if __FreeBSD_version >= 400000
	int rid = 0;

	sc->ha_irq_res = bus_alloc_resource(tag, SYS_RES_IRQ, &rid,
	  0, ~0, 1, RF_ACTIVE | RF_SHAREABLE);
	if (sc->ha_irq_res == (struct resource *)NULL) {
		return (0);
	}
	if (bus_setup_intr(tag, sc->ha_irq_res, INTR_TYPE_CAM,
	  (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr))) {
		return (0);
	}
	/* Remember the routed interrupt line for diagnostics/ioctl */
	sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char));
#else
	if (!pci_map_int(tag, (pci_inthand_t *)asr_intr,
	  (void *)sc, &cam_imask)) {
		return (0);
	}
	sc->ha_irq = pci_conf_read(tag, PCIR_INTLINE);
#endif
	return (1);
} /* asr_pci_map_int */
2831
2832/*
2833 * Attach the devices, and virtual devices to the driver list.
2834 */
2835STATIC ATTACH_RET
2836asr_attach (ATTACH_ARGS)
2837{
2838 Asr_softc_t * sc;
2839 struct scsi_inquiry_data * iq;
2840 ATTACH_SET();
2841
2842 if ((sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT)) == (Asr_softc_t *)NULL) {
2843 ATTACH_RETURN(ENOMEM);
2844 }
2845 if (Asr_softc == (Asr_softc_t *)NULL) {
2846 /*
2847 * Fixup the OS revision as saved in the dptsig for the
2848 * engine (dptioctl.h) to pick up.
2849 */
2850 bcopy (osrelease, &ASR_sig.dsDescription[16], 5);
2851 printf ("asr%d: major=%d\n", unit, asr_cdevsw.d_maj);
2852 }
2853 /*
2854 * Initialize the software structure
2855 */
2856 bzero (sc, sizeof(*sc));
2857 LIST_INIT(&(sc->ha_ccb));
2858# ifdef ASR_MEASURE_PERFORMANCE
2859 {
2860 u_int32_t i;
2861
2862 // initialize free list for timeQ
2863 sc->ha_timeQFreeHead = 0;
2864 sc->ha_timeQFreeTail = MAX_TIMEQ_SIZE - 1;
2865 for (i = 0; i < MAX_TIMEQ_SIZE; i++) {
2866 sc->ha_timeQFreeList[i] = i;
2867 }
2868 }
2869# endif
2870 /* Link us into the HA list */
2871 {
2872 Asr_softc_t **ha;
2873
2874 for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next));
2875 *(ha) = sc;
2876 }
2877 {
2878 PI2O_EXEC_STATUS_GET_REPLY status;
2879 int size;
2880
2881 /*
2882 * This is the real McCoy!
2883 */
2884 if (!asr_pci_map_mem(tag, sc)) {
2885 printf ("asr%d: could not map memory\n", unit);
2886 ATTACH_RETURN(ENXIO);
2887 }
2888 /* Enable if not formerly enabled */
2889#if __FreeBSD_version >= 400000
2890 pci_write_config (tag, PCIR_COMMAND,
2891 pci_read_config (tag, PCIR_COMMAND, sizeof(char))
2892 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2893 /* Knowledge is power, responsibility is direct */
2894 {
2895 struct pci_devinfo {
2896 STAILQ_ENTRY(pci_devinfo) pci_links;
2897 struct resource_list resources;
2898 pcicfgregs cfg;
2899 } * dinfo = device_get_ivars(tag);
2900 sc->ha_pciBusNum = dinfo->cfg.bus;
2901 sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3)
2902 | dinfo->cfg.func;
2903 }
2904#else
2905 pci_conf_write (tag, PCIR_COMMAND,
2906 pci_conf_read (tag, PCIR_COMMAND)
2907 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
2908 /* Knowledge is power, responsibility is direct */
2909 switch (pci_mechanism) {
2910
2911 case 1:
2912 sc->ha_pciBusNum = tag.cfg1 >> 16;
2913 sc->ha_pciDeviceNum = tag.cfg1 >> 8;
2914
2915 case 2:
2916 sc->ha_pciBusNum = tag.cfg2.forward;
2917 sc->ha_pciDeviceNum = ((tag.cfg2.enable >> 1) & 7)
2918 | (tag.cfg2.port >> 5);
2919 }
2920#endif
2921 /* Check if the device is there? */
2922 if ((ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt) == 0)
2923 || ((status = (PI2O_EXEC_STATUS_GET_REPLY)malloc (
2924 sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK))
2925 == (PI2O_EXEC_STATUS_GET_REPLY)NULL)
2926 || (ASR_getStatus(sc->ha_Virt, sc->ha_Fvirt, status) == NULL)) {
2927 printf ("asr%d: could not initialize hardware\n", unit);
2928 ATTACH_RETURN(ENODEV); /* Get next, maybe better luck */
2929 }
2930 sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2931 sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2932 sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2933 sc->ha_SystemTable.IopState = status->IopState;
2934 sc->ha_SystemTable.MessengerType = status->MessengerType;
2935 sc->ha_SystemTable.InboundMessageFrameSize
2936 = status->InboundMFrameSize;
2937 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow
2938 = (U32)(sc->ha_Base) + (U32)(&(((i2oRegs_t *)NULL)->ToFIFO));
2939
2940 if (!asr_pci_map_int(tag, (void *)sc)) {
2941 printf ("asr%d: could not map interrupt\n", unit);
2942 ATTACH_RETURN(ENXIO);
2943 }
2944
2945 /* Adjust the maximim inbound count */
2946 if (((sc->ha_QueueSize
2947 = I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status))
2948 > MAX_INBOUND)
2949 || (sc->ha_QueueSize == 0)) {
2950 sc->ha_QueueSize = MAX_INBOUND;
2951 }
2952
2953 /* Adjust the maximum outbound count */
2954 if (((sc->ha_Msgs_Count
2955 = I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status))
2956 > MAX_OUTBOUND)
2957 || (sc->ha_Msgs_Count == 0)) {
2958 sc->ha_Msgs_Count = MAX_OUTBOUND;
2959 }
2960 if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2961 sc->ha_Msgs_Count = sc->ha_QueueSize;
2962 }
2963
2964 /* Adjust the maximum SG size to adapter */
2965 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(
2966 status) << 2)) > MAX_INBOUND_SIZE) {
2967 size = MAX_INBOUND_SIZE;
2968 }
2969 free (status, M_TEMP);
2970 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2971 + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2972 }
2973
2974 /*
2975 * Only do a bus/HBA reset on the first time through. On this
2976 * first time through, we do not send a flush to the devices.
2977 */
2978 if (ASR_init(sc) == 0) {
2979 struct BufferInfo {
2980 I2O_PARAM_RESULTS_LIST_HEADER Header;
2981 I2O_PARAM_READ_OPERATION_RESULT Read;
2982 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2983 };
2984 defAlignLong (struct BufferInfo, Buffer);
2985 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2986# define FW_DEBUG_BLED_OFFSET 8
2987
2988 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2989 ASR_getParams(sc, 0,
2990 I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2991 Buffer, sizeof(struct BufferInfo)))
2992 != (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)NULL) {
2993 sc->ha_blinkLED = sc->ha_Fvirt
2994 + I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info)
2995 + FW_DEBUG_BLED_OFFSET;
2996 }
2997 if (ASR_acquireLct(sc) == 0) {
2998 (void)ASR_acquireHrt(sc);
2999 }
3000 } else {
3001 printf ("asr%d: failed to initialize\n", unit);
3002 ATTACH_RETURN(ENXIO);
3003 }
3004 /*
3005 * Add in additional probe responses for more channels. We
3006 * are reusing the variable `target' for a channel loop counter.
3007 * Done here because of we need both the acquireLct and
3008 * acquireHrt data.
3009 */
3010 { PI2O_LCT_ENTRY Device;
3011
3012 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
3013 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
3014 ++Device) {
3015 if (Device->le_type == I2O_UNKNOWN) {
3016 continue;
3017 }
3018 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
3019 if (Device->le_target > sc->ha_MaxId) {
3020 sc->ha_MaxId = Device->le_target;
3021 }
3022 if (Device->le_lun > sc->ha_MaxLun) {
3023 sc->ha_MaxLun = Device->le_lun;
3024 }
3025 }
3026 if (((Device->le_type & I2O_PORT) != 0)
3027 && (Device->le_bus <= MAX_CHANNEL)) {
3028 /* Do not increase MaxId for efficiency */
3029 sc->ha_adapter_target[Device->le_bus]
3030 = Device->le_target;
3031 }
3032 }
3033 }
3034
3035
3036 /*
3037 * Print the HBA model number as inquired from the card.
3038 */
3039
3040 printf ("asr%d:", unit);
3041
3042 if ((iq = (struct scsi_inquiry_data *)malloc (
3043 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK))
3044 != (struct scsi_inquiry_data *)NULL) {
3045 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
3046 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
3047 int posted = 0;
3048
3049 bzero (iq, sizeof(struct scsi_inquiry_data));
3050 bzero (Message_Ptr
3051 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
3052 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
3053 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
3054
3055 I2O_MESSAGE_FRAME_setVersionOffset(
3056 (PI2O_MESSAGE_FRAME)Message_Ptr,
3057 I2O_VERSION_11
3058 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
3059 - sizeof(I2O_SG_ELEMENT))
3060 / sizeof(U32)) << 4));
3061 I2O_MESSAGE_FRAME_setMessageSize(
3062 (PI2O_MESSAGE_FRAME)Message_Ptr,
3063 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
3064 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT))
3065 / sizeof(U32));
3066 I2O_MESSAGE_FRAME_setInitiatorAddress (
3067 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
3068 I2O_MESSAGE_FRAME_setFunction(
3069 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
3070 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
3071 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
3072 I2O_SCSI_SCB_EXEC);
3073 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
3074 I2O_SCB_FLAG_ENABLE_DISCONNECT
3075 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
3076 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
3077 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
3078 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
3079 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
3080 DPT_ORGANIZATION_ID);
3081 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
3082 Message_Ptr->CDB[0] = INQUIRY;
3083 Message_Ptr->CDB[4] = (unsigned char)sizeof(struct scsi_inquiry_data);
3084 if (Message_Ptr->CDB[4] == 0) {
3085 Message_Ptr->CDB[4] = 255;
3086 }
3087
3088 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
3089 (I2O_SCB_FLAG_XFER_FROM_DEVICE
3090 | I2O_SCB_FLAG_ENABLE_DISCONNECT
3091 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
3092 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
3093
3094 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
3095 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
3096 sizeof(struct scsi_inquiry_data));
3097 SG(&(Message_Ptr->SGL), 0,
3098 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
3099 iq, sizeof(struct scsi_inquiry_data));
3100 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
3101
3102 if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
3103 printf (" ");
3104 ASR_prstring (iq->vendor, 8);
3105 ++posted;
3106 }
3107 if (iq->product[0] && (iq->product[0] != ' ')) {
3108 printf (" ");
3109 ASR_prstring (iq->product, 16);
3110 ++posted;
3111 }
3112 if (iq->revision[0] && (iq->revision[0] != ' ')) {
3113 printf (" FW Rev. ");
3114 ASR_prstring (iq->revision, 4);
3115 ++posted;
3116 }
3117 free ((caddr_t)iq, M_TEMP);
3118 if (posted) {
3119 printf (",");
3120 }
3121 }
3122 printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
3123 (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
3124
3125 /*
3126 * fill in the prototype cam_path.
3127 */
3128 {
3129 int bus;
3130 union asr_ccb * ccb;
3131
3132 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
3133 printf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit);
3134 ATTACH_RETURN(ENOMEM);
3135 }
3136 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
3137 struct cam_devq * devq;
3138 int QueueSize = sc->ha_QueueSize;
3139
3140 if (QueueSize > MAX_INBOUND) {
3141 QueueSize = MAX_INBOUND;
3142 }
3143
3144 /*
3145 * Create the device queue for our SIM(s).
3146 */
3147 if ((devq = cam_simq_alloc(QueueSize)) == NULL) {
3148 continue;
3149 }
3150
3151 /*
3152 * Construct our first channel SIM entry
3153 */
3154 sc->ha_sim[bus] = cam_sim_alloc(
3155 asr_action, asr_poll, "asr", sc,
3156 unit, 1, QueueSize, devq);
3157 if (sc->ha_sim[bus] == NULL) {
3158 continue;
3159 }
3160
3161 if (xpt_bus_register(sc->ha_sim[bus], bus)
3162 != CAM_SUCCESS) {
3163 cam_sim_free(sc->ha_sim[bus],
3164 /*free_devq*/TRUE);
3165 sc->ha_sim[bus] = NULL;
3166 continue;
3167 }
3168
3169 if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
3170 cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
3171 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
3172 xpt_bus_deregister(
3173 cam_sim_path(sc->ha_sim[bus]));
3174 cam_sim_free(sc->ha_sim[bus],
3175 /*free_devq*/TRUE);
3176 sc->ha_sim[bus] = NULL;
3177 continue;
3178 }
3179 }
3180 asr_free_ccb (ccb);
3181 }
3182 /*
3183 * Generate the device node information
3184 */
3185 (void)make_dev(&asr_cdevsw, unit, 0, 0, S_IRWXU, "rasr%d", unit);
3186 destroy_dev(makedev(asr_cdevsw.d_maj,unit+1));
3187 ATTACH_RETURN(0);
3188} /* asr_attach */
3189
3190STATIC void
3191asr_poll(
3192 IN struct cam_sim *sim)
3193{
3194 asr_intr(cam_sim_softc(sim));
3195} /* asr_poll */
3196
3197STATIC void
3198asr_action(
3199 IN struct cam_sim * sim,
3200 IN union ccb * ccb)
3201{
3202 struct Asr_softc * sc;
3203
3204 debug_asr_printf ("asr_action(%lx,%lx{%x})\n",
3205 (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code);
3206
3207 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));
3208
3209 ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);
3210
3211 switch (ccb->ccb_h.func_code) {
3212
3213 /* Common cases first */
3214 case XPT_SCSI_IO: /* Execute the requested I/O operation */
3215 {
3216 struct Message {
3217 char M[MAX_INBOUND_SIZE];
3218 };
3219 defAlignLong(struct Message,Message);
3220 PI2O_MESSAGE_FRAME Message_Ptr;
3221
3222 /* Reject incoming commands while we are resetting the card */
3223 if (sc->ha_in_reset != HA_OPERATIONAL) {
3224 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3225 if (sc->ha_in_reset >= HA_OFF_LINE) {
3226 /* HBA is now off-line */
3227 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
3228 } else {
3229 /* HBA currently resetting, try again later. */
3230 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3231 }
3232 debug_asr_cmd_printf (" e\n");
3233 xpt_done(ccb);
3234 debug_asr_cmd_printf (" q\n");
3235 break;
3236 }
3237 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
3238 printf(
3239 "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
3240 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
3241 ccb->csio.cdb_io.cdb_bytes[0],
3242 cam_sim_bus(sim),
3243 ccb->ccb_h.target_id,
3244 ccb->ccb_h.target_lun);
3245 }
3246 debug_asr_cmd_printf ("(%d,%d,%d,%d)",
3247 cam_sim_unit(sim),
3248 cam_sim_bus(sim),
3249 ccb->ccb_h.target_id,
3250 ccb->ccb_h.target_lun);
3251 debug_asr_cmd_dump_ccb(ccb);
3252
3253 if ((Message_Ptr = ASR_init_message ((union asr_ccb *)ccb,
3254 (PI2O_MESSAGE_FRAME)Message)) != (PI2O_MESSAGE_FRAME)NULL) {
3255 debug_asr_cmd2_printf ("TID=%x:\n",
3256 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
3257 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
3258 debug_asr_cmd2_dump_message(Message_Ptr);
3259 debug_asr_cmd1_printf (" q");
3260
3261 if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
3262#ifdef ASR_MEASURE_PERFORMANCE
3263 ++sc->ha_performance.command_too_busy;
3264#endif
3265 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3266 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3267 debug_asr_cmd_printf (" E\n");
3268 xpt_done(ccb);
3269 }
3270 debug_asr_cmd_printf (" Q\n");
3271 break;
3272 }
3273 /*
3274 * We will get here if there is no valid TID for the device
3275 * referenced in the scsi command packet.
3276 */
3277 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3278 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3279 debug_asr_cmd_printf (" B\n");
3280 xpt_done(ccb);
3281 break;
3282 }
3283
3284 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
3285 /* Rese HBA device ... */
3286 asr_hbareset (sc);
3287 ccb->ccb_h.status = CAM_REQ_CMP;
3288 xpt_done(ccb);
3289 break;
3290
3291# if (defined(REPORT_LUNS))
3292 case REPORT_LUNS:
3293# endif
3294 case XPT_ABORT: /* Abort the specified CCB */
3295 /* XXX Implement */
3296 ccb->ccb_h.status = CAM_REQ_INVALID;
3297 xpt_done(ccb);
3298 break;
3299
3300 case XPT_SET_TRAN_SETTINGS:
3301 /* XXX Implement */
3302 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3303 xpt_done(ccb);
3304 break;
3305
3306 case XPT_GET_TRAN_SETTINGS:
3307 /* Get default/user set transfer settings for the target */
3308 {
3309 struct ccb_trans_settings *cts;
3310 u_int target_mask;
3311
3312 cts = &(ccb->cts);
3313 target_mask = 0x01 << ccb->ccb_h.target_id;
3314 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
3315 cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
3316 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3317 cts->sync_period = 6; /* 40MHz */
3318 cts->sync_offset = 15;
3319
3320 cts->valid = CCB_TRANS_SYNC_RATE_VALID
3321 | CCB_TRANS_SYNC_OFFSET_VALID
3322 | CCB_TRANS_BUS_WIDTH_VALID
3323 | CCB_TRANS_DISC_VALID
3324 | CCB_TRANS_TQ_VALID;
3325 ccb->ccb_h.status = CAM_REQ_CMP;
3326 } else {
3327 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3328 }
3329 xpt_done(ccb);
3330 break;
3331 }
3332
3333 case XPT_CALC_GEOMETRY:
3334 {
3335 struct ccb_calc_geometry *ccg;
3336 u_int32_t size_mb;
3337 u_int32_t secs_per_cylinder;
3338
3339 ccg = &(ccb->ccg);
3340 size_mb = ccg->volume_size
3341 / ((1024L * 1024L) / ccg->block_size);
3342
3343 if (size_mb > 4096) {
3344 ccg->heads = 255;
3345 ccg->secs_per_track = 63;
3346 } else if (size_mb > 2048) {
3347 ccg->heads = 128;
3348 ccg->secs_per_track = 63;
3349 } else if (size_mb > 1024) {
3350 ccg->heads = 65;
3351 ccg->secs_per_track = 63;
3352 } else {
3353 ccg->heads = 64;
3354 ccg->secs_per_track = 32;
3355 }
3356 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
3357 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
3358 ccb->ccb_h.status = CAM_REQ_CMP;
3359 xpt_done(ccb);
3360 break;
3361 }
3362
3363 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
3364 ASR_resetBus (sc, cam_sim_bus(sim));
3365 ccb->ccb_h.status = CAM_REQ_CMP;
3366 xpt_done(ccb);
3367 break;
3368
3369 case XPT_TERM_IO: /* Terminate the I/O process */
3370 /* XXX Implement */
3371 ccb->ccb_h.status = CAM_REQ_INVALID;
3372 xpt_done(ccb);
3373 break;
3374
3375 case XPT_PATH_INQ: /* Path routing inquiry */
3376 {
3377 struct ccb_pathinq *cpi = &(ccb->cpi);
3378
3379 cpi->version_num = 1; /* XXX??? */
3380 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3381 cpi->target_sprt = 0;
3382 /* Not necessary to reset bus, done by HDM initialization */
3383 cpi->hba_misc = PIM_NOBUSRESET;
3384 cpi->hba_eng_cnt = 0;
3385 cpi->max_target = sc->ha_MaxId;
3386 cpi->max_lun = sc->ha_MaxLun;
3387 cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
3388 cpi->bus_id = cam_sim_bus(sim);
3389 cpi->base_transfer_speed = 3300;
3390 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3391 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
3392 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3393 cpi->unit_number = cam_sim_unit(sim);
3394 cpi->ccb_h.status = CAM_REQ_CMP;
3395 xpt_done(ccb);
3396 break;
3397 }
3398 default:
3399 ccb->ccb_h.status = CAM_REQ_INVALID;
3400 xpt_done(ccb);
3401 break;
3402 }
3403} /* asr_action */
3404
3405#ifdef ASR_MEASURE_PERFORMANCE
3406#define WRITE_OP 1
3407#define READ_OP 2
3408#define min_submitR sc->ha_performance.read_by_size_min_time[index]
3409#define max_submitR sc->ha_performance.read_by_size_max_time[index]
3410#define min_submitW sc->ha_performance.write_by_size_min_time[index]
3411#define max_submitW sc->ha_performance.write_by_size_max_time[index]
3412
3413STATIC INLINE void
3414asr_IObySize(
3415 IN Asr_softc_t * sc,
3416 IN u_int32_t submitted_time,
3417 IN int op,
3418 IN int index)
3419{
3420 struct timeval submitted_timeval;
3421
3422 submitted_timeval.tv_sec = 0;
3423 submitted_timeval.tv_usec = submitted_time;
3424
3425 if ( op == READ_OP ) {
3426 ++sc->ha_performance.read_by_size_count[index];
3427
3428 if ( submitted_time != 0xffffffff ) {
3429 timevaladd(
3430 &(sc->ha_performance.read_by_size_total_time[index]),
3431 &submitted_timeval);
3432 if ( (min_submitR == 0)
3433 || (submitted_time < min_submitR) ) {
3434 min_submitR = submitted_time;
3435 }
3436
3437 if ( submitted_time > max_submitR ) {
3438 max_submitR = submitted_time;
3439 }
3440 }
3441 } else {
3442 ++sc->ha_performance.write_by_size_count[index];
3443 if ( submitted_time != 0xffffffff ) {
3444 timevaladd(
3445 &(sc->ha_performance.write_by_size_total_time[index]),
3446 &submitted_timeval);
3447 if ( (submitted_time < min_submitW)
3448 || (min_submitW == 0) ) {
3449 min_submitW = submitted_time;
3450 }
3451
3452 if ( submitted_time > max_submitW ) {
3453 max_submitW = submitted_time;
3454 }
3455 }
3456 }
3457} /* asr_IObySize */
3458#endif
3459
3460/*
3461 * Handle processing of current CCB as pointed to by the Status.
3462 */
STATIC int
asr_intr (
	IN Asr_softc_t * sc)
{
	OUT int processed;	/* becomes 1 once any reply frame is consumed */

#ifdef ASR_MEASURE_PERFORMANCE
	struct timeval junk;

	microtime(&junk);
	sc->ha_performance.intr_started = junk;
#endif

	/* Drain the outbound (reply) FIFO while the status register says so. */
	for (processed = 0;
	  sc->ha_Virt->Status & Mask_InterruptsDisabled;
	  processed = 1) {
		union asr_ccb * ccb;
		U32 ReplyOffset;
		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;

		/*
		 * NOTE(review): FromFIFO is read a second time after an
		 * EMPTY_QUEUE indication — presumably a retry for a
		 * transiently-empty read; confirm against the I2O spec.
		 */
		if (((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)
		 && ((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)) {
			break;
		}
		/* Translate the bus address popped from the FIFO into a KVA. */
		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
		/*
		 * We do not need any (optional byteswapping) method access to
		 * the Initiator context field.
		 */
		ccb = (union asr_ccb *)(long)
		  I2O_MESSAGE_FRAME_getInitiatorContext64(
		    &(Reply->StdReplyFrame.StdMessageFrame));
		if (I2O_MESSAGE_FRAME_getMsgFlags(
		  &(Reply->StdReplyFrame.StdMessageFrame))
		  & I2O_MESSAGE_FLAGS_FAIL) {
			/* Controller failed the message outright. */
			defAlignLong(I2O_UTIL_NOP_MESSAGE,Message);
			PI2O_UTIL_NOP_MESSAGE Message_Ptr;
			U32 MessageOffset;

			MessageOffset = (u_long)
			  I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
			/*
			 * Get the Original Message Frame's address, and get
			 * it's Transaction Context into our space. (Currently
			 * unused at original authorship, but better to be
			 * safe than sorry). Straight copy means that we
			 * need not concern ourselves with the (optional
			 * byteswapping) method access.
			 */
			Reply->StdReplyFrame.TransactionContext
			  = ((PI2O_SINGLE_REPLY_MESSAGE_FRAME)
			    (sc->ha_Fvirt + MessageOffset))->TransactionContext;
			/*
			 * For 64 bit machines, we need to reconstruct the
			 * 64 bit context.
			 */
			ccb = (union asr_ccb *)(long)
			  I2O_MESSAGE_FRAME_getInitiatorContext64(
			    &(Reply->StdReplyFrame.StdMessageFrame));
			/*
			 * Unique error code for command failure.
			 */
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply->StdReplyFrame), (u_int16_t)-2);
			/*
			 * Modify the message frame to contain a NOP and
			 * re-issue it to the controller.
			 */
			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
			  Message, sizeof(I2O_UTIL_NOP_MESSAGE));
#	if (I2O_UTIL_NOP != 0)
			I2O_MESSAGE_FRAME_setFunction (
			  &(Message_Ptr->StdMessageFrame),
			  I2O_UTIL_NOP);
#	endif
			/*
			 * Copy the packet out to the Original Message
			 */
			bcopy ((caddr_t)Message_Ptr,
			  sc->ha_Fvirt + MessageOffset,
			  sizeof(I2O_UTIL_NOP_MESSAGE));
			/*
			 * Issue the NOP
			 */
			sc->ha_Virt->ToFIFO = MessageOffset;
		}

		/*
		 * Asynchronous command with no return requirements,
		 * and a generic handler for immunity against odd error
		 * returns from the adapter.
		 */
		if (ccb == (union asr_ccb *)NULL) {
			/*
			 * Return Reply so that it can be used for the
			 * next command
			 */
			sc->ha_Virt->FromFIFO = ReplyOffset;
			continue;
		}

		/* Welease Wadjah! (and stop timeouts) */
		ASR_ccbRemove (sc, ccb);

		/* Map the adapter's I2O detailed status onto a CAM status. */
		switch (
		  I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
		    &(Reply->StdReplyFrame))) {

		case I2O_SCSI_DSC_SUCCESS:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQ_CMP;
			break;

		case I2O_SCSI_DSC_CHECK_CONDITION:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID;
			break;

		case I2O_SCSI_DSC_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_BUS_BUSY:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_SCSI_BUSY;
			break;

		case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_LUN_INVALID:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
			break;

		default:
			/* Anything unrecognized: have CAM retry the request. */
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			break;
		}
		/* Residual = requested length minus what the adapter moved. */
		if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
			ccb->csio.resid -=
			  I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
			    Reply);
		}

#ifdef ASR_MEASURE_PERFORMANCE
		{
			struct timeval endTime;
			u_int32_t submitted_time;
			u_int32_t size;
			int op_type;
			int startTimeIndex;

			--sc->ha_submitted_ccbs_count;
			startTimeIndex
			  = (int)Reply->StdReplyFrame.TransactionContext;
			if (-1 != startTimeIndex) {
				/* Compute the time spent in device/adapter */
				microtime(&endTime);
				submitted_time = asr_time_delta(sc->ha_timeQ[
				  startTimeIndex], endTime);
				/* put the startTimeIndex back on free list */
				ENQ_TIMEQ_FREE_LIST(startTimeIndex,
				  sc->ha_timeQFreeList,
				  sc->ha_timeQFreeHead,
				  sc->ha_timeQFreeTail);
			} else {
				/* Sentinel: no timestamp slot was recorded. */
				submitted_time = 0xffffffff;
			}

#define maxctime sc->ha_performance.max_command_time[ccb->csio.cdb_io.cdb_bytes[0]]
#define minctime sc->ha_performance.min_command_time[ccb->csio.cdb_io.cdb_bytes[0]]
			if (submitted_time != 0xffffffff) {
				/* Per-opcode and global min/max latencies. */
				if ( maxctime < submitted_time ) {
					maxctime = submitted_time;
				}
				if ( (minctime == 0)
				 || (minctime > submitted_time) ) {
					minctime = submitted_time;
				}

				if ( sc->ha_performance.max_submit_time
				  < submitted_time ) {
					sc->ha_performance.max_submit_time
					  = submitted_time;
				}
				if ( sc->ha_performance.min_submit_time == 0
				 || sc->ha_performance.min_submit_time
				  > submitted_time) {
					sc->ha_performance.min_submit_time
					  = submitted_time;
				}

				/* Classify the CDB opcode as a read or write. */
				switch ( ccb->csio.cdb_io.cdb_bytes[0] ) {

				case 0xa8:	/* 12-byte READ */
					/* FALLTHRU */
				case 0x08:	/* 6-byte READ */
					/* FALLTHRU */
				case 0x28:	/* 10-byte READ */
					op_type = READ_OP;
					break;

				case 0x0a:	/* 6-byte WRITE */
					/* FALLTHRU */
				case 0xaa:	/* 12-byte WRITE */
					/* FALLTHRU */
				case 0x2a:	/* 10-byte WRITE */
					op_type = WRITE_OP;
					break;

				default:
					op_type = 0;
					break;
				}

				if ( op_type != 0 ) {
					struct scsi_rw_big * cmd;

					/*
					 * NOTE(review): the 10-byte R/W
					 * layout is used to pull the block
					 * count even for 6/12-byte CDBs —
					 * confirm the length fields line up
					 * for those opcodes.
					 */
					cmd = (struct scsi_rw_big *)
					  &(ccb->csio.cdb_io);

					/* Block count to bytes (<< 9 == *512). */
					size = (((u_int32_t) cmd->length2 << 8)
					  | ((u_int32_t) cmd->length1)) << 9;

					switch ( size ) {

					case 512:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_512);
						break;

					case 1024:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_1K);
						break;

					case 2048:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_2K);
						break;

					case 4096:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_4K);
						break;

					case 8192:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_8K);
						break;

					case 16384:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_16K);
						break;

					case 32768:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_32K);
						break;

					case 65536:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_64K);
						break;

					default:
						if ( size > (1 << 16) ) {
							asr_IObySize(sc,
							  submitted_time,
							  op_type,
							  SIZE_BIGGER);
						} else {
							asr_IObySize(sc,
							  submitted_time,
							  op_type,
							  SIZE_OTHER);
						}
						break;
					}
				}
			}
		}
#endif
		/* Sense data in reply packet */
		if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
			u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);

			if (size) {
				/* Clamp to the smallest of the three limits. */
				if (size > sizeof(ccb->csio.sense_data)) {
					size = sizeof(ccb->csio.sense_data);
				}
				if (size > I2O_SCSI_SENSE_DATA_SZ) {
					size = I2O_SCSI_SENSE_DATA_SZ;
				}
				if ((ccb->csio.sense_len)
				 && (size > ccb->csio.sense_len)) {
					size = ccb->csio.sense_len;
				}
				bcopy ((caddr_t)Reply->SenseData,
				  (caddr_t)&(ccb->csio.sense_data), size);
			}
		}

		/*
		 * Return Reply so that it can be used for the next command
		 * since we have no more need for it now
		 */
		sc->ha_Virt->FromFIFO = ReplyOffset;

		if (ccb->ccb_h.path) {
			xpt_done ((union ccb *)ccb);
		} else {
			/* No CAM path: an internal command sleeps on the ccb. */
			wakeup ((caddr_t)ccb);
		}
	}
#ifdef ASR_MEASURE_PERFORMANCE
	{
		u_int32_t result;

		/* Account the wall time spent inside this interrupt pass. */
		microtime(&junk);
		result = asr_time_delta(sc->ha_performance.intr_started, junk);

		if (result != 0xffffffff) {
			if ( sc->ha_performance.max_intr_time < result ) {
				sc->ha_performance.max_intr_time = result;
			}

			if ( (sc->ha_performance.min_intr_time == 0)
			 || (sc->ha_performance.min_intr_time > result) ) {
				sc->ha_performance.min_intr_time = result;
			}
		}
	}
#endif
	return (processed);
} /* asr_intr */
3829
3830#undef QueueSize /* Grrrr */
3831#undef SG_Size /* Grrrr */
3832
3833/*
3834 * Meant to be included at the bottom of asr.c !!!
3835 */
3836
3837/*
3838 * Included here as hard coded. Done because other necessary include
3839 * files utilize C++ comment structures which make them a nuisance to
3840 * included here just to pick up these three typedefs.
3841 */
3842typedef U32 DPT_TAG_T;
3843typedef U32 DPT_MSG_T;
3844typedef U32 DPT_RTN_T;
3845
#undef SCSI_RESET	/* Conflicts with "scsi/scsiconf.h" definition */
3847#include "dev/asr/osd_unix.h"
3848
3849#define asr_unit(dev) minor(dev)
3850
3851STATIC INLINE Asr_softc_t *
3852ASR_get_sc (
3853 IN dev_t dev)
3854{
3855 int unit = asr_unit(dev);
3856 OUT Asr_softc_t * sc = Asr_softc;
3857
3858 while (sc && sc->ha_sim[0] && (cam_sim_unit(sc->ha_sim[0]) != unit)) {
3859 sc = sc->ha_next;
3860 }
3861 return (sc);
3862} /* ASR_get_sc */
3863
3864STATIC u_int8_t ASR_ctlr_held;
3865#if (!defined(UNREFERENCED_PARAMETER))
3866# define UNREFERENCED_PARAMETER(x) (void)(x)
3867#endif
3868
3869STATIC int
3870asr_open(
3871 IN dev_t dev,
3872 int32_t flags,
3873 int32_t ifmt,
3874 IN struct proc * proc)
3875{
3876 int s;
3877 OUT int error;
3878 UNREFERENCED_PARAMETER(flags);
3879 UNREFERENCED_PARAMETER(ifmt);
3880
3881 if (ASR_get_sc (dev) == (Asr_softc_t *)NULL) {
3882 return (ENODEV);
3883 }
3884 s = splcam ();
3885 if (ASR_ctlr_held) {
3886 error = EBUSY;
3887 } else if ((error = suser(proc)) == 0) {
3888 ++ASR_ctlr_held;
3889 }
3890 splx(s);
3891 return (error);
3892} /* asr_open */
3893
3894STATIC int
3895asr_close(
3896 dev_t dev,
3897 int flags,
3898 int ifmt,
3899 struct proc * proc)
3900{
3901 UNREFERENCED_PARAMETER(dev);
3902 UNREFERENCED_PARAMETER(flags);
3903 UNREFERENCED_PARAMETER(ifmt);
3904 UNREFERENCED_PARAMETER(proc);
3905
3906 ASR_ctlr_held = 0;
3907 return (0);
3908} /* asr_close */
3909
3910
3911/*-------------------------------------------------------------------------*/
3912/* Function ASR_queue_i */
3913/*-------------------------------------------------------------------------*/
3914/* The Parameters Passed To This Function Are : */
3915/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
3916/* PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command */
3917/* I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure */
3918/* */
3919/* This Function Will Take The User Request Packet And Convert It To An */