Nuke huge mbuf macros stage 1/2: Remove massive inline mbuf macros to reduce
[dragonfly.git] / sys / dev / raid / asr / asr.c
CommitLineData
984263bc 1/* $FreeBSD: src/sys/dev/asr/asr.c,v 1.3.2.2 2001/08/23 05:21:29 scottl Exp $ */
dadab5e9 2/* $DragonFly: src/sys/dev/raid/asr/asr.c,v 1.5 2003/06/25 03:55:46 dillon Exp $ */
984263bc
MD
3/*
4 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
5 * Copyright (c) 2000-2001 Adaptec Corporation
6 * All rights reserved.
7 *
8 * TERMS AND CONDITIONS OF USE
9 *
10 * Redistribution and use in source form, with or without modification, are
11 * permitted provided that redistributions of source code must retain the
12 * above copyright notice, this list of conditions and the following disclaimer.
13 *
14 * This software is provided `as is' by Adaptec and any express or implied
15 * warranties, including, but not limited to, the implied warranties of
16 * merchantability and fitness for a particular purpose, are disclaimed. In no
17 * event shall Adaptec be liable for any direct, indirect, incidental, special,
18 * exemplary or consequential damages (including, but not limited to,
19 * procurement of substitute goods or services; loss of use, data, or profits;
20 * or business interruptions) however caused and on any theory of liability,
21 * whether in contract, strict liability, or tort (including negligence or
22 * otherwise) arising in any way out of the use of this driver software, even
23 * if advised of the possibility of such damage.
24 *
25 * SCSI I2O host adapter driver
26 *
27 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
28 * - The 2000S and 2005S do not initialize on some machines,
29 * increased timeout to 255ms from 50ms for the StatusGet
30 * command.
31 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
32 * - I knew this one was too good to be true. The error return
33 * on ioctl commands needs to be compared to CAM_REQ_CMP, not
34 * to the bit masked status.
35 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
36 * - The 2005S that was supported is affectionately called the
37 * Conjoined BAR Firmware. In order to support RAID-5 in a
38 * 16MB low-cost configuration, Firmware was forced to go
39 * to a Split BAR Firmware. This requires a separate IOP and
40 * Messaging base address.
41 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
42 * - Handle support for 2005S Zero Channel RAID solution.
43 * - System locked up if the Adapter locked up. Do not try
44 * to send other commands if the resetIOP command fails. The
45 * fail outstanding command discovery loop was flawed as the
46 * removal of the command from the list prevented discovering
47 * all the commands.
48 * - Comment changes to clarify driver.
49 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
50 * - We do not use the AC_FOUND_DEV event because of I2O.
51 * Removed asr_async.
52 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
53 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
54 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0
55 * mode as this is confused with competitor adapters in run
56 * mode.
57 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove
58 * to prevent operating system panic.
59 * - moved default major number to 154 from 97.
60 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
61 * - The controller is not actually an ASR (Adaptec SCSI RAID)
62 * series that is visible, it's more of an internal code name.
63 * remove any visible references within reason for now.
64 * - bus_ptr->LUN was not correctly zeroed when initially
65 * allocated causing a possible panic of the operating system
66 * during boot.
67 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
68 * - Code always fails for ASR_getTid affecting performance.
69 * - initiated a set of changes that resulted from a formal
70 * code inspection by Mark_Salyzyn@adaptec.com,
71 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
72 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
73 * Their findings were focussed on the LCT & TID handler, and
74 * all resulting changes were to improve code readability,
75 * consistency or have a positive effect on performance.
76 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
77 * - Passthrough returned an incorrect error.
78 * - Passthrough did not migrate the intrinsic scsi layer wakeup
79 * on command completion.
80 * - generate control device nodes using make_dev and delete_dev.
81 * - Performance affected by TID caching reallocing.
82 * - Made suggested changes by Justin_Gibbs@adaptec.com
83 * - use splcam instead of splbio.
84 * - use cam_imask instead of bio_imask.
85 * - use u_int8_t instead of u_char.
86 * - use u_int16_t instead of u_short.
87 * - use u_int32_t instead of u_long where appropriate.
88 * - use 64 bit context handler instead of 32 bit.
89 * - create_ccb should only allocate the worst case
90 * requirements for the driver since CAM may evolve
91 * making union ccb much larger than needed here.
92 * renamed create_ccb to asr_alloc_ccb.
93 * - go nutz justifying all debug prints as macros
94 * defined at the top and remove unsightly ifdefs.
95 * - INLINE STATIC viewed as confusing. Historically
96 * utilized to affect code performance and debug
97 * issues in OS, Compiler or OEM specific situations.
98 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
99 * - Ported from FreeBSD 2.2.X DPT I2O driver.
100 * changed struct scsi_xfer to union ccb/struct ccb_hdr
101 * changed variable name xs to ccb
102 * changed struct scsi_link to struct cam_path
103 * changed struct scsibus_data to struct cam_sim
104 * stopped using fordriver for holding on to the TID
105 * use proprietary packet creation instead of scsi_inquire
106 * CAM layer sends synchronize commands.
107 */
108
109#define ASR_VERSION 1
110#define ASR_REVISION '0'
111#define ASR_SUBREVISION '8'
112#define ASR_MONTH 8
113#define ASR_DAY 21
114#define ASR_YEAR 2001 - 1980
115
116/*
117 * Debug macros to reduce the unsightly ifdefs
118 */
119#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
120# define debug_asr_message(message) \
121 { \
122 u_int32_t * pointer = (u_int32_t *)message; \
123 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);\
124 u_int32_t counter = 0; \
125 \
126 while (length--) { \
127 printf ("%08lx%c", (u_long)*(pointer++), \
128 (((++counter & 7) == 0) || (length == 0)) \
129 ? '\n' \
130 : ' '); \
131 } \
132 }
133#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
134
135#if (defined(DEBUG_ASR))
136 /* Breaks on none STDC based compilers :-( */
137# define debug_asr_printf(fmt,args...) printf(fmt, ##args)
138# define debug_asr_dump_message(message) debug_asr_message(message)
139# define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path);
140 /* None fatal version of the ASSERT macro */
141# if (defined(__STDC__))
142# define ASSERT(phrase) if(!(phrase))printf(#phrase " at line %d file %s\n",__LINE__,__FILE__)
143# else
144# define ASSERT(phrase) if(!(phrase))printf("phrase" " at line %d file %s\n",__LINE__,__FILE__)
145# endif
146#else /* DEBUG_ASR */
147# define debug_asr_printf(fmt,args...)
148# define debug_asr_dump_message(message)
149# define debug_asr_print_path(ccb)
150# define ASSERT(x)
151#endif /* DEBUG_ASR */
152
153/*
154 * If DEBUG_ASR_CMD is defined:
155 * 0 - Display incoming SCSI commands
156 * 1 - add in a quick character before queueing.
157 * 2 - add in outgoing message frames.
158 */
159#if (defined(DEBUG_ASR_CMD))
160# define debug_asr_cmd_printf(fmt,args...) printf(fmt,##args)
161# define debug_asr_dump_ccb(ccb) \
162 { \
163 u_int8_t * cp = (unsigned char *)&(ccb->csio.cdb_io); \
164 int len = ccb->csio.cdb_len; \
165 \
166 while (len) { \
167 debug_asr_cmd_printf (" %02x", *(cp++)); \
168 --len; \
169 } \
170 }
171# if (DEBUG_ASR_CMD > 0)
172# define debug_asr_cmd1_printf debug_asr_cmd_printf
173# else
174# define debug_asr_cmd1_printf(fmt,args...)
175# endif
176# if (DEBUG_ASR_CMD > 1)
177# define debug_asr_cmd2_printf debug_asr_cmd_printf
178# define debug_asr_cmd2_dump_message(message) debug_asr_message(message)
179# else
180# define debug_asr_cmd2_printf(fmt,args...)
181# define debug_asr_cmd2_dump_message(message)
182# endif
183#else /* DEBUG_ASR_CMD */
184# define debug_asr_cmd_printf(fmt,args...)
185# define debug_asr_cmd_dump_ccb(ccb)
186# define debug_asr_cmd1_printf(fmt,args...)
187# define debug_asr_cmd2_printf(fmt,args...)
188# define debug_asr_cmd2_dump_message(message)
189#endif /* DEBUG_ASR_CMD */
190
191#if (defined(DEBUG_ASR_USR_CMD))
192# define debug_usr_cmd_printf(fmt,args...) printf(fmt,##args)
193# define debug_usr_cmd_dump_message(message) debug_usr_message(message)
194#else /* DEBUG_ASR_USR_CMD */
195# define debug_usr_cmd_printf(fmt,args...)
196# define debug_usr_cmd_dump_message(message)
197#endif /* DEBUG_ASR_USR_CMD */
198
199#define dsDescription_size 46 /* Snug as a bug in a rug */
200#include "dev/asr/dptsig.h"
201
/*
 * DPT driver signature block (see dptsig.h).  Identifies this driver's
 * hardware family, capabilities and version.  NOTE(review): presumably
 * consumed by DPT/Adaptec management tools — confirm against the osd
 * layer.  The description string is patched at attach time (see the
 * in-line comment below) to reflect the running OS version.
 */
static dpt_sig_S ASR_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
	OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL,
	ADF_ALL_SC5,
	0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
	ASR_MONTH, ASR_DAY, ASR_YEAR,
/*	 01234567890123456789012345678901234567890123456789	< 50 chars */
	"Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
	/*		 ^^^^^ asr_attach alters these to match OS */
};
213
214#include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */
215#include <sys/kernel.h>
216#include <sys/systm.h>
217#include <sys/malloc.h>
218#include <sys/proc.h>
219#include <sys/conf.h>
220#include <sys/disklabel.h>
221#include <sys/bus.h>
222#include <machine/resource.h>
223#include <machine/bus.h>
224#include <sys/rman.h>
225#include <sys/stat.h>
226
227#include <cam/cam.h>
228#include <cam/cam_ccb.h>
229#include <cam/cam_sim.h>
230#include <cam/cam_xpt_sim.h>
231#include <cam/cam_xpt_periph.h>
232
233#include <cam/scsi/scsi_all.h>
234#include <cam/scsi/scsi_message.h>
235
236#include <vm/vm.h>
237#include <vm/pmap.h>
238#include <machine/cputypes.h>
239#include <machine/clock.h>
240#include <i386/include/vmparam.h>
241
242#include <pci/pcivar.h>
243#include <pci/pcireg.h>
244
245#define STATIC static
246#define INLINE
247
248#if (defined(DEBUG_ASR) && (DEBUG_ASR > 0))
249# undef STATIC
250# define STATIC
251# undef INLINE
252# define INLINE
253#endif
254#define IN
255#define OUT
256#define INOUT
257
258#define osdSwap4(x) ((u_long)ntohl((u_long)(x)))
259#define KVTOPHYS(x) vtophys(x)
260#include "dev/asr/dptalign.h"
261#include "dev/asr/i2oexec.h"
262#include "dev/asr/i2obscsi.h"
263#include "dev/asr/i2odpt.h"
264#include "dev/asr/i2oadptr.h"
265#include "opt_asr.h"
266
267#include "dev/asr/sys_info.h"
268
269/* Configuration Definitions */
270
271#define SG_SIZE 58 /* Scatter Gather list Size */
272#define MAX_TARGET_ID 126 /* Maximum Target ID supported */
273#define MAX_LUN 255 /* Maximum LUN Supported */
274#define MAX_CHANNEL 7 /* Maximum Channel # Supported by driver */
275#define MAX_INBOUND 2000 /* Max CCBs, Also Max Queue Size */
276#define MAX_OUTBOUND 256 /* Maximum outbound frames/adapter */
277#define MAX_INBOUND_SIZE 512 /* Maximum inbound frame size */
278#define MAX_MAP 4194304L /* Maximum mapping size of IOP */
279 /* Also serves as the minimum map for */
280 /* the 2005S zero channel RAID product */
281
282/**************************************************************************
283** ASR Host Adapter structure - One Structure For Each Host Adapter That **
284** Is Configured Into The System. The Structure Supplies Configuration **
285** Information, Status Info, Queue Info And An Active CCB List Pointer. **
286***************************************************************************/
287
288/* I2O register set */
/* I2O register set (memory-mapped; see ha_Virt in the softc) */
typedef struct {
	U8           Address[0x30];	/* pad up to the Status register */
	volatile U32 Status;
	volatile U32 Mask;		/* interrupt mask register */
#	define Mask_InterruptsDisabled	0x08
	U32          x[2];		/* unused gap in the register map */
	volatile U32 ToFIFO;	/* In Bound FIFO */
	volatile U32 FromFIFO;	/* Out Bound FIFO */
} i2oRegs_t;

/*
 * A MIX of performance and space considerations for TID lookups
 */
typedef u_int16_t tid_t;

/* LUN -> TID map for one target.  TID[] is a C90-style variable-length
 * trailing array: apparently allocated with `size' entries. */
typedef struct {
	u_int32_t size;		/* up to MAX_LUN */
	tid_t     TID[1];
} lun2tid_t;

/* Target -> per-LUN map for one bus; LUN[] likewise sized by `size'. */
typedef struct {
	u_int32_t   size;	/* up to MAX_TARGET */
	lun2tid_t * LUN[1];
} target2lun_t;
313
/*
 * To ensure that we only allocate and use the worst case ccb here, lets
 * make our own local ccb union. If asr_alloc_ccb is utilized for another
 * ccb type, ensure that you add the additional structures into our local
 * ccb union. To ensure strict type checking, we will utilize the local
 * ccb definition wherever possible.
 */
union asr_ccb {
	struct ccb_hdr      ccb_h;	/* For convenience */
	struct ccb_scsiio   csio;
	struct ccb_setasync csa;
	/* Largest member bounds the allocation size in asr_alloc_ccb. */
};
326
/*
 * Per-adapter soft state: register mappings, the IOP's device table
 * (LCT), the list of active ccbs, per-channel CAM paths/sims, and the
 * configuration limits negotiated with the adapter.
 */
typedef struct Asr_softc {
	u_int16_t		ha_irq;
	void		      * ha_Base;	/* base port for each board */
	u_int8_t * volatile	ha_blinkLED;
	i2oRegs_t	      * ha_Virt;	/* Base address of IOP */
	U8		      * ha_Fvirt;	/* Base address of Frames */
	I2O_IOP_ENTRY		ha_SystemTable;
	LIST_HEAD(,ccb_hdr)	ha_ccb;		/* ccbs in use */
	struct cam_path	      * ha_path[MAX_CHANNEL+1];
	struct cam_sim	      * ha_sim[MAX_CHANNEL+1];
#if __FreeBSD_version >= 400000
	struct resource	      * ha_mem_res;
	struct resource	      * ha_mes_res;
	struct resource	      * ha_irq_res;
	void		      * ha_intr;
#endif
	PI2O_LCT		ha_LCT;		/* Complete list of devices */
	/* Shorthand accessors into an LCT entry's IdentityTag bytes */
#	define le_type	  IdentityTag[0]
#	define I2O_BSA		0x20
#	define I2O_FCA		0x40
#	define I2O_SCSI		0x00
#	define I2O_PORT		0x80
#	define I2O_UNKNOWN	0x7F
#	define le_bus	  IdentityTag[1]
#	define le_target  IdentityTag[2]
#	define le_lun	  IdentityTag[3]
	target2lun_t	      * ha_targets[MAX_CHANNEL+1];
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME	ha_Msgs;
	u_long			ha_Msgs_Phys;

	u_int8_t		ha_in_reset;	/* reset state machine */
#	define HA_OPERATIONAL		0
#	define HA_IN_RESET		1
#	define HA_OFF_LINE		2
#	define HA_OFF_LINE_RECOVERY	3
	/* Configuration information */
	/* The target id maximums we take */
	u_int8_t		ha_MaxBus;	/* Maximum bus */
	u_int8_t		ha_MaxId;	/* Maximum target ID */
	u_int8_t		ha_MaxLun;	/* Maximum target LUN */
	u_int8_t		ha_SgSize;	/* Max SG elements */
	u_int8_t		ha_pciBusNum;
	u_int8_t		ha_pciDeviceNum;
	u_int8_t		ha_adapter_target[MAX_CHANNEL+1];
	u_int16_t		ha_QueueSize;	/* Max outstanding commands */
	u_int16_t		ha_Msgs_Count;

	/* Links into other parents and HBAs */
	struct Asr_softc      * ha_next;	/* HBA list */

#ifdef ASR_MEASURE_PERFORMANCE
#define MAX_TIMEQ_SIZE	256 // assumes MAX 256 scsi commands sent
	asr_perf_t		ha_performance;
	u_int32_t		ha_submitted_ccbs_count;

	// Queueing macros for a circular queue
#define TIMEQ_FREE_LIST_EMPTY(head, tail) (-1 == (head) && -1 == (tail))
#define TIMEQ_FREE_LIST_FULL(head, tail) ((((tail) + 1) % MAX_TIMEQ_SIZE) == (head))
#define ENQ_TIMEQ_FREE_LIST(item, Q, head, tail) \
	if (!TIMEQ_FREE_LIST_FULL((head), (tail))) { \
		if TIMEQ_FREE_LIST_EMPTY((head),(tail)) { \
			(head) = (tail) = 0; \
		} \
		else (tail) = ((tail) + 1) % MAX_TIMEQ_SIZE; \
		Q[(tail)] = (item); \
	} \
	else { \
		debug_asr_printf("asr: Enqueueing when TimeQ Free List is full... This should not happen!\n"); \
	}
#define DEQ_TIMEQ_FREE_LIST(item, Q, head, tail) \
	if (!TIMEQ_FREE_LIST_EMPTY((head), (tail))) { \
		item = Q[(head)]; \
		if ((head) == (tail)) { (head) = (tail) = -1; } \
		else (head) = ((head) + 1) % MAX_TIMEQ_SIZE; \
	} \
	else { \
		(item) = -1; \
		debug_asr_printf("asr: Dequeueing when TimeQ Free List is empty... This should not happen!\n"); \
	}

	// Circular queue of time stamps
	struct timeval		ha_timeQ[MAX_TIMEQ_SIZE];
	u_int32_t		ha_timeQFreeList[MAX_TIMEQ_SIZE];
	int			ha_timeQFreeHead;
	int			ha_timeQFreeTail;
#endif
} Asr_softc_t;
414
415STATIC Asr_softc_t * Asr_softc;
416
417/*
418 * Prototypes of the routines we have in this object.
419 */
420
421/* Externally callable routines */
422#if __FreeBSD_version >= 400000
423#define PROBE_ARGS IN device_t tag
424#define PROBE_RET int
425#define PROBE_SET() u_long id = (pci_get_device(tag)<<16)|pci_get_vendor(tag)
426#define PROBE_RETURN(retval) if(retval){device_set_desc(tag,retval);return(0);}else{return(ENXIO);}
427#define ATTACH_ARGS IN device_t tag
428#define ATTACH_RET int
429#define ATTACH_SET() int unit = device_get_unit(tag)
430#define ATTACH_RETURN(retval) return(retval)
431#else
432#define PROBE_ARGS IN pcici_t tag, IN pcidi_t id
433#define PROBE_RET const char *
434#define PROBE_SET()
435#define PROBE_RETURN(retval) return(retval)
436#define ATTACH_ARGS IN pcici_t tag, IN int unit
437#define ATTACH_RET void
438#define ATTACH_SET()
439#define ATTACH_RETURN(retval) return
440#endif
441/* I2O HDM interface */
442STATIC PROBE_RET asr_probe __P((PROBE_ARGS));
443STATIC ATTACH_RET asr_attach __P((ATTACH_ARGS));
444/* DOMINO placeholder */
445STATIC PROBE_RET domino_probe __P((PROBE_ARGS));
446STATIC ATTACH_RET domino_attach __P((ATTACH_ARGS));
447/* MODE0 adapter placeholder */
448STATIC PROBE_RET mode0_probe __P((PROBE_ARGS));
449STATIC ATTACH_RET mode0_attach __P((ATTACH_ARGS));
450
451STATIC Asr_softc_t * ASR_get_sc __P((
452 IN dev_t dev));
453STATIC int asr_ioctl __P((
454 IN dev_t dev,
455 IN u_long cmd,
456 INOUT caddr_t data,
457 int flag,
a99c2fff 458 d_thread_t *td));
984263bc
MD
459STATIC int asr_open __P((
460 IN dev_t dev,
461 int32_t flags,
462 int32_t ifmt,
a99c2fff 463 IN d_thread_t *td));
984263bc
MD
464STATIC int asr_close __P((
465 dev_t dev,
466 int flags,
467 int ifmt,
a99c2fff 468 d_thread_t *td));
984263bc
MD
469STATIC int asr_intr __P((
470 IN Asr_softc_t * sc));
471STATIC void asr_timeout __P((
472 INOUT void * arg));
473STATIC int ASR_init __P((
474 IN Asr_softc_t * sc));
475STATIC INLINE int ASR_acquireLct __P((
476 INOUT Asr_softc_t * sc));
477STATIC INLINE int ASR_acquireHrt __P((
478 INOUT Asr_softc_t * sc));
479STATIC void asr_action __P((
480 IN struct cam_sim * sim,
481 IN union ccb * ccb));
482STATIC void asr_poll __P((
483 IN struct cam_sim * sim));
484
485/*
486 * Here is the auto-probe structure used to nest our tests appropriately
487 * during the startup phase of the operating system.
488 */
#if __FreeBSD_version >= 400000
/* newbus glue: probe/attach method tables for the three PCI matches */
STATIC device_method_t asr_methods[] = {
	DEVMETHOD(device_probe,	 asr_probe),
	DEVMETHOD(device_attach, asr_attach),
	{ 0, 0 }
};

STATIC driver_t asr_driver = {
	"asr",
	asr_methods,
	sizeof(Asr_softc_t)
};

STATIC devclass_t asr_devclass;

DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);

/* DOMINO memory controller: claimed but otherwise a no-op driver */
STATIC device_method_t domino_methods[] = {
	DEVMETHOD(device_probe,	 domino_probe),
	DEVMETHOD(device_attach, domino_attach),
	{ 0, 0 }
};

STATIC driver_t domino_driver = {
	"domino",
	domino_methods,
	0			/* no softc required */
};

STATIC devclass_t domino_devclass;

DRIVER_MODULE(domino, pci, domino_driver, domino_devclass, 0, 0);

/* Mode0 (processor-off) adapters: likewise placeholder only */
STATIC device_method_t mode0_methods[] = {
	DEVMETHOD(device_probe,	 mode0_probe),
	DEVMETHOD(device_attach, mode0_attach),
	{ 0, 0 }
};

STATIC driver_t mode0_driver = {
	"mode0",
	mode0_methods,
	0			/* no softc required */
};

STATIC devclass_t mode0_devclass;

DRIVER_MODULE(mode0, pci, mode0_driver, mode0_devclass, 0, 0);
#else
/* pre-newbus (FreeBSD < 4.0) PCI registration tables */
STATIC u_long asr_pcicount = 0;
STATIC struct pci_device asr_pcidev = {
	"asr",
	asr_probe,
	asr_attach,
	&asr_pcicount,
	NULL
};
DATA_SET (asr_pciset, asr_pcidev);

STATIC u_long domino_pcicount = 0;
STATIC struct pci_device domino_pcidev = {
	"domino",
	domino_probe,
	domino_attach,
	&domino_pcicount,
	NULL
};
DATA_SET (domino_pciset, domino_pcidev);

STATIC u_long mode0_pcicount = 0;
STATIC struct pci_device mode0_pcidev = {
	"mode0",
	mode0_probe,
	mode0_attach,
	&mode0_pcicount,
	NULL
};
DATA_SET (mode0_pciset, mode0_pcidev);
#endif
568
569/*
570 * devsw for asr hba driver
571 *
572 * only ioctl is used. the sd driver provides all other access.
573 */
#define CDEV_MAJOR 154	 /* prefered default character major */
/* Control-device entry points: only open/close/ioctl are implemented;
 * all data access goes through the sd driver. */
STATIC struct cdevsw asr_cdevsw = {
	asr_open,	/* open	    */
	asr_close,	/* close    */
	noread,		/* read	    */
	nowrite,	/* write    */
	asr_ioctl,	/* ioctl    */
	nopoll,		/* poll	    */
	nommap,		/* mmap	    */
	nostrategy,	/* strategy */
	"asr",		/* name	    */
	CDEV_MAJOR,	/* maj	    */
	nodump,		/* dump	    */
	nopsize,	/* psize    */
	0,		/* flags    */
	-1		/* bmaj	    */
};
591
592#ifdef ASR_MEASURE_PERFORMANCE
593STATIC u_int32_t asr_time_delta __P((IN struct timeval start,
594 IN struct timeval end));
595#endif
596
/*
 * Initialize the dynamic cdevsw hooks.  Runs once (guarded by a static
 * flag): hunts for an unused character major, registers asr_cdevsw on
 * it, then removes any stale node for the primary adapter so the
 * adapter scan can recreate it.
 */
STATIC void
asr_drvinit (
	void * unused)
{
	static int asr_devsw_installed = 0;

	if (asr_devsw_installed) {
		return;
	}
	asr_devsw_installed++;
	/*
	 * Find a free spot (the report during driver load used by
	 * osd layer in engine to generate the controlling nodes).
	 * First scan upward from CDEV_MAJOR ...
	 */
	while ((asr_cdevsw.d_maj < NUMCDEVSW)
	 && (devsw(makedev(asr_cdevsw.d_maj,0)) != (struct cdevsw *)NULL)) {
		++asr_cdevsw.d_maj;
	}
	/* ... and if that ran off the top, wrap and scan 0..CDEV_MAJOR. */
	if (asr_cdevsw.d_maj >= NUMCDEVSW) for (
	  asr_cdevsw.d_maj = 0;
	  (asr_cdevsw.d_maj < CDEV_MAJOR)
	   && (devsw(makedev(asr_cdevsw.d_maj,0)) != (struct cdevsw *)NULL);
	  ++asr_cdevsw.d_maj);
	/*
	 * Come to papa
	 */
	cdevsw_add(&asr_cdevsw);
	/*
	 * delete any nodes that would attach to the primary adapter,
	 * let the adapter scans add them.
	 */
	destroy_dev(makedev(asr_cdevsw.d_maj,0));
} /* asr_drvinit */
633
634/* Must initialize before CAM layer picks up our HBA driver */
635SYSINIT(asrdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,asr_drvinit,NULL)
636
637/* I2O support routines */
638#define defAlignLong(STRUCT,NAME) char NAME[sizeof(STRUCT)]
639#define getAlignLong(STRUCT,NAME) ((STRUCT *)(NAME))
640
641/*
642 * Fill message with default.
643 */
644STATIC PI2O_MESSAGE_FRAME
645ASR_fillMessage (
646 IN char * Message,
647 IN u_int16_t size)
648{
649 OUT PI2O_MESSAGE_FRAME Message_Ptr;
650
651 Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message);
652 bzero ((void *)Message_Ptr, size);
653 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
654 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
655 (size + sizeof(U32) - 1) >> 2);
656 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
657 return (Message_Ptr);
658} /* ASR_fillMessage */
659
#define EMPTY_QUEUE ((U32)-1L)

/*
 * Pop the offset of a free inbound message frame from the adapter's
 * ToFIFO register.  Reading the (volatile) FIFO register dequeues an
 * entry; on EMPTY_QUEUE the register is read one more time before
 * giving up — NOTE(review): presumably a hardware timing quirk where a
 * frame can appear between the two reads; confirm with the I2O spec.
 * Returns the frame offset, or EMPTY_QUEUE if none is available.
 */
STATIC INLINE U32
ASR_getMessage(
	IN i2oRegs_t * virt)
{
	OUT U32 MessageOffset;

	if ((MessageOffset = virt->ToFIFO) == EMPTY_QUEUE) {
		MessageOffset = virt->ToFIFO;
	}
	return (MessageOffset);
} /* ASR_getMessage */
673
/*
 * Issue a polled command: acquire an inbound frame (waiting up to
 * ~15s), copy the caller's message into it, disable adapter interrupts
 * and post the frame.  Returns the PREVIOUS interrupt mask — which the
 * caller must restore to virt->Mask — or (U32)-1L if no frame could be
 * obtained.  Interrupts remain disabled on success.
 */
STATIC U32
ASR_initiateCp (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt,
	IN PI2O_MESSAGE_FRAME Message)
{
	OUT U32 Mask = -1L;	/* -1 == failure / no frame acquired */
	U32     MessageOffset;
	u_int   Delay = 1500;

	/*
	 * ASR_initiateCp is only used for synchronous commands and will
	 * be made more resiliant to adapter delays since commands like
	 * resetIOP can cause the adapter to be deaf for a little time.
	 */
	while (((MessageOffset = ASR_getMessage(virt)) == EMPTY_QUEUE)
	 && (--Delay != 0)) {
		DELAY (10000);
	}
	if (MessageOffset != EMPTY_QUEUE) {
		/* Copy the message into the adapter-resident frame */
		bcopy (Message, fvirt + MessageOffset,
		  I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
		/*
		 * Disable the Interrupts
		 */
		virt->Mask = (Mask = virt->Mask) | Mask_InterruptsDisabled;
		virt->ToFIFO = MessageOffset;	/* post: hands frame to IOP */
	}
	return (Mask);
} /* ASR_initiateCp */
705
/*
 * Reset the adapter (ExecIopReset).  Builds an IOP_RESET message whose
 * reply status word lives directly after the message on our stack,
 * sends it polled via ASR_initiateCp, then busy-waits up to ~2s for
 * the adapter to write a non-zero status.  Returns that status word
 * (0 if the adapter never replied), or 0 if the message could not be
 * sent at all.
 */
STATIC U32
ASR_resetIOP (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt)
{
	struct resetMessage {		/* reset message ... */
		I2O_EXEC_IOP_RESET_MESSAGE M;
		U32                        R;	/* ... plus reply status word */
	};
	defAlignLong(struct resetMessage,Message);
	PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
	OUT U32 * volatile Reply_Ptr;	/* volatile: written by the adapter */
	U32 Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
	/*
	 * Reset the Reply Status
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0;
	/* Adapter DMAs its status here, hence the physical address */
	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
	  KVTOPHYS((void *)Reply_Ptr));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 2s.
		 */
		u_int8_t Delay = 200;

		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
			DELAY (10000);
		}
		/*
		 * Re-enable the interrupts.
		 */
		virt->Mask = Old;
		ASSERT (*Reply_Ptr);
		return (*Reply_Ptr);
	}
	ASSERT (Old != (U32)-1L);
	return (0);
} /* ASR_resetIOP */
759
/*
 * Get the curent state of the adapter (ExecStatusGet).  The reply is
 * DMA'd by the adapter into `buffer'; completion is detected by the
 * SyncByte field becoming non-zero.  Returns `buffer' on success, or
 * NULL on timeout (~255ms) or if no inbound frame could be obtained.
 */
STATIC INLINE PI2O_EXEC_STATUS_GET_REPLY
ASR_getStatus (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt,
	OUT PI2O_EXEC_STATUS_GET_REPLY buffer)
{
	defAlignLong(I2O_EXEC_STATUS_GET_MESSAGE,Message);
	PI2O_EXEC_STATUS_GET_MESSAGE Message_Ptr;
	U32 Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
	  I2O_EXEC_STATUS_GET);
	/* Reply is DMA'd, so the adapter needs the physical address */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
	  KVTOPHYS((void *)buffer));
	/* This one is a Byte Count */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
	  sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Reset the Reply Status
	 */
	bzero ((void *)buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 50ms.
		 */
		u_int8_t Delay = 255;

		/* volatile access: the adapter writes SyncByte behind us */
		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
			if (--Delay == 0) {
				/* Timed out; signal failure with NULL */
				buffer = (PI2O_EXEC_STATUS_GET_REPLY)NULL;
				break;
			}
			DELAY (1000);
		}
		/*
		 * Re-enable the interrupts.
		 */
		virt->Mask = Old;
		return (buffer);
	}
	return ((PI2O_EXEC_STATUS_GET_REPLY)NULL);
} /* ASR_getStatus */
814
815/*
816 * Check if the device is a SCSI I2O HBA, and add it to the list.
817 */
818
819/*
820 * Probe for ASR controller. If we find it, we will use it.
821 * virtual adapters.
822 */
823STATIC PROBE_RET
824asr_probe(PROBE_ARGS)
825{
826 PROBE_SET();
827 if ((id == 0xA5011044) || (id == 0xA5111044)) {
828 PROBE_RETURN ("Adaptec Caching SCSI RAID");
829 }
830 PROBE_RETURN (NULL);
831} /* asr_probe */
832
833/*
834 * Probe/Attach for DOMINO chipset.
835 */
836STATIC PROBE_RET
837domino_probe(PROBE_ARGS)
838{
839 PROBE_SET();
840 if (id == 0x10121044) {
841 PROBE_RETURN ("Adaptec Caching Memory Controller");
842 }
843 PROBE_RETURN (NULL);
844} /* domino_probe */
845
/* Placeholder attach: claim the DOMINO device but do no setup. */
STATIC ATTACH_RET
domino_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* domino_attach */
851
852/*
853 * Probe/Attach for MODE0 adapters.
854 */
855STATIC PROBE_RET
856mode0_probe(PROBE_ARGS)
857{
858 PROBE_SET();
859
860 /*
861 * If/When we can get a business case to commit to a
862 * Mode0 driver here, we can make all these tests more
863 * specific and robust. Mode0 adapters have their processors
864 * turned off, this the chips are in a raw state.
865 */
866
867 /* This is a PLX9054 */
868 if (id == 0x905410B5) {
869 PROBE_RETURN ("Adaptec Mode0 PM3757");
870 }
871 /* This is a PLX9080 */
872 if (id == 0x908010B5) {
873 PROBE_RETURN ("Adaptec Mode0 PM3754/PM3755");
874 }
875 /* This is a ZION 80303 */
876 if (id == 0x53098086) {
877 PROBE_RETURN ("Adaptec Mode0 3010S");
878 }
879 /* This is an i960RS */
880 if (id == 0x39628086) {
881 PROBE_RETURN ("Adaptec Mode0 2100S");
882 }
883 /* This is an i960RN */
884 if (id == 0x19648086) {
885 PROBE_RETURN ("Adaptec Mode0 PM2865/2400A/3200S/3400S");
886 }
887#if 0 /* this would match any generic i960 -- mjs */
888 /* This is an i960RP (typically also on Motherboards) */
889 if (id == 0x19608086) {
890 PROBE_RETURN ("Adaptec Mode0 PM2554/PM1554/PM2654");
891 }
892#endif
893 PROBE_RETURN (NULL);
894} /* mode0_probe */
895
/* Placeholder attach: claim the Mode0 device but do no setup. */
STATIC ATTACH_RET
mode0_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* mode0_attach */
901
/*
 * Allocate and zero a driver-private ccb, priming the CAM header and
 * stashing the owning softc in spriv_ptr0 (read back by ASR_queue_s).
 * M_WAITOK means the allocation may sleep; the NULL check is kept as a
 * defensive measure.  Returns NULL only if malloc fails.
 */
STATIC INLINE union asr_ccb *
asr_alloc_ccb (
	IN Asr_softc_t * sc)
{
	OUT union asr_ccb * new_ccb;

	if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb),
	  M_DEVBUF, M_WAITOK)) != (union asr_ccb *)NULL) {
		bzero (new_ccb, sizeof(*new_ccb));
		new_ccb->ccb_h.pinfo.priority = 1;
		new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
		new_ccb->ccb_h.spriv_ptr0 = sc;	/* owner, for ASR_queue_s */
	}
	return (new_ccb);
} /* asr_alloc_ccb */
917
/* Release a ccb previously obtained from asr_alloc_ccb. */
STATIC INLINE void
asr_free_ccb (
	IN union asr_ccb * free_ccb)
{
	free(free_ccb, M_DEVBUF);
} /* asr_free_ccb */
924
925/*
926 * Print inquiry data `carefully'
927 */
928STATIC void
929ASR_prstring (
930 u_int8_t * s,
931 int len)
932{
933 while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
934 printf ("%c", *(s++));
935 }
936} /* ASR_prstring */
937
938/*
939 * Prototypes
940 */
941STATIC INLINE int ASR_queue __P((
942 IN Asr_softc_t * sc,
943 IN PI2O_MESSAGE_FRAME Message));
/*
 * Send a message synchronously and without Interrupt to a ccb.
 * Masks adapter interrupts and raises splcam, queues the message, then
 * polls asr_intr() until the ccb leaves CAM_REQ_INPROG.  If the queue
 * is full the ccb's status is forced to CAM_REQUEUE_REQ instead.
 * Returns the final ccb status.
 */
STATIC int
ASR_queue_s (
	INOUT union asr_ccb * ccb,
	IN PI2O_MESSAGE_FRAME Message)
{
	int           s;
	U32           Mask;
	Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	/* Prevent interrupt service */
	s = splcam ();
	sc->ha_Virt->Mask = (Mask = sc->ha_Virt->Mask)
	  | Mask_InterruptsDisabled;

	if (ASR_queue (sc, Message) == EMPTY_QUEUE) {
		/* Queue full: ask CAM to retry this request later */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	}

	/*
	 * Wait for this board to report a finished instruction.
	 * (asr_intr updates ccb_h.status as replies are consumed.)
	 */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		(void)asr_intr (sc);
	}

	/* Re-enable Interrupts */
	sc->ha_Virt->Mask = Mask;
	splx(s);

	return (ccb->ccb_h.status);
} /* ASR_queue_s */
985
986/*
987 * Send a message synchronously to a Asr_softc_t
988 */
989STATIC int
990ASR_queue_c (
991 IN Asr_softc_t * sc,
992 IN PI2O_MESSAGE_FRAME Message)
993{
994 union asr_ccb * ccb;
995 OUT int status;
996
997 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
998 return (CAM_REQUEUE_REQ);
999 }
1000
1001 status = ASR_queue_s (ccb, Message);
1002
1003 asr_free_ccb(ccb);
1004
1005 return (status);
1006} /* ASR_queue_c */
1007
/*
 * Add the specified ccb to the active queue and arm its timeout.
 * The list insert runs under splcam — per the driver changelog,
 * missing locking here could panic the system.
 */
STATIC INLINE void
ASR_ccbAdd (
	IN Asr_softc_t * sc,
	INOUT union asr_ccb * ccb)
{
	int s;

	s = splcam();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flashes switching from write back to write thru.
			 */
			ccb->ccb_h.timeout = 6 * 60 * 1000;	/* six minutes */
		}
		/* timeout is in ms; convert to ticks for timeout(9) */
		ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
		  (ccb->ccb_h.timeout * hz) / 1000);
	}
	splx(s);
} /* ASR_ccbAdd */
1034
1035/*
1036 * Remove the specified ccb from the active queue.
1037 */
1038STATIC INLINE void
1039ASR_ccbRemove (
1040 IN Asr_softc_t * sc,
1041 INOUT union asr_ccb * ccb)
1042{
1043 int s;
1044
1045 s = splcam();
1046 untimeout(asr_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
1047 LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
1048 splx(s);
1049} /* ASR_ccbRemove */
1050
1051/*
1052 * Fail all the active commands, so they get re-issued by the operating
1053 * system.
1054 */
1055STATIC INLINE void
1056ASR_failActiveCommands (
1057 IN Asr_softc_t * sc)
1058{
1059 struct ccb_hdr * ccb;
1060 int s;
1061
1062#if 0 /* Currently handled by callers, unnecessary paranoia currently */
1063 /* Left in for historical perspective. */
1064 defAlignLong(I2O_EXEC_LCT_NOTIFY_MESSAGE,Message);
1065 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
1066
1067 /* Send a blind LCT command to wait for the enableSys to complete */
1068 Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)ASR_fillMessage(Message,
1069 sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT));
1070 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1071 I2O_EXEC_LCT_NOTIFY);
1072 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
1073 I2O_CLASS_MATCH_ANYCLASS);
1074 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1075#endif
1076
1077 s = splcam();
1078 /*
1079 * We do not need to inform the CAM layer that we had a bus
1080 * reset since we manage it on our own, this also prevents the
1081 * SCSI_DELAY settling that would be required on other systems.
1082 * The `SCSI_DELAY' has already been handled by the card via the
1083 * acquisition of the LCT table while we are at CAM priority level.
1084 * for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
1085 * xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
1086 * }
1087 */
1088 while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != (struct ccb_hdr *)NULL) {
1089 ASR_ccbRemove (sc, (union asr_ccb *)ccb);
1090
1091 ccb->status &= ~CAM_STATUS_MASK;
1092 ccb->status |= CAM_REQUEUE_REQ;
1093 /* Nothing Transfered */
1094 ((struct ccb_scsiio *)ccb)->resid
1095 = ((struct ccb_scsiio *)ccb)->dxfer_len;
1096
1097 if (ccb->path) {
1098 xpt_done ((union ccb *)ccb);
1099 } else {
1100 wakeup ((caddr_t)ccb);
1101 }
1102 }
1103 splx(s);
1104} /* ASR_failActiveCommands */
1105
1106/*
1107 * The following command causes the HBA to reset the specific bus
1108 */
1109STATIC INLINE void
1110ASR_resetBus(
1111 IN Asr_softc_t * sc,
1112 IN int bus)
1113{
1114 defAlignLong(I2O_HBA_BUS_RESET_MESSAGE,Message);
1115 I2O_HBA_BUS_RESET_MESSAGE * Message_Ptr;
1116 PI2O_LCT_ENTRY Device;
1117
1118 Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(Message,
1119 sizeof(I2O_HBA_BUS_RESET_MESSAGE));
1120 I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
1121 I2O_HBA_BUS_RESET);
1122 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
1123 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1124 ++Device) {
1125 if (((Device->le_type & I2O_PORT) != 0)
1126 && (Device->le_bus == bus)) {
1127 I2O_MESSAGE_FRAME_setTargetAddress(
1128 &Message_Ptr->StdMessageFrame,
1129 I2O_LCT_ENTRY_getLocalTID(Device));
1130 /* Asynchronous command, with no expectations */
1131 (void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1132 break;
1133 }
1134 }
1135} /* ASR_resetBus */
1136
1137STATIC INLINE int
1138ASR_getBlinkLedCode (
1139 IN Asr_softc_t * sc)
1140{
1141 if ((sc != (Asr_softc_t *)NULL)
1142 && (sc->ha_blinkLED != (u_int8_t *)NULL)
1143 && (sc->ha_blinkLED[1] == 0xBC)) {
1144 return (sc->ha_blinkLED[0]);
1145 }
1146 return (0);
1147} /* ASR_getBlinkCode */
1148
/*
 * Determine the address of a TID lookup. Must be done at high priority
 * since the address can be changed by other threads of execution.
 *
 * Returns NULL pointer if not indexable (but will attempt to generate
 * an index if `new_entry' flag is set to TRUE).
 *
 * All addressable entries are to be guaranteed zero if never initialized.
 *
 * The lookup is a two-level table: sc->ha_targets[bus] points at a
 * target2lun_t sized in BUS_CHUNK target steps, whose LUN[target]
 * points at a lun2tid_t sized in TARGET_CHUNK lun steps. Both levels
 * are grown on demand (when new_entry is TRUE) by allocate-copy-free.
 */
STATIC INLINE tid_t *
ASR_getTidAddress(
	INOUT Asr_softc_t * sc,
	IN int bus,
	IN int target,
	IN int lun,
	IN int new_entry)
{
	target2lun_t * bus_ptr;		/* level 1: per-bus target table */
	lun2tid_t * target_ptr;		/* level 2: per-target lun table */
	unsigned new_size;

	/*
	 * Validity checking of incoming parameters. More of a bound
	 * expansion limit than an issue with the code dealing with the
	 * values.
	 *
	 * sc must be valid before it gets here, so that check could be
	 * dropped if speed a critical issue.
	 */
	if ((sc == (Asr_softc_t *)NULL)
	 || (bus > MAX_CHANNEL)
	 || (target > sc->ha_MaxId)
	 || (lun > sc->ha_MaxLun)) {
		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
		  (u_long)sc, bus, target, lun);
		return ((tid_t *)NULL);
	}
	/*
	 * See if there is an associated bus list.
	 *
	 * for performance, allocate in size of BUS_CHUNK chunks.
	 * BUS_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
# define BUS_CHUNK 8
	/* Round target up to the next BUS_CHUNK boundary. */
	new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
	if ((bus_ptr = sc->ha_targets[bus]) == (target2lun_t *)NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 * (NOTE(review): malloc with M_WAITOK should not return
		 * NULL, so the NULL check is belt-and-braces.)
		 */
		if ((new_entry == FALSE)
		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK))
		   == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to allocate bus list\n");
			return ((tid_t *)NULL);
		}
		bzero (bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * new_size));
		/* size records usable entries: indices 0..new_size. */
		bus_ptr->size = new_size + 1;
	} else if (bus_ptr->size <= new_size) {
		target2lun_t * new_bus_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK))
		   == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to reallocate bus list\n");
			return ((tid_t *)NULL);
		}
		/*
		 * Zero and copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bzero (new_bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * new_size));
		/* Old table held (size - 1) LUN slots beyond the header. */
		bcopy (bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
		sc->ha_targets[bus] = new_bus_ptr;
		free (bus_ptr, M_TEMP);
		bus_ptr = new_bus_ptr;
		bus_ptr->size = new_size + 1;
	}
	/*
	 * We now have the bus list, lets get to the target list.
	 * Since most systems have only *one* lun, we do not allocate
	 * in chunks as above, here we allow one, then in chunk sizes.
	 * TARGET_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
# define TARGET_CHUNK 8
	if ((new_size = lun) != 0) {
		/* lun > 0: round up to the next TARGET_CHUNK boundary. */
		new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
	}
	if ((target_ptr = bus_ptr->LUN[target]) == (lun2tid_t *)NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK))
		   == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to allocate target list\n");
			return ((tid_t *)NULL);
		}
		bzero (target_ptr, sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * new_size));
		target_ptr->size = new_size + 1;
	} else if (target_ptr->size <= new_size) {
		lun2tid_t * new_target_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK))
		   == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to reallocate target list\n");
			return ((tid_t *)NULL);
		}
		/*
		 * Zero and copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bzero (new_target_ptr, sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * new_size));
		bcopy (target_ptr, new_target_ptr,
		  sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
		bus_ptr->LUN[target] = new_target_ptr;
		free (target_ptr, M_TEMP);
		target_ptr = new_target_ptr;
		target_ptr->size = new_size + 1;
	}
	/*
	 * Now, acquire the TID address from the LUN indexed list.
	 */
	return (&(target_ptr->TID[lun]));
} /* ASR_getTidAddress */
1304
1305/*
1306 * Get a pre-existing TID relationship.
1307 *
1308 * If the TID was never set, return (tid_t)-1.
1309 *
1310 * should use mutex rather than spl.
1311 */
1312STATIC INLINE tid_t
1313ASR_getTid (
1314 IN Asr_softc_t * sc,
1315 IN int bus,
1316 IN int target,
1317 IN int lun)
1318{
1319 tid_t * tid_ptr;
1320 int s;
1321 OUT tid_t retval;
1322
1323 s = splcam();
1324 if (((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, FALSE))
1325 == (tid_t *)NULL)
1326 /* (tid_t)0 or (tid_t)-1 indicate no TID */
1327 || (*tid_ptr == (tid_t)0)) {
1328 splx(s);
1329 return ((tid_t)-1);
1330 }
1331 retval = *tid_ptr;
1332 splx(s);
1333 return (retval);
1334} /* ASR_getTid */
1335
1336/*
1337 * Set a TID relationship.
1338 *
1339 * If the TID was not set, return (tid_t)-1.
1340 *
1341 * should use mutex rather than spl.
1342 */
1343STATIC INLINE tid_t
1344ASR_setTid (
1345 INOUT Asr_softc_t * sc,
1346 IN int bus,
1347 IN int target,
1348 IN int lun,
1349 INOUT tid_t TID)
1350{
1351 tid_t * tid_ptr;
1352 int s;
1353
1354 if (TID != (tid_t)-1) {
1355 if (TID == 0) {
1356 return ((tid_t)-1);
1357 }
1358 s = splcam();
1359 if ((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, TRUE))
1360 == (tid_t *)NULL) {
1361 splx(s);
1362 return ((tid_t)-1);
1363 }
1364 *tid_ptr = TID;
1365 splx(s);
1366 }
1367 return (TID);
1368} /* ASR_setTid */
1369
/*-------------------------------------------------------------------------*/
/*			    Function ASR_rescan				   */
/*-------------------------------------------------------------------------*/
/* The Parameters Passed To This Function Are :				   */
/*	Asr_softc_t	  : HBA miniport driver's adapter data storage.	   */
/*									   */
/* This Function Will rescan the adapter and resynchronize any data	   */
/*									   */
/* Return : 0 For OK, Error Code Otherwise				   */
/*-------------------------------------------------------------------------*/

STATIC INLINE int
ASR_rescan(
	IN Asr_softc_t * sc)
{
	int bus;
	OUT int error;

	/*
	 * Re-acquire the LCT table and synchronize us to the adapter.
	 */
	if ((error = ASR_acquireLct(sc)) == 0) {
		error = ASR_acquireHrt(sc);
	}

	if (error != 0) {
		return error;
	}

	bus = sc->ha_MaxBus;
	/* Reset all existing cached TID lookups */
	do {
		int target, event = 0;

		/*
		 * Scan for all targets on this bus to see if they
		 * got affected by the rescan.
		 */
		for (target = 0; target <= sc->ha_MaxId; ++target) {
			int lun;

			/* Stay away from the controller ID */
			if (target == sc->ha_adapter_target[bus]) {
				continue;
			}
			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
				PI2O_LCT_ENTRY Device;
				tid_t TID = (tid_t)-1;
				tid_t LastTID;

				/*
				 * See if the cached TID changed. Search for
				 * the device in our new LCT. TID stays -1
				 * when the device is no longer present.
				 */
				for (Device = sc->ha_LCT->LCTEntry;
				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
				   + I2O_LCT_getTableSize(sc->ha_LCT));
				  ++Device) {
					if ((Device->le_type != I2O_UNKNOWN)
					 && (Device->le_bus == bus)
					 && (Device->le_target == target)
					 && (Device->le_lun == lun)
					 && (I2O_LCT_ENTRY_getUserTID(Device)
					  == 0xFFF)) {
						TID = I2O_LCT_ENTRY_getLocalTID(
						  Device);
						break;
					}
				}
				/*
				 * Indicate to the OS that the label needs
				 * to be recalculated, or that the specific
				 * open device is no longer valid (Merde)
				 * because the cached TID changed.
				 */
				LastTID = ASR_getTid (sc, bus, target, lun);
				if (LastTID != TID) {
					struct cam_path * path;

					/*
					 * If a per-device path cannot be
					 * created, defer a bus-wide async
					 * event (accumulated in `event');
					 * otherwise notify on the device
					 * path directly.
					 * NOTE(review): the created path
					 * appears never xpt_free_path'd —
					 * confirm whether this leaks.
					 */
					if (xpt_create_path(&path,
					  /*periph*/NULL,
					  cam_sim_path(sc->ha_sim[bus]),
					  target, lun) != CAM_REQ_CMP) {
						if (TID == (tid_t)-1) {
							event |= AC_LOST_DEVICE;
						} else {
							event |= AC_INQ_CHANGED
							       | AC_GETDEV_CHANGED;
						}
					} else {
						if (TID == (tid_t)-1) {
							/* Device went away. */
							xpt_async(
							  AC_LOST_DEVICE,
							  path, NULL);
						} else if (LastTID == (tid_t)-1) {
							/* New device found. */
							struct ccb_getdev ccb;

							xpt_setup_ccb(
							  &(ccb.ccb_h),
							  path, /*priority*/5);
							xpt_async(
							  AC_FOUND_DEVICE,
							  path,
							  &ccb);
						} else {
							/* TID changed. */
							xpt_async(
							  AC_INQ_CHANGED,
							  path, NULL);
							xpt_async(
							  AC_GETDEV_CHANGED,
							  path, NULL);
						}
					}
				}
				/*
				 * We have the option of clearing the
				 * cached TID for it to be rescanned, or to
				 * set it now even if the device never got
				 * accessed. We chose the later since we
				 * currently do not use the condition that
				 * the TID ever got cached.
				 */
				ASR_setTid (sc, bus, target, lun, TID);
			}
		}
		/*
		 * The xpt layer can not handle multiple events at the
		 * same call.
		 */
		if (event & AC_LOST_DEVICE) {
			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
		}
		if (event & AC_INQ_CHANGED) {
			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
		}
		if (event & AC_GETDEV_CHANGED) {
			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
		}
	} while (--bus >= 0);
	return (error);
} /* ASR_rescan */
1511
1512/*-------------------------------------------------------------------------*/
1513/* Function ASR_reset */
1514/*-------------------------------------------------------------------------*/
1515/* The Parameters Passed To This Function Are : */
1516/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1517/* */
1518/* This Function Will reset the adapter and resynchronize any data */
1519/* */
1520/* Return : None */
1521/*-------------------------------------------------------------------------*/
1522
1523STATIC INLINE int
1524ASR_reset(
1525 IN Asr_softc_t * sc)
1526{
1527 int s, retVal;
1528
1529 s = splcam();
1530 if ((sc->ha_in_reset == HA_IN_RESET)
1531 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1532 splx (s);
1533 return (EBUSY);
1534 }
1535 /*
1536 * Promotes HA_OPERATIONAL to HA_IN_RESET,
1537 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1538 */
1539 ++(sc->ha_in_reset);
1540 if (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0) {
1541 debug_asr_printf ("ASR_resetIOP failed\n");
1542 /*
1543 * We really need to take this card off-line, easier said
1544 * than make sense. Better to keep retrying for now since if a
1545 * UART cable is connected the blinkLEDs the adapter is now in
1546 * a hard state requiring action from the monitor commands to
1547 * the HBA to continue. For debugging waiting forever is a
1548 * good thing. In a production system, however, one may wish
1549 * to instead take the card off-line ...
1550 */
1551# if 0 && (defined(HA_OFF_LINE))
1552 /*
1553 * Take adapter off-line.
1554 */
1555 printf ("asr%d: Taking adapter off-line\n",
1556 sc->ha_path[0]
1557 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1558 : 0);
1559 sc->ha_in_reset = HA_OFF_LINE;
1560 splx (s);
1561 return (ENXIO);
1562# else
1563 /* Wait Forever */
1564 while (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0);
1565# endif
1566 }
1567 retVal = ASR_init (sc);
1568 splx (s);
1569 if (retVal != 0) {
1570 debug_asr_printf ("ASR_init failed\n");
1571 sc->ha_in_reset = HA_OFF_LINE;
1572 return (ENXIO);
1573 }
1574 if (ASR_rescan (sc) != 0) {
1575 debug_asr_printf ("ASR_rescan failed\n");
1576 }
1577 ASR_failActiveCommands (sc);
1578 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1579 printf ("asr%d: Brining adapter back on-line\n",
1580 sc->ha_path[0]
1581 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1582 : 0);
1583 }
1584 sc->ha_in_reset = HA_OPERATIONAL;
1585 return (0);
1586} /* ASR_reset */
1587
/*
 * Device timeout handler.
 *
 * Escalation ladder: if the adapter is flashing a blink-LED fault
 * code, reset the whole adapter; on the second timeout of the same
 * ccb, reinitialize the adapter; on the first timeout, reset just the
 * SCSI bus and re-arm the timer so a repeat escalates to full reset.
 */
STATIC void
asr_timeout(
	INOUT void * arg)
{
	union asr_ccb * ccb = (union asr_ccb *)arg;
	Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	int s;	/* doubles as blink-LED code, then as saved spl level */

	debug_asr_print_path(ccb);
	debug_asr_printf("timed out");

	/*
	 * Check if the adapter has locked up?
	 */
	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
		/* Reset Adapter */
		printf ("asr%d: Blink LED 0x%x resetting adapter\n",
		  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
		if (ASR_reset (sc) == ENXIO) {
			/* Try again later */
			ccb->ccb_h.timeout_ch = timeout(asr_timeout,
			  (caddr_t)ccb,
			  (ccb->ccb_h.timeout * hz) / 1000);
		}
		return;
	}
	/*
	 * Abort does not function on the ASR card!!! Walking away from
	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
	 * our best bet, followed by a complete adapter reset if that fails.
	 */
	s = splcam();
	/* Check if we already timed out once to raise the issue */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
		if (ASR_reset (sc) == ENXIO) {
			/* Reset failed: re-arm and try again later. */
			ccb->ccb_h.timeout_ch = timeout(asr_timeout,
			  (caddr_t)ccb,
			  (ccb->ccb_h.timeout * hz) / 1000);
		}
		splx(s);
		return;
	}
	debug_asr_printf ("\nresetting bus\n");
	/* If the BUS reset does not take, then an adapter reset is next! */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
	/* Re-arm first so the escalation above fires if the bus reset fails. */
	ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
	  (ccb->ccb_h.timeout * hz) / 1000);
	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
	xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
	splx(s);
} /* asr_timeout */
1644
/*
 * send a message asynchronously
 *
 * Grabs an inbound message frame from the adapter, copies the message
 * into it and posts it on the ToFIFO. Returns the frame offset, or
 * EMPTY_QUEUE when no frame could be obtained (the caller treats that
 * as a transient failure). If the ccb embedded in the initiator
 * context is non-NULL it is added to the active queue for tracking.
 */
STATIC INLINE int
ASR_queue(
	IN Asr_softc_t * sc,
	IN PI2O_MESSAGE_FRAME Message)
{
	OUT U32 MessageOffset;
	union asr_ccb * ccb;

	debug_asr_printf ("Host Command Dump:\n");
	debug_asr_dump_message (Message);

	/* Recover the ccb stashed in the initiator context (may be NULL). */
	ccb = (union asr_ccb *)(long)
	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);

	if ((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE) {
#ifdef ASR_MEASURE_PERFORMANCE
		int startTimeIndex;

		if (ccb) {
			/* Bucket the command by its CDB opcode. */
			++sc->ha_performance.command_count[
			  (int) ccb->csio.cdb_io.cdb_bytes[0]];
			DEQ_TIMEQ_FREE_LIST(startTimeIndex,
			  sc->ha_timeQFreeList,
			  sc->ha_timeQFreeHead,
			  sc->ha_timeQFreeTail);
			if (-1 != startTimeIndex) {
				microtime(&(sc->ha_timeQ[startTimeIndex]));
			}
			/* Time stamp the command before we send it out */
			((PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *) Message)->
			  PrivateMessageFrame.TransactionContext
			    = (I2O_TRANSACTION_CONTEXT) startTimeIndex;

			++sc->ha_submitted_ccbs_count;
			if (sc->ha_performance.max_submit_count
			  < sc->ha_submitted_ccbs_count) {
				sc->ha_performance.max_submit_count
				  = sc->ha_submitted_ccbs_count;
			}
		}
#endif
		/* Message size field is in U32 units; copy into the frame. */
		bcopy (Message, sc->ha_Fvirt + MessageOffset,
		  I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
		if (ccb) {
			ASR_ccbAdd (sc, ccb);
		}
		/* Post the command */
		sc->ha_Virt->ToFIFO = MessageOffset;
	} else {
		if (ASR_getBlinkLedCode(sc)) {
			/*
			 * Unlikely we can do anything if we can't grab a
			 * message frame :-(, but lets give it a try.
			 */
			(void)ASR_reset (sc);
		}
	}
	return (MessageOffset);
} /* ASR_queue */
1707
1708
/* Simple Scatter Gather elements */
/*
 * Fill in simple SG element `Index' of list `SGL' with the supplied
 * flags, buffer address and byte count. Wrapped in do { } while (0)
 * so the macro expands to exactly one statement and remains safe in
 * unbraced if/else bodies (the original expanded to three statements).
 * Note: `Buffer' is evaluated twice; do not pass expressions with
 * side effects.
 */
#define SG(SGL,Index,Flags,Buffer,Size)				   \
	do {							   \
		I2O_FLAGS_COUNT_setCount(			   \
		  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
		  Size);					   \
		I2O_FLAGS_COUNT_setFlags(			   \
		  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags)); \
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(	   \
		  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),	   \
		  (Buffer == NULL) ? NULL : KVTOPHYS(Buffer));	   \
	} while (0)
1720
/*
 * Retrieve Parameter Group.
 * Buffer must be allocated using defAlignLong macro.
 *
 * Issues an I2O_UTIL_PARAMS_GET for the given TID/group with a
 * FIELD_GET-all operations template, placing the reply in Buffer.
 * Returns a pointer to the result payload inside Buffer, or NULL on
 * failure (queue error or empty result list).
 */
STATIC void *
ASR_getParams(
	IN Asr_softc_t * sc,
	IN tid_t TID,
	IN int Group,
	OUT void * Buffer,
	IN unsigned BufferSize)
{
	/* Message frame followed by room for two simple SG elements
	 * and the operations template, all in one aligned blob. */
	struct paramGetMessage {
		I2O_UTIL_PARAMS_GET_MESSAGE M;
		char F[
		  sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
		struct Operations {
			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
		} O;
	};
	defAlignLong(struct paramGetMessage, Message);
	struct Operations * Operations_Ptr;
	I2O_UTIL_PARAMS_GET_MESSAGE * Message_Ptr;
	struct ParamBuffer {
		I2O_PARAM_RESULTS_LIST_HEADER Header;
		I2O_PARAM_READ_OPERATION_RESULT Read;
		char Info[1];
	} * Buffer_Ptr;

	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	/* Operations template lives just past the two SG elements. */
	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
	  + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	bzero ((void *)Operations_Ptr, sizeof(struct Operations));
	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
	  &(Operations_Ptr->Header), 1);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
	  &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
	/* 0xFFFF: request every field in the group. */
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
	  &(Operations_Ptr->Template[0]), 0xFFFF);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
	  &(Operations_Ptr->Template[0]), Group);
	bzero ((void *)(Buffer_Ptr = getAlignLong(struct ParamBuffer, Buffer)),
	  BufferSize);

	/* Version/offset encodes where the SG list starts (in U32s). */
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  I2O_VERSION_11
	  + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
	  TID);
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_UTIL_PARAMS_GET);
	/*
	 * Set up the buffers as scatter gather elements.
	 * Element 0 carries the operations template out; element 1
	 * receives the results.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Operations_Ptr, sizeof(struct Operations));
	SG(&(Message_Ptr->SGL), 1,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Buffer_Ptr, BufferSize);

	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
	 && (Buffer_Ptr->Header.ResultCount)) {
		return ((void *)(Buffer_Ptr->Info));
	}
	return ((void *)NULL);
} /* ASR_getParams */
1793
/*
 * Acquire the LCT information.
 *
 * Two-pass protocol: first send an LCT_NOTIFY with a single-entry
 * table to learn the real table size, then allocate sc->ha_LCT of
 * that size, describe it to the adapter as a (possibly multi-element)
 * SG list of physically-contiguous runs, and fetch the full table.
 * Finally classify each entry into the driver's le_type/le_bus/
 * le_target/le_lun fields via parameter-group queries.
 * Returns 0 on success, or ENOMEM/EINVAL/ENODEV.
 */
STATIC INLINE int
ASR_acquireLct (
	INOUT Asr_softc_t * sc)
{
	PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	int MessageSizeInBytes;
	caddr_t v;
	int len;
	I2O_LCT Table;		/* single-entry probe table (on stack) */
	PI2O_LCT_ENTRY Entry;

	/*
	 * sc value assumed valid
	 */
	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc (
	  MessageSizeInBytes, M_TEMP, M_WAITOK))
	  == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
		return (ENOMEM);
	}
	(void)ASR_fillMessage((char *)Message_Ptr, MessageSizeInBytes);
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	  I2O_CLASS_MATCH_ANYCLASS);
	/*
	 * Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
	  sizeof(I2O_LCT));
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(&Table,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	/*
	 * Determine the size of the LCT table.
	 */
	if (sc->ha_LCT) {
		free (sc->ha_LCT, M_TEMP);
	}
	/*
	 * malloc only generates contiguous memory when less than a
	 * page is expected. We must break the request up into an SG list ...
	 */
	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
	 || (len > (128 * 1024))) {	/* Arbitrary */
		free (Message_Ptr, M_TEMP);
		return (EINVAL);
	}
	if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK))
	  == (PI2O_LCT)NULL) {
		free (Message_Ptr, M_TEMP);
		return (ENOMEM);
	}
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(sc->ha_LCT,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/*
	 * Convert the access to the LCT table into a SG list.
	 * Each element covers one physically-contiguous run of the
	 * (virtually contiguous) buffer.
	 */
	sg = Message_Ptr->SGL.u.Simple;
	v = (caddr_t)(sc->ha_LCT);
	for (;;) {
		int next, base, span;

		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			int size;

			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		/* Construct the Flags */
		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		{
			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
			if (len <= 0) {
				/* Final element: mark end of list/buffer. */
				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
				    | I2O_SGL_FLAGS_LAST_ELEMENT
				    | I2O_SGL_FLAGS_END_OF_BUFFER);
			}
			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
		}

		if (len <= 0) {
			break;
		}

		/*
		 * Incrementing requires resizing of the packet.
		 * Grow the message by one SG element: allocate a larger
		 * copy, preserving everything up to (and including) the
		 * elements already filled in.
		 */
		++sg;
		MessageSizeInBytes += sizeof(*sg);
		I2O_MESSAGE_FRAME_setMessageSize(
		  &(Message_Ptr->StdMessageFrame),
		  I2O_MESSAGE_FRAME_getMessageSize(
		    &(Message_Ptr->StdMessageFrame))
		  + (sizeof(*sg) / sizeof(U32)));
		{
			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;

			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
			    malloc (MessageSizeInBytes, M_TEMP, M_WAITOK))
			  == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
				free (sc->ha_LCT, M_TEMP);
				sc->ha_LCT = (PI2O_LCT)NULL;
				free (Message_Ptr, M_TEMP);
				return (ENOMEM);
			}
			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
			bcopy ((caddr_t)Message_Ptr,
			  (caddr_t)NewMessage_Ptr, span);
			free (Message_Ptr, M_TEMP);
			sg = (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)NewMessage_Ptr) + span);
			Message_Ptr = NewMessage_Ptr;
		}
	}
	{	int retval;

		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
		free (Message_Ptr, M_TEMP);
		if (retval != CAM_REQ_CMP) {
			return (ENODEV);
		}
	}
	/* If the LCT table grew, lets truncate accesses */
	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
	}
	/*
	 * Classify each LCT entry. Only BSA/SCSI/FCA peripherals fall
	 * through to the DeviceInfo query; port entries take the
	 * ControllerInfo branch and then hit `default: continue'.
	 */
	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Entry) {
		Entry->le_type = I2O_UNKNOWN;
		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {

		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
			Entry->le_type = I2O_BSA;
			break;

		case I2O_CLASS_SCSI_PERIPHERAL:
			Entry->le_type = I2O_SCSI;
			break;

		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
			Entry->le_type = I2O_FCA;
			break;

		case I2O_CLASS_BUS_ADAPTER_PORT:
			Entry->le_type = I2O_PORT | I2O_SCSI;
			/* FALLTHRU */
		case I2O_CLASS_FIBRE_CHANNEL_PORT:
			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
				Entry->le_type = I2O_PORT | I2O_FCA;
			}
			{	struct ControllerInfo {
					I2O_PARAM_RESULTS_LIST_HEADER Header;
					I2O_PARAM_READ_OPERATION_RESULT Read;
					I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
				};
				defAlignLong(struct ControllerInfo, Buffer);
				PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;

				/* 0xff = "unknown" until the query succeeds. */
				Entry->le_bus = 0xff;
				Entry->le_target = 0xff;
				Entry->le_lun = 0xff;

				if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
				  ASR_getParams(sc,
				    I2O_LCT_ENTRY_getLocalTID(Entry),
				    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
				    Buffer, sizeof(struct ControllerInfo)))
				  == (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)NULL) {
					continue;
				}
				Entry->le_target
				  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
				    Info);
				Entry->le_lun = 0;
			}	/* FALLTHRU */
		default:
			continue;
		}
		{	struct DeviceInfo {
				I2O_PARAM_RESULTS_LIST_HEADER Header;
				I2O_PARAM_READ_OPERATION_RESULT Read;
				I2O_DPT_DEVICE_INFO_SCALAR Info;
			};
			defAlignLong (struct DeviceInfo, Buffer);
			PI2O_DPT_DEVICE_INFO_SCALAR Info;

			/* 0xff = "unknown" until the query succeeds. */
			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_DPT_DEVICE_INFO_GROUP_NO,
			    Buffer, sizeof(struct DeviceInfo)))
			  == (PI2O_DPT_DEVICE_INFO_SCALAR)NULL) {
				continue;
			}
			Entry->le_type
			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
			Entry->le_bus
			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
			/* Track the highest bus number seen (capped). */
			if ((Entry->le_bus > sc->ha_MaxBus)
			 && (Entry->le_bus <= MAX_CHANNEL)) {
				sc->ha_MaxBus = Entry->le_bus;
			}
			Entry->le_target
			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
			Entry->le_lun
			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
		}
	}
	/*
	 * A zero return value indicates success.
	 */
	return (0);
} /* ASR_acquireLct */
2049
2050/*
2051 * Initialize a message frame.
2052 * We assume that the CDB has already been set up, so all we do here is
2053 * generate the Scatter Gather list.
2054 */
2055STATIC INLINE PI2O_MESSAGE_FRAME
2056ASR_init_message(
2057 IN union asr_ccb * ccb,
2058 OUT PI2O_MESSAGE_FRAME Message)
2059{
2060 int next, span, base, rw;
2061 OUT PI2O_MESSAGE_FRAME Message_Ptr;
2062 Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
2063 PI2O_SGE_SIMPLE_ELEMENT sg;
2064 caddr_t v;
2065 vm_size_t size, len;
2066 U32 MessageSize;
2067
2068 /* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
2069 bzero (Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message),
2070 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT)));
2071
2072 {
2073 int target = ccb->ccb_h.target_id;
2074 int lun = ccb->ccb_h.target_lun;
2075 int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2076 tid_t TID;
2077
2078 if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
2079 PI2O_LCT_ENTRY Device;
2080
2081 TID = (tid_t)0;
2082 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2083 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
2084 ++Device) {
2085 if ((Device->le_type != I2O_UNKNOWN)
2086 && (Device->le_bus == bus)
2087 && (Device->le_target == target)
2088 && (Device->le_lun == lun)
2089 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
2090 TID = I2O_LCT_ENTRY_getLocalTID(Device);
2091 ASR_setTid (sc, Device->le_bus,
2092 Device->le_target, Device->le_lun,
2093 TID);
2094 break;
2095 }
2096 }
2097 }
2098 if (TID == (tid_t)0) {
2099 return ((PI2O_MESSAGE_FRAME)NULL);
2100 }
2101 I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
2102 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
2103 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
2104 }
2105 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
2106 (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2107 / sizeof(U32)) << 4));
2108 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
2109 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2110 - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
2111 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
2112 I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
2113 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2114 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
2115 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
2116 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2117 I2O_SCB_FLAG_ENABLE_DISCONNECT
2118 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2119 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2120 /*
2121 * We do not need any (optional byteswapping) method access to
2122 * the Initiator & Transaction context field.
2123 */
2124 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
2125
2126 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2127 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
2128 /*
2129 * copy the cdb over
2130 */
2131 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
2132 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
2133 bcopy (&(ccb->csio.cdb_io),
2134 ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB, ccb->csio.cdb_len);
2135
2136 /*
2137 * Given a buffer describing a transfer, set up a scatter/gather map
2138 * in a ccb to map that SCSI transfer.
2139 */
2140
2141 rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;
2142
2143 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
2144 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2145 (ccb->csio.dxfer_len)
2146 ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
2147 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2148 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2149 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
2150 : (I2O_SCB_FLAG_XFER_FROM_DEVICE
2151 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2152 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2153 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
2154 : (I2O_SCB_FLAG_ENABLE_DISCONNECT
2155 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2156 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2157
2158 /*
2159 * Given a transfer described by a `data', fill in the SG list.
2160 */
2161 sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];
2162
2163 len = ccb->csio.dxfer_len;
2164 v = ccb->csio.data_ptr;
2165 ASSERT (ccb->csio.dxfer_len >= 0);
2166 MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
2167 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2168 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
2169 while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2170 Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
2171 span = 0;
2172 next = base = KVTOPHYS(v);
2173 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
2174
2175 /* How far can we go contiguously */
2176 while ((len > 0) && (base == next)) {
2177 next = trunc_page(base) + PAGE_SIZE;
2178 size = next - base;
2179 if (size > len) {
2180 size = len;
2181 }
2182 span += size;
2183 v += size;
2184 len -= size;
2185 base = KVTOPHYS(v);
2186 }
2187
2188 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
2189 if (len == 0) {
2190 rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
2191 }
2192 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
2193 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
2194 ++sg;
2195 MessageSize += sizeof(*sg) / sizeof(U32);
2196 }
2197 /* We always do the request sense ... */
2198 if ((span = ccb->csio.sense_len) == 0) {
2199 span = sizeof(ccb->csio.sense_data);
2200 }
2201 SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2202 &(ccb->csio.sense_data), span);
2203 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
2204 MessageSize + (sizeof(*sg) / sizeof(U32)));
2205 return (Message_Ptr);
2206} /* ASR_init_message */
2207
2208/*
2209 * Reset the adapter.
2210 */
2211STATIC INLINE U32
2212ASR_initOutBound (
2213 INOUT Asr_softc_t * sc)
2214{
2215 struct initOutBoundMessage {
2216 I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
2217 U32 R;
2218 };
2219 defAlignLong(struct initOutBoundMessage,Message);
2220 PI2O_EXEC_OUTBOUND_INIT_MESSAGE Message_Ptr;
2221 OUT U32 * volatile Reply_Ptr;
2222 U32 Old;
2223
2224 /*
2225 * Build up our copy of the Message.
2226 */
2227 Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(Message,
2228 sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
2229 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2230 I2O_EXEC_OUTBOUND_INIT);
2231 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
2232 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
2233 sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
2234 /*
2235 * Reset the Reply Status
2236 */
2237 *(Reply_Ptr = (U32 *)((char *)Message_Ptr
2238 + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
2239 SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
2240 sizeof(U32));
2241 /*
2242 * Send the Message out
2243 */
2244 if ((Old = ASR_initiateCp (sc->ha_Virt, sc->ha_Fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
2245 u_long size, addr;
2246
2247 /*
2248 * Wait for a response (Poll).
2249 */
2250 while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
2251 /*
2252 * Re-enable the interrupts.
2253 */
2254 sc->ha_Virt->Mask = Old;
2255 /*
2256 * Populate the outbound table.
2257 */
2258 if (sc->ha_Msgs == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
2259
2260 /* Allocate the reply frames */
2261 size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
2262 * sc->ha_Msgs_Count;
2263
2264 /*
2265 * contigmalloc only works reliably at
2266 * initialization time.
2267 */
2268 if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
2269 contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
2270 0xFFFFFFFFul, (u_long)sizeof(U32), 0ul))
2271 != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
2272 (void)bzero ((char *)sc->ha_Msgs, size);
2273 sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
2274 }
2275 }
2276
2277 /* Initialize the outbound FIFO */
2278 if (sc->ha_Msgs != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL)
2279 for (size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
2280 size; --size) {
2281 sc->ha_Virt->FromFIFO = addr;
2282 addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
2283 }
2284 return (*Reply_Ptr);
2285 }
2286 return (0);
2287} /* ASR_initOutBound */
2288
2289/*
2290 * Set the system table
2291 */
2292STATIC INLINE int
2293ASR_setSysTab(
2294 IN Asr_softc_t * sc)
2295{
2296 PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
2297 PI2O_SET_SYSTAB_HEADER SystemTable;
2298 Asr_softc_t * ha;
2299 PI2O_SGE_SIMPLE_ELEMENT sg;
2300 int retVal;
2301
2302 if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc (
2303 sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK))
2304 == (PI2O_SET_SYSTAB_HEADER)NULL) {
2305 return (ENOMEM);
2306 }
2307 bzero (SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
2308 for (ha = Asr_softc; ha; ha = ha->ha_next) {
2309 ++SystemTable->NumberEntries;
2310 }
2311 if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc (
2312 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2313 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
2314 M_TEMP, M_WAITOK)) == (PI2O_EXEC_SYS_TAB_SET_MESSAGE)NULL) {
2315 free (SystemTable, M_TEMP);
2316 return (ENOMEM);
2317 }
2318 (void)ASR_fillMessage((char *)Message_Ptr,
2319 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2320 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
2321 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2322 (I2O_VERSION_11 +
2323 (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2324 / sizeof(U32)) << 4)));
2325 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2326 I2O_EXEC_SYS_TAB_SET);
2327 /*
2328 * Call the LCT table to determine the number of device entries
2329 * to reserve space for.
2330 * since this code is reused in several systems, code efficiency
2331 * is greater by using a shift operation rather than a divide by
2332 * sizeof(u_int32_t).
2333 */
2334 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
2335 + ((I2O_MESSAGE_FRAME_getVersionOffset(
2336 &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
2337 SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
2338 ++sg;
2339 for (ha = Asr_softc; ha; ha = ha->ha_next) {
2340 SG(sg, 0,
2341 ((ha->ha_next)
2342 ? (I2O_SGL_FLAGS_DIR)
2343 : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
2344 &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
2345 ++sg;
2346 }
2347 SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2348 SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
2349 | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2350 retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2351 free (Message_Ptr, M_TEMP);
2352 free (SystemTable, M_TEMP);
2353 return (retVal);
2354} /* ASR_setSysTab */
2355
/*
 * Acquire the Hardware Resource Table from the IOP and use its
 * AdapterID fields to back-fill the bus number (le_bus) of matching
 * LCT entries, tracking the highest bus seen in sc->ha_MaxBus.
 *
 * Returns 0 on success, ENODEV if the ExecHrtGet message fails.
 */
STATIC INLINE int
ASR_acquireHrt (
	INOUT Asr_softc_t * sc)
{
	defAlignLong(I2O_EXEC_HRT_GET_MESSAGE,Message);
	I2O_EXEC_HRT_GET_MESSAGE * Message_Ptr;
	struct {
		I2O_HRT	      Header;
		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
	} Hrt;
	u_int8_t NumberOfEntries;
	PI2O_HRT_ENTRY Entry;

	bzero ((void *)&Hrt, sizeof (Hrt));
	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11
	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		   / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_HRT_GET);

	/*
	 * Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &Hrt, sizeof(Hrt));
	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
		return (ENODEV);
	}
	/* Clamp: our local Hrt buffer only holds MAX_CHANNEL entries */
	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
	  > (MAX_CHANNEL + 1)) {
		NumberOfEntries = MAX_CHANNEL + 1;
	}
	for (Entry = Hrt.Header.HRTEntry;
	  NumberOfEntries != 0;
	  ++Entry, --NumberOfEntries) {
		PI2O_LCT_ENTRY Device;

		/* Match HRT adapter TID (low 12 bits) against LCT TIDs */
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		  ++Device) {
			if (I2O_LCT_ENTRY_getLocalTID(Device)
			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
				/* Bus number lives in the upper 16 bits */
				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
				  Entry) >> 16;
				if ((Device->le_bus > sc->ha_MaxBus)
				 && (Device->le_bus <= MAX_CHANNEL)) {
					sc->ha_MaxBus = Device->le_bus;
				}
			}
		}
	}
	return (0);
} /* ASR_acquireHrt */
2414
2415/*
2416 * Enable the adapter.
2417 */
2418STATIC INLINE int
2419ASR_enableSys (
2420 IN Asr_softc_t * sc)
2421{
2422 defAlignLong(I2O_EXEC_SYS_ENABLE_MESSAGE,Message);
2423 PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr;
2424
2425 Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(Message,
2426 sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2427 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2428 I2O_EXEC_SYS_ENABLE);
2429 return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2430} /* ASR_enableSys */
2431
2432/*
2433 * Perform the stages necessary to initialize the adapter
2434 */
2435STATIC int
2436ASR_init(
2437 IN Asr_softc_t * sc)
2438{
2439 return ((ASR_initOutBound(sc) == 0)
2440 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2441 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2442} /* ASR_init */
2443
2444/*
2445 * Send a Synchronize Cache command to the target device.
2446 */
2447STATIC INLINE void
2448ASR_sync (
2449 IN Asr_softc_t * sc,
2450 IN int bus,
2451 IN int target,
2452 IN int lun)
2453{
2454 tid_t TID;
2455
2456 /*
2457 * We will not synchronize the device when there are outstanding
2458 * commands issued by the OS (this is due to a locked up device,
2459 * as the OS normally would flush all outstanding commands before
2460 * issuing a shutdown or an adapter reset).
2461 */
2462 if ((sc != (Asr_softc_t *)NULL)
2463 && (LIST_FIRST(&(sc->ha_ccb)) != (struct ccb_hdr *)NULL)
2464 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
2465 && (TID != (tid_t)0)) {
2466 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
2467 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
2468
2469 bzero (Message_Ptr
2470 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
2471 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2472 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2473
2474 I2O_MESSAGE_FRAME_setVersionOffset(
2475 (PI2O_MESSAGE_FRAME)Message_Ptr,
2476 I2O_VERSION_11
2477 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2478 - sizeof(I2O_SG_ELEMENT))
2479 / sizeof(U32)) << 4));
2480 I2O_MESSAGE_FRAME_setMessageSize(
2481 (PI2O_MESSAGE_FRAME)Message_Ptr,
2482 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2483 - sizeof(I2O_SG_ELEMENT))
2484 / sizeof(U32));
2485 I2O_MESSAGE_FRAME_setInitiatorAddress (
2486 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2487 I2O_MESSAGE_FRAME_setFunction(
2488 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2489 I2O_MESSAGE_FRAME_setTargetAddress(
2490 (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
2491 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2492 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2493 I2O_SCSI_SCB_EXEC);
2494 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
2495 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2496 I2O_SCB_FLAG_ENABLE_DISCONNECT
2497 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2498 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2499 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2500 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2501 DPT_ORGANIZATION_ID);
2502 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2503 Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
2504 Message_Ptr->CDB[1] = (lun << 5);
2505
2506 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2507 (I2O_SCB_FLAG_XFER_FROM_DEVICE
2508 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2509 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2510 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2511
2512 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2513
2514 }
2515}
2516
2517STATIC INLINE void
2518ASR_synchronize (
2519 IN Asr_softc_t * sc)
2520{
2521 int bus, target, lun;
2522
2523 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2524 for (target = 0; target <= sc->ha_MaxId; ++target) {
2525 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2526 ASR_sync(sc,bus,target,lun);
2527 }
2528 }
2529 }
2530}
2531
2532/*
2533 * Reset the HBA, targets and BUS.
2534 * Currently this resets *all* the SCSI busses.
2535 */
2536STATIC INLINE void
2537asr_hbareset(
2538 IN Asr_softc_t * sc)
2539{
2540 ASR_synchronize (sc);
2541 (void)ASR_reset (sc);
2542} /* asr_hbareset */
2543
2544/*
2545 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2546 * limit and a reduction in error checking (in the pre 4.0 case).
2547 */
2548STATIC int
2549asr_pci_map_mem (
2550#if __FreeBSD_version >= 400000
2551 IN device_t tag,
2552#else
2553 IN pcici_t tag,
2554#endif
2555 IN Asr_softc_t * sc)
2556{
2557 int rid;
2558 u_int32_t p, l, s;
2559
2560#if __FreeBSD_version >= 400000
2561 /*
2562 * I2O specification says we must find first *memory* mapped BAR
2563 */
2564 for (rid = PCIR_MAPS;
2565 rid < (PCIR_MAPS + 4 * sizeof(u_int32_t));
2566 rid += sizeof(u_int32_t)) {
2567 p = pci_read_config(tag, rid, sizeof(p));
2568 if ((p & 1) == 0) {
2569 break;
2570 }
2571 }
2572 /*
2573 * Give up?
2574 */
2575 if (rid >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2576 rid = PCIR_MAPS;
2577 }
2578 p = pci_read_config(tag, rid, sizeof(p));
2579 pci_write_config(tag, rid, -1, sizeof(p));
2580 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2581 pci_write_config(tag, rid, p, sizeof(p));
2582 if (l > MAX_MAP) {
2583 l = MAX_MAP;
2584 }
2585 /*
2586 * The 2005S Zero Channel RAID solution is not a perfect PCI
2587 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
2588 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
2589 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
2590 * accessible via BAR0, the messaging registers are accessible
2591 * via BAR1. If the subdevice code is 50 to 59 decimal.
2592 */
2593 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s));
2594 if (s != 0xA5111044) {
2595 s = pci_read_config(tag, PCIR_SUBVEND_0, sizeof(s));
2596 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2597 && (ADPTDOMINATOR_SUB_ID_START <= s)
2598 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2599 l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
2600 }
2601 }
2602 p &= ~15;
2603 sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2604 p, p + l, l, RF_ACTIVE);
2605 if (sc->ha_mem_res == (struct resource *)NULL) {
2606 return (0);
2607 }
2608 sc->ha_Base = (void *)rman_get_start(sc->ha_mem_res);
2609 if (sc->ha_Base == (void *)NULL) {
2610 return (0);
2611 }
2612 sc->ha_Virt = (i2oRegs_t *) rman_get_virtual(sc->ha_mem_res);
2613 if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
2614 if ((rid += sizeof(u_int32_t))
2615 >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2616 return (0);
2617 }
2618 p = pci_read_config(tag, rid, sizeof(p));
2619 pci_write_config(tag, rid, -1, sizeof(p));
2620 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2621 pci_write_config(tag, rid, p, sizeof(p));
2622 if (l > MAX_MAP) {
2623 l = MAX_MAP;
2624 }
2625 p &= ~15;
2626 sc->ha_mes_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2627 p, p + l, l, RF_ACTIVE);
2628 if (sc->ha_mes_res == (struct resource *)NULL) {
2629 return (0);
2630 }
2631 if ((void *)rman_get_start(sc->ha_mes_res) == (void *)NULL) {
2632 return (0);
2633 }
2634 sc->ha_Fvirt = (U8 *) rman_get_virtual(sc->ha_mes_res);
2635 } else {
2636 sc->ha_Fvirt = (U8 *)(sc->ha_Virt);
2637 }
2638#else
2639 vm_size_t psize, poffs;
2640
2641 /*
2642 * I2O specification says we must find first *memory* mapped BAR
2643 */
2644 for (rid = PCI_MAP_REG_START;
2645 rid < (PCI_MAP_REG_START + 4 * sizeof(u_int32_t));
2646 rid += sizeof(u_int32_t)) {
2647 p = pci_conf_read (tag, rid);
2648 if ((p & 1) == 0) {
2649 break;
2650 }
2651 }
2652 if (rid >= (PCI_MAP_REG_START + 4 * sizeof(u_int32_t))) {
2653 rid = PCI_MAP_REG_START;
2654 }
2655 /*
2656 ** save old mapping, get size and type of memory
2657 **
2658 ** type is in the lowest four bits.
2659 ** If device requires 2^n bytes, the next
2660 ** n-4 bits are read as 0.
2661 */
2662
2663 sc->ha_Base = (void *)((p = pci_conf_read (tag, rid))
2664 & PCI_MAP_MEMORY_ADDRESS_MASK);
2665 pci_conf_write (tag, rid, 0xfffffffful);
2666 l = pci_conf_read (tag, rid);
2667 pci_conf_write (tag, rid, p);
2668
2669 /*
2670 ** check the type
2671 */
2672
2673 if (!((l & PCI_MAP_MEMORY_TYPE_MASK) == PCI_MAP_MEMORY_TYPE_32BIT_1M
2674 && ((u_long)sc->ha_Base & ~0xfffff) == 0)
2675 && ((l & PCI_MAP_MEMORY_TYPE_MASK) != PCI_MAP_MEMORY_TYPE_32BIT)) {
2676 debug_asr_printf (
2677 "asr_pci_map_mem failed: bad memory type=0x%x\n",
2678 (unsigned) l);
2679 return (0);
2680 };
2681
2682 /*
2683 ** get the size.
2684 */
2685
2686 psize = -(l & PCI_MAP_MEMORY_ADDRESS_MASK);
2687 if (psize > MAX_MAP) {
2688 psize = MAX_MAP;
2689 }
2690 /*
2691 * The 2005S Zero Channel RAID solution is not a perfect PCI
2692 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
2693 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
2694 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
2695 * accessible via BAR0, the messaging registers are accessible
2696 * via BAR1. If the subdevice code is 50 to 59 decimal.
2697 */
2698 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s));
2699 if (s != 0xA5111044) {
2700 s = pci_conf_read (tag, PCIR_SUBVEND_0)
2701 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2702 && (ADPTDOMINATOR_SUB_ID_START <= s)
2703 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2704 psize = MAX_MAP;
2705 }
2706 }
2707
2708 if ((sc->ha_Base == (void *)NULL)
2709 || (sc->ha_Base == (void *)PCI_MAP_MEMORY_ADDRESS_MASK)) {
2710 debug_asr_printf ("asr_pci_map_mem: not configured by bios.\n");
2711 return (0);
2712 };
2713
2714 /*
2715 ** Truncate sc->ha_Base to page boundary.
2716 ** (Or does pmap_mapdev the job?)
2717 */
2718
2719 poffs = (u_long)sc->ha_Base - trunc_page ((u_long)sc->ha_Base);
2720 sc->ha_Virt = (i2oRegs_t *)pmap_mapdev ((u_long)sc->ha_Base - poffs,
2721 psize + poffs);
2722
2723 if (sc->ha_Virt == (i2oRegs_t *)NULL) {
2724 return (0);
2725 }
2726
2727 sc->ha_Virt = (i2oRegs_t *)((u_long)sc->ha_Virt + poffs);
2728 if (s == 0xA5111044) {
2729 if ((rid += sizeof(u_int32_t))
2730 >= (PCI_MAP_REG_START + 4 * sizeof(u_int32_t))) {
2731 return (0);
2732 }
2733
2734 /*
2735 ** save old mapping, get size and type of memory
2736 **
2737 ** type is in the lowest four bits.
2738 ** If device requires 2^n bytes, the next
2739 ** n-4 bits are read as 0.
2740 */
2741
2742 if ((((p = pci_conf_read (tag, rid))
2743 & PCI_MAP_MEMORY_ADDRESS_MASK) == 0L)
2744 || ((p & PCI_MAP_MEMORY_ADDRESS_MASK)
2745 == PCI_MAP_MEMORY_ADDRESS_MASK)) {
2746 debug_asr_printf ("asr_pci_map_mem: not configured by bios.\n");
2747 }
2748 pci_conf_write (tag, rid, 0xfffffffful);
2749 l = pci_conf_read (tag, rid);
2750 pci_conf_write (tag, rid, p);
2751 p &= PCI_MAP_MEMORY_TYPE_MASK;
2752
2753 /*
2754 ** check the type
2755 */
2756
2757 if (!((l & PCI_MAP_MEMORY_TYPE_MASK)
2758 == PCI_MAP_MEMORY_TYPE_32BIT_1M
2759 && (p & ~0xfffff) == 0)
2760 && ((l & PCI_MAP_MEMORY_TYPE_MASK)
2761 != PCI_MAP_MEMORY_TYPE_32BIT)) {
2762 debug_asr_printf (
2763 "asr_pci_map_mem failed: bad memory type=0x%x\n",
2764 (unsigned) l);
2765 return (0);
2766 };
2767
2768 /*
2769 ** get the size.
2770 */
2771
2772 psize = -(l & PCI_MAP_MEMORY_ADDRESS_MASK);
2773 if (psize > MAX_MAP) {
2774 psize = MAX_MAP;
2775 }
2776
2777 /*
2778 ** Truncate p to page boundary.
2779 ** (Or does pmap_mapdev the job?)
2780 */
2781
2782 poffs = p - trunc_page (p);
2783 sc->ha_Fvirt = (U8 *)pmap_mapdev (p - poffs, psize + poffs);
2784
2785 if (sc->ha_Fvirt == (U8 *)NULL) {
2786 return (0);
2787 }
2788
2789 sc->ha_Fvirt = (U8 *)((u_long)sc->ha_Fvirt + poffs);
2790 } else {
2791 sc->ha_Fvirt = (U8 *)(sc->ha_Virt);
2792 }
2793#endif
2794 return (1);
2795} /* asr_pci_map_mem */
2796
2797/*
2798 * A simplified copy of the real pci_map_int with additional
2799 * registration requirements.
2800 */
2801STATIC int
2802asr_pci_map_int (
2803#if __FreeBSD_version >= 400000
2804 IN device_t tag,
2805#else
2806 IN pcici_t tag,
2807#endif
2808 IN Asr_softc_t * sc)
2809{
2810#if __FreeBSD_version >= 400000
2811 int rid = 0;
2812
2813 sc->ha_irq_res = bus_alloc_resource(tag, SYS_RES_IRQ, &rid,
2814 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE);
2815 if (sc->ha_irq_res == (struct resource *)NULL) {
2816 return (0);
2817 }
2818 if (bus_setup_intr(tag, sc->ha_irq_res, INTR_TYPE_CAM,
2819 (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr))) {
2820 return (0);
2821 }
2822 sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char));
2823#else
2824 if (!pci_map_int(tag, (pci_inthand_t *)asr_intr,
2825 (void *)sc, &cam_imask)) {
2826 return (0);
2827 }
2828 sc->ha_irq = pci_conf_read(tag, PCIR_INTLINE);
2829#endif
2830 return (1);
2831} /* asr_pci_map_int */
2832
2833/*
2834 * Attach the devices, and virtual devices to the driver list.
2835 */
2836STATIC ATTACH_RET
2837asr_attach (ATTACH_ARGS)
2838{
2839 Asr_softc_t * sc;
2840 struct scsi_inquiry_data * iq;
2841 ATTACH_SET();
2842
2843 if ((sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT)) == (Asr_softc_t *)NULL) {
2844 ATTACH_RETURN(ENOMEM);
2845 }
2846 if (Asr_softc == (Asr_softc_t *)NULL) {
2847 /*
2848 * Fixup the OS revision as saved in the dptsig for the
2849 * engine (dptioctl.h) to pick up.
2850 */
2851 bcopy (osrelease, &ASR_sig.dsDescription[16], 5);
2852 printf ("asr%d: major=%d\n", unit, asr_cdevsw.d_maj);
2853 }
2854 /*
2855 * Initialize the software structure
2856 */
2857 bzero (sc, sizeof(*sc));
2858 LIST_INIT(&(sc->ha_ccb));
2859# ifdef ASR_MEASURE_PERFORMANCE
2860 {
2861 u_int32_t i;
2862
2863 // initialize free list for timeQ
2864 sc->ha_timeQFreeHead = 0;
2865 sc->ha_timeQFreeTail = MAX_TIMEQ_SIZE - 1;
2866 for (i = 0; i < MAX_TIMEQ_SIZE; i++) {
2867 sc->ha_timeQFreeList[i] = i;
2868 }
2869 }
2870# endif
2871 /* Link us into the HA list */
2872 {
2873 Asr_softc_t **ha;
2874
2875 for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next));
2876 *(ha) = sc;
2877 }
2878 {
2879 PI2O_EXEC_STATUS_GET_REPLY status;
2880 int size;
2881
2882 /*
2883 * This is the real McCoy!
2884 */
2885 if (!asr_pci_map_mem(tag, sc)) {
2886 printf ("asr%d: could not map memory\n", unit);
2887 ATTACH_RETURN(ENXIO);
2888 }
2889 /* Enable if not formerly enabled */
2890#if __FreeBSD_version >= 400000
2891 pci_write_config (tag, PCIR_COMMAND,
2892 pci_read_config (tag, PCIR_COMMAND, sizeof(char))
2893 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2894 /* Knowledge is power, responsibility is direct */
2895 {
2896 struct pci_devinfo {
2897 STAILQ_ENTRY(pci_devinfo) pci_links;
2898 struct resource_list resources;
2899 pcicfgregs cfg;
2900 } * dinfo = device_get_ivars(tag);
2901 sc->ha_pciBusNum = dinfo->cfg.bus;
2902 sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3)
2903 | dinfo->cfg.func;
2904 }
2905#else
2906 pci_conf_write (tag, PCIR_COMMAND,
2907 pci_conf_read (tag, PCIR_COMMAND)
2908 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
2909 /* Knowledge is power, responsibility is direct */
2910 switch (pci_mechanism) {
2911
2912 case 1:
2913 sc->ha_pciBusNum = tag.cfg1 >> 16;
2914 sc->ha_pciDeviceNum = tag.cfg1 >> 8;
2915
2916 case 2:
2917 sc->ha_pciBusNum = tag.cfg2.forward;
2918 sc->ha_pciDeviceNum = ((tag.cfg2.enable >> 1) & 7)
2919 | (tag.cfg2.port >> 5);
2920 }
2921#endif
2922 /* Check if the device is there? */
2923 if ((ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt) == 0)
2924 || ((status = (PI2O_EXEC_STATUS_GET_REPLY)malloc (
2925 sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK))
2926 == (PI2O_EXEC_STATUS_GET_REPLY)NULL)
2927 || (ASR_getStatus(sc->ha_Virt, sc->ha_Fvirt, status) == NULL)) {
2928 printf ("asr%d: could not initialize hardware\n", unit);
2929 ATTACH_RETURN(ENODEV); /* Get next, maybe better luck */
2930 }
2931 sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2932 sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2933 sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2934 sc->ha_SystemTable.IopState = status->IopState;
2935 sc->ha_SystemTable.MessengerType = status->MessengerType;
2936 sc->ha_SystemTable.InboundMessageFrameSize
2937 = status->InboundMFrameSize;
2938 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow
2939 = (U32)(sc->ha_Base) + (U32)(&(((i2oRegs_t *)NULL)->ToFIFO));
2940
2941 if (!asr_pci_map_int(tag, (void *)sc)) {
2942 printf ("asr%d: could not map interrupt\n", unit);
2943 ATTACH_RETURN(ENXIO);
2944 }
2945
2946 /* Adjust the maximim inbound count */
2947 if (((sc->ha_QueueSize
2948 = I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status))
2949 > MAX_INBOUND)
2950 || (sc->ha_QueueSize == 0)) {
2951 sc->ha_QueueSize = MAX_INBOUND;
2952 }
2953
2954 /* Adjust the maximum outbound count */
2955 if (((sc->ha_Msgs_Count
2956 = I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status))
2957 > MAX_OUTBOUND)
2958 || (sc->ha_Msgs_Count == 0)) {
2959 sc->ha_Msgs_Count = MAX_OUTBOUND;
2960 }
2961 if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2962 sc->ha_Msgs_Count = sc->ha_QueueSize;
2963 }
2964
2965 /* Adjust the maximum SG size to adapter */
2966 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(
2967 status) << 2)) > MAX_INBOUND_SIZE) {
2968 size = MAX_INBOUND_SIZE;
2969 }
2970 free (status, M_TEMP);
2971 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2972 + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2973 }
2974
2975 /*
2976 * Only do a bus/HBA reset on the first time through. On this
2977 * first time through, we do not send a flush to the devices.
2978 */
2979 if (ASR_init(sc) == 0) {
2980 struct BufferInfo {
2981 I2O_PARAM_RESULTS_LIST_HEADER Header;
2982 I2O_PARAM_READ_OPERATION_RESULT Read;
2983 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2984 };
2985 defAlignLong (struct BufferInfo, Buffer);
2986 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2987# define FW_DEBUG_BLED_OFFSET 8
2988
2989 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2990 ASR_getParams(sc, 0,
2991 I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2992 Buffer, sizeof(struct BufferInfo)))
2993 != (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)NULL) {
2994 sc->ha_blinkLED = sc->ha_Fvirt
2995 + I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info)
2996 + FW_DEBUG_BLED_OFFSET;
2997 }
2998 if (ASR_acquireLct(sc) == 0) {
2999 (void)ASR_acquireHrt(sc);
3000 }
3001 } else {
3002 printf ("asr%d: failed to initialize\n", unit);
3003 ATTACH_RETURN(ENXIO);
3004 }
3005 /*
3006 * Add in additional probe responses for more channels. We
3007 * are reusing the variable `target' for a channel loop counter.
3008 * Done here because of we need both the acquireLct and
3009 * acquireHrt data.
3010 */
3011 { PI2O_LCT_ENTRY Device;
3012
3013 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
3014 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
3015 ++Device) {
3016 if (Device->le_type == I2O_UNKNOWN) {
3017 continue;
3018 }
3019 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
3020 if (Device->le_target > sc->ha_MaxId) {
3021 sc->ha_MaxId = Device->le_target;
3022 }
3023 if (Device->le_lun > sc->ha_MaxLun) {
3024 sc->ha_MaxLun = Device->le_lun;
3025 }
3026 }
3027 if (((Device->le_type & I2O_PORT) != 0)
3028 && (Device->le_bus <= MAX_CHANNEL)) {
3029 /* Do not increase MaxId for efficiency */
3030 sc->ha_adapter_target[Device->le_bus]
3031 = Device->le_target;
3032 }
3033 }
3034 }
3035
3036
3037 /*
3038 * Print the HBA model number as inquired from the card.
3039 */
3040
3041 printf ("asr%d:", unit);
3042
3043 if ((iq = (struct scsi_inquiry_data *)malloc (
3044 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK))
3045 != (struct scsi_inquiry_data *)NULL) {
3046 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
3047 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
3048 int posted = 0;
3049
3050 bzero (iq, sizeof(struct scsi_inquiry_data));
3051 bzero (Message_Ptr
3052 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
3053 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
3054 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
3055
3056 I2O_MESSAGE_FRAME_setVersionOffset(
3057 (PI2O_MESSAGE_FRAME)Message_Ptr,
3058 I2O_VERSION_11
3059 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
3060 - sizeof(I2O_SG_ELEMENT))
3061 / sizeof(U32)) << 4));
3062 I2O_MESSAGE_FRAME_setMessageSize(
3063 (PI2O_MESSAGE_FRAME)Message_Ptr,
3064 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
3065 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT))
3066 / sizeof(U32));
3067 I2O_MESSAGE_FRAME_setInitiatorAddress (
3068 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
3069 I2O_MESSAGE_FRAME_setFunction(
3070 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
3071 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
3072 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
3073 I2O_SCSI_SCB_EXEC);
3074 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
3075 I2O_SCB_FLAG_ENABLE_DISCONNECT
3076 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
3077 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
3078 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
3079 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
3080 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
3081 DPT_ORGANIZATION_ID);
3082 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
3083 Message_Ptr->CDB[0] = INQUIRY;
3084 Message_Ptr->CDB[4] = (unsigned char)sizeof(struct scsi_inquiry_data);
3085 if (Message_Ptr->CDB[4] == 0) {
3086 Message_Ptr->CDB[4] = 255;
3087 }
3088
3089 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
3090 (I2O_SCB_FLAG_XFER_FROM_DEVICE
3091 | I2O_SCB_FLAG_ENABLE_DISCONNECT
3092 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
3093 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
3094
3095 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
3096 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
3097 sizeof(struct scsi_inquiry_data));
3098 SG(&(Message_Ptr->SGL), 0,
3099 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
3100 iq, sizeof(struct scsi_inquiry_data));
3101 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
3102
3103 if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
3104 printf (" ");
3105 ASR_prstring (iq->vendor, 8);
3106 ++posted;
3107 }
3108 if (iq->product[0] && (iq->product[0] != ' ')) {
3109 printf (" ");
3110 ASR_prstring (iq->product, 16);
3111 ++posted;
3112 }
3113 if (iq->revision[0] && (iq->revision[0] != ' ')) {
3114 printf (" FW Rev. ");
3115 ASR_prstring (iq->revision, 4);
3116 ++posted;
3117 }
3118 free ((caddr_t)iq, M_TEMP);
3119 if (posted) {
3120 printf (",");
3121 }
3122 }
3123 printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
3124 (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
3125
3126 /*
3127 * fill in the prototype cam_path.
3128 */
3129 {
3130 int bus;
3131 union asr_ccb * ccb;
3132
3133 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
3134 printf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit);
3135 ATTACH_RETURN(ENOMEM);
3136 }
3137 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
3138 struct cam_devq * devq;
3139 int QueueSize = sc->ha_QueueSize;
3140
3141 if (QueueSize > MAX_INBOUND) {
3142 QueueSize = MAX_INBOUND;
3143 }
3144
3145 /*
3146 * Create the device queue for our SIM(s).
3147 */
3148 if ((devq = cam_simq_alloc(QueueSize)) == NULL) {
3149 continue;
3150 }
3151
3152 /*
3153 * Construct our first channel SIM entry
3154 */
3155 sc->ha_sim[bus] = cam_sim_alloc(
3156 asr_action, asr_poll, "asr", sc,
3157 unit, 1, QueueSize, devq);
3158 if (sc->ha_sim[bus] == NULL) {
3159 continue;
3160 }
3161
3162 if (xpt_bus_register(sc->ha_sim[bus], bus)
3163 != CAM_SUCCESS) {
3164 cam_sim_free(sc->ha_sim[bus],
3165 /*free_devq*/TRUE);
3166 sc->ha_sim[bus] = NULL;
3167 continue;
3168 }
3169
3170 if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
3171 cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
3172 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
3173 xpt_bus_deregister(
3174 cam_sim_path(sc->ha_sim[bus]));
3175 cam_sim_free(sc->ha_sim[bus],
3176 /*free_devq*/TRUE);
3177 sc->ha_sim[bus] = NULL;
3178 continue;
3179 }
3180 }
3181 asr_free_ccb (ccb);
3182 }
3183 /*
3184 * Generate the device node information
3185 */
3186 (void)make_dev(&asr_cdevsw, unit, 0, 0, S_IRWXU, "rasr%d", unit);
3187 destroy_dev(makedev(asr_cdevsw.d_maj,unit+1));
3188 ATTACH_RETURN(0);
3189} /* asr_attach */
3190
3191STATIC void
3192asr_poll(
3193 IN struct cam_sim *sim)
3194{
3195 asr_intr(cam_sim_softc(sim));
3196} /* asr_poll */
3197
3198STATIC void
3199asr_action(
3200 IN struct cam_sim * sim,
3201 IN union ccb * ccb)
3202{
3203 struct Asr_softc * sc;
3204
3205 debug_asr_printf ("asr_action(%lx,%lx{%x})\n",
3206 (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code);
3207
3208 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));
3209
3210 ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);
3211
3212 switch (ccb->ccb_h.func_code) {
3213
3214 /* Common cases first */
3215 case XPT_SCSI_IO: /* Execute the requested I/O operation */
3216 {
3217 struct Message {
3218 char M[MAX_INBOUND_SIZE];
3219 };
3220 defAlignLong(struct Message,Message);
3221 PI2O_MESSAGE_FRAME Message_Ptr;
3222
3223 /* Reject incoming commands while we are resetting the card */
3224 if (sc->ha_in_reset != HA_OPERATIONAL) {
3225 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3226 if (sc->ha_in_reset >= HA_OFF_LINE) {
3227 /* HBA is now off-line */
3228 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
3229 } else {
3230 /* HBA currently resetting, try again later. */
3231 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3232 }
3233 debug_asr_cmd_printf (" e\n");
3234 xpt_done(ccb);
3235 debug_asr_cmd_printf (" q\n");
3236 break;
3237 }
3238 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
3239 printf(
3240 "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
3241 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
3242 ccb->csio.cdb_io.cdb_bytes[0],
3243 cam_sim_bus(sim),
3244 ccb->ccb_h.target_id,
3245 ccb->ccb_h.target_lun);
3246 }
3247 debug_asr_cmd_printf ("(%d,%d,%d,%d)",
3248 cam_sim_unit(sim),
3249 cam_sim_bus(sim),
3250 ccb->ccb_h.target_id,
3251 ccb->ccb_h.target_lun);
3252 debug_asr_cmd_dump_ccb(ccb);
3253
3254 if ((Message_Ptr = ASR_init_message ((union asr_ccb *)ccb,
3255 (PI2O_MESSAGE_FRAME)Message)) != (PI2O_MESSAGE_FRAME)NULL) {
3256 debug_asr_cmd2_printf ("TID=%x:\n",
3257 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
3258 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
3259 debug_asr_cmd2_dump_message(Message_Ptr);
3260 debug_asr_cmd1_printf (" q");
3261
3262 if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
3263#ifdef ASR_MEASURE_PERFORMANCE
3264 ++sc->ha_performance.command_too_busy;
3265#endif
3266 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3267 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3268 debug_asr_cmd_printf (" E\n");
3269 xpt_done(ccb);
3270 }
3271 debug_asr_cmd_printf (" Q\n");
3272 break;
3273 }
3274 /*
3275 * We will get here if there is no valid TID for the device
3276 * referenced in the scsi command packet.
3277 */
3278 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3279 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3280 debug_asr_cmd_printf (" B\n");
3281 xpt_done(ccb);
3282 break;
3283 }
3284
3285 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
3286 /* Rese HBA device ... */
3287 asr_hbareset (sc);
3288 ccb->ccb_h.status = CAM_REQ_CMP;
3289 xpt_done(ccb);
3290 break;
3291
3292# if (defined(REPORT_LUNS))
3293 case REPORT_LUNS:
3294# endif
3295 case XPT_ABORT: /* Abort the specified CCB */
3296 /* XXX Implement */
3297 ccb->ccb_h.status = CAM_REQ_INVALID;
3298 xpt_done(ccb);
3299 break;
3300
3301 case XPT_SET_TRAN_SETTINGS:
3302 /* XXX Implement */
3303 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3304 xpt_done(ccb);
3305 break;
3306
3307 case XPT_GET_TRAN_SETTINGS:
3308 /* Get default/user set transfer settings for the target */
3309 {
3310 struct ccb_trans_settings *cts;
3311 u_int target_mask;
3312
3313 cts = &(ccb->cts);
3314 target_mask = 0x01 << ccb->ccb_h.target_id;
3315 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
3316 cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
3317 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3318 cts->sync_period = 6; /* 40MHz */
3319 cts->sync_offset = 15;
3320
3321 cts->valid = CCB_TRANS_SYNC_RATE_VALID
3322 | CCB_TRANS_SYNC_OFFSET_VALID
3323 | CCB_TRANS_BUS_WIDTH_VALID
3324 | CCB_TRANS_DISC_VALID
3325 | CCB_TRANS_TQ_VALID;
3326 ccb->ccb_h.status = CAM_REQ_CMP;
3327 } else {
3328 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3329 }
3330 xpt_done(ccb);
3331 break;
3332 }
3333
3334 case XPT_CALC_GEOMETRY:
3335 {
3336 struct ccb_calc_geometry *ccg;
3337 u_int32_t size_mb;
3338 u_int32_t secs_per_cylinder;
3339
3340 ccg = &(ccb->ccg);
3341 size_mb = ccg->volume_size
3342 / ((1024L * 1024L) / ccg->block_size);
3343
3344 if (size_mb > 4096) {
3345 ccg->heads = 255;
3346 ccg->secs_per_track = 63;
3347 } else if (size_mb > 2048) {
3348 ccg->heads = 128;
3349 ccg->secs_per_track = 63;
3350 } else if (size_mb > 1024) {
3351 ccg->heads = 65;
3352 ccg->secs_per_track = 63;
3353 } else {
3354 ccg->heads = 64;
3355 ccg->secs_per_track = 32;
3356 }
3357 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
3358 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
3359 ccb->ccb_h.status = CAM_REQ_CMP;
3360 xpt_done(ccb);
3361 break;
3362 }
3363
3364 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
3365 ASR_resetBus (sc, cam_sim_bus(sim));
3366 ccb->ccb_h.status = CAM_REQ_CMP;
3367 xpt_done(ccb);
3368 break;
3369
3370 case XPT_TERM_IO: /* Terminate the I/O process */
3371 /* XXX Implement */
3372 ccb->ccb_h.status = CAM_REQ_INVALID;
3373 xpt_done(ccb);
3374 break;
3375
3376 case XPT_PATH_INQ: /* Path routing inquiry */
3377 {
3378 struct ccb_pathinq *cpi = &(ccb->cpi);
3379
3380 cpi->version_num = 1; /* XXX??? */
3381 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3382 cpi->target_sprt = 0;
3383 /* Not necessary to reset bus, done by HDM initialization */
3384 cpi->hba_misc = PIM_NOBUSRESET;
3385 cpi->hba_eng_cnt = 0;
3386 cpi->max_target = sc->ha_MaxId;
3387 cpi->max_lun = sc->ha_MaxLun;
3388 cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
3389 cpi->bus_id = cam_sim_bus(sim);
3390 cpi->base_transfer_speed = 3300;
3391 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3392 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
3393 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3394 cpi->unit_number = cam_sim_unit(sim);
3395 cpi->ccb_h.status = CAM_REQ_CMP;
3396 xpt_done(ccb);
3397 break;
3398 }
3399 default:
3400 ccb->ccb_h.status = CAM_REQ_INVALID;
3401 xpt_done(ccb);
3402 break;
3403 }
3404} /* asr_action */
3405
3406#ifdef ASR_MEASURE_PERFORMANCE
3407#define WRITE_OP 1
3408#define READ_OP 2
3409#define min_submitR sc->ha_performance.read_by_size_min_time[index]
3410#define max_submitR sc->ha_performance.read_by_size_max_time[index]
3411#define min_submitW sc->ha_performance.write_by_size_min_time[index]
3412#define max_submitW sc->ha_performance.write_by_size_max_time[index]
3413
/*
 * Accumulate per-transfer-size I/O timing statistics into
 * sc->ha_performance.  `op' selects the READ_OP or WRITE_OP bucket and
 * `index' selects the size bucket (SIZE_512 .. SIZE_BIGGER, chosen by
 * the caller).  `submitted_time' is the command's round-trip time in
 * microseconds; the sentinel 0xffffffff means "no timestamp available"
 * and only bumps the per-size count.  The min_submit*/max_submit*
 * macros (defined just above) expand to the corresponding
 * ha_performance array slots at `index'.
 */
STATIC INLINE void
asr_IObySize(
	IN Asr_softc_t * sc,
	IN u_int32_t submitted_time,
	IN int op,
	IN int index)
{
	struct timeval submitted_timeval;

	/* Round-trip time expressed as a timeval for timevaladd().
	 * NOTE(review): tv_usec may exceed 1e6 here; presumably consumers
	 * of the totals tolerate unnormalized timevals -- confirm. */
	submitted_timeval.tv_sec = 0;
	submitted_timeval.tv_usec = submitted_time;

	if ( op == READ_OP ) {
		++sc->ha_performance.read_by_size_count[index];

		if ( submitted_time != 0xffffffff ) {
			timevaladd(
			  &(sc->ha_performance.read_by_size_total_time[index]),
			  &submitted_timeval);
			/* min of 0 means "not yet recorded" */
			if ( (min_submitR == 0)
			 || (submitted_time < min_submitR) ) {
				min_submitR = submitted_time;
			}

			if ( submitted_time > max_submitR ) {
				max_submitR = submitted_time;
			}
		}
	} else {
		/* WRITE_OP (and any other non-READ op) lands here. */
		++sc->ha_performance.write_by_size_count[index];
		if ( submitted_time != 0xffffffff ) {
			timevaladd(
			  &(sc->ha_performance.write_by_size_total_time[index]),
			  &submitted_timeval);
			if ( (submitted_time < min_submitW)
			 || (min_submitW == 0) ) {
				min_submitW = submitted_time;
			}

			if ( submitted_time > max_submitW ) {
				max_submitW = submitted_time;
			}
		}
	}
} /* asr_IObySize */
3459#endif
3460
3461/*
3462 * Handle processing of current CCB as pointed to by the Status.
3463 */
3464STATIC int
3465asr_intr (
3466 IN Asr_softc_t * sc)
3467{
3468 OUT int processed;
3469
3470#ifdef ASR_MEASURE_PERFORMANCE
3471 struct timeval junk;
3472
3473 microtime(&junk);
3474 sc->ha_performance.intr_started = junk;
3475#endif
3476
3477 for (processed = 0;
3478 sc->ha_Virt->Status & Mask_InterruptsDisabled;
3479 processed = 1) {
3480 union asr_ccb * ccb;
3481 U32 ReplyOffset;
3482 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
3483
3484 if (((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)
3485 && ((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)) {
3486 break;
3487 }
3488 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
3489 - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
3490 /*
3491 * We do not need any (optional byteswapping) method access to
3492 * the Initiator context field.
3493 */
3494 ccb = (union asr_ccb *)(long)
3495 I2O_MESSAGE_FRAME_getInitiatorContext64(
3496 &(Reply->StdReplyFrame.StdMessageFrame));
3497 if (I2O_MESSAGE_FRAME_getMsgFlags(
3498 &(Reply->StdReplyFrame.StdMessageFrame))
3499 & I2O_MESSAGE_FLAGS_FAIL) {
3500 defAlignLong(I2O_UTIL_NOP_MESSAGE,Message);
3501 PI2O_UTIL_NOP_MESSAGE Message_Ptr;
3502 U32 MessageOffset;
3503
3504 MessageOffset = (u_long)
3505 I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
3506 (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
3507 /*
3508 * Get the Original Message Frame's address, and get
3509 * it's Transaction Context into our space. (Currently
3510 * unused at original authorship, but better to be
3511 * safe than sorry). Straight copy means that we
3512 * need not concern ourselves with the (optional
3513 * byteswapping) method access.
3514 */
3515 Reply->StdReplyFrame.TransactionContext
3516 = ((PI2O_SINGLE_REPLY_MESSAGE_FRAME)
3517 (sc->ha_Fvirt + MessageOffset))->TransactionContext;
3518 /*
3519 * For 64 bit machines, we need to reconstruct the
3520 * 64 bit context.
3521 */
3522 ccb = (union asr_ccb *)(long)
3523 I2O_MESSAGE_FRAME_getInitiatorContext64(
3524 &(Reply->StdReplyFrame.StdMessageFrame));
3525 /*
3526 * Unique error code for command failure.
3527 */
3528 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3529 &(Reply->StdReplyFrame), (u_int16_t)-2);
3530 /*
3531 * Modify the message frame to contain a NOP and
3532 * re-issue it to the controller.
3533 */
3534 Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
3535 Message, sizeof(I2O_UTIL_NOP_MESSAGE));
3536# if (I2O_UTIL_NOP != 0)
3537 I2O_MESSAGE_FRAME_setFunction (
3538 &(Message_Ptr->StdMessageFrame),
3539 I2O_UTIL_NOP);
3540# endif
3541 /*
3542 * Copy the packet out to the Original Message
3543 */
3544 bcopy ((caddr_t)Message_Ptr,
3545 sc->ha_Fvirt + MessageOffset,
3546 sizeof(I2O_UTIL_NOP_MESSAGE));
3547 /*
3548 * Issue the NOP
3549 */
3550 sc->ha_Virt->ToFIFO = MessageOffset;
3551 }
3552
3553 /*
3554 * Asynchronous command with no return requirements,
3555 * and a generic handler for immunity against odd error
3556 * returns from the adapter.
3557 */
3558 if (ccb == (union asr_ccb *)NULL) {
3559 /*
3560 * Return Reply so that it can be used for the
3561 * next command
3562 */
3563 sc->ha_Virt->FromFIFO = ReplyOffset;
3564 continue;
3565 }
3566
3567 /* Welease Wadjah! (and stop timeouts) */
3568 ASR_ccbRemove (sc, ccb);
3569
3570 switch (
3571 I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
3572 &(Reply->StdReplyFrame))) {
3573
3574 case I2O_SCSI_DSC_SUCCESS:
3575 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3576 ccb->ccb_h.status |= CAM_REQ_CMP;
3577 break;
3578
3579 case I2O_SCSI_DSC_CHECK_CONDITION:
3580 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3581 ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID;
3582 break;
3583
3584 case I2O_SCSI_DSC_BUSY:
3585 /* FALLTHRU */
3586 case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
3587 /* FALLTHRU */
3588 case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
3589 /* FALLTHRU */
3590 case I2O_SCSI_HBA_DSC_BUS_BUSY:
3591 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3592 ccb->ccb_h.status |= CAM_SCSI_BUSY;
3593 break;
3594
3595 case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
3596 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3597 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3598 break;
3599
3600 case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
3601 /* FALLTHRU */
3602 case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
3603 /* FALLTHRU */
3604 case I2O_SCSI_HBA_DSC_LUN_INVALID:
3605 /* FALLTHRU */
3606 case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
3607 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3608 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
3609 break;
3610
3611 case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
3612 /* FALLTHRU */
3613 case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
3614 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3615 ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
3616 break;
3617
3618 default:
3619 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3620 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3621 break;
3622 }
3623 if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
3624 ccb->csio.resid -=
3625 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
3626 Reply);
3627 }
3628
3629#ifdef ASR_MEASURE_PERFORMANCE
3630 {
3631 struct timeval endTime;
3632 u_int32_t submitted_time;
3633 u_int32_t size;
3634 int op_type;
3635 int startTimeIndex;
3636
3637 --sc->ha_submitted_ccbs_count;
3638 startTimeIndex
3639 = (int)Reply->StdReplyFrame.TransactionContext;
3640 if (-1 != startTimeIndex) {
3641 /* Compute the time spent in device/adapter */
3642 microtime(&endTime);
3643 submitted_time = asr_time_delta(sc->ha_timeQ[
3644 startTimeIndex], endTime);
3645 /* put the startTimeIndex back on free list */
3646 ENQ_TIMEQ_FREE_LIST(startTimeIndex,
3647 sc->ha_timeQFreeList,
3648 sc->ha_timeQFreeHead,
3649 sc->ha_timeQFreeTail);
3650 } else {
3651 submitted_time = 0xffffffff;
3652 }
3653
3654#define maxctime sc->ha_performance.max_command_time[ccb->csio.cdb_io.cdb_bytes[0]]
3655#define minctime sc->ha_performance.min_command_time[ccb->csio.cdb_io.cdb_bytes[0]]
3656 if (submitted_time != 0xffffffff) {
3657 if ( maxctime < submitted_time ) {
3658 maxctime = submitted_time;
3659 }
3660 if ( (minctime == 0)
3661 || (minctime > submitted_time) ) {
3662 minctime = submitted_time;
3663 }
3664
3665 if ( sc->ha_performance.max_submit_time
3666 < submitted_time ) {
3667 sc->ha_performance.max_submit_time
3668 = submitted_time;
3669 }
3670 if ( sc->ha_performance.min_submit_time == 0
3671 || sc->ha_performance.min_submit_time
3672 > submitted_time) {
3673 sc->ha_performance.min_submit_time
3674 = submitted_time;
3675 }
3676
3677 switch ( ccb->csio.cdb_io.cdb_bytes[0] ) {
3678
3679 case 0xa8: /* 12-byte READ */
3680 /* FALLTHRU */
3681 case 0x08: /* 6-byte READ */
3682 /* FALLTHRU */
3683 case 0x28: /* 10-byte READ */
3684 op_type = READ_OP;
3685 break;
3686
3687 case 0x0a: /* 6-byte WRITE */
3688 /* FALLTHRU */
3689 case 0xaa: /* 12-byte WRITE */
3690 /* FALLTHRU */
3691 case 0x2a: /* 10-byte WRITE */
3692 op_type = WRITE_OP;
3693 break;
3694
3695 default:
3696 op_type = 0;
3697 break;
3698 }
3699
3700 if ( op_type != 0 ) {
3701 struct scsi_rw_big * cmd;
3702
3703 cmd = (struct scsi_rw_big *)
3704 &(ccb->csio.cdb_io);
3705
3706 size = (((u_int32_t) cmd->length2 << 8)
3707 | ((u_int32_t) cmd->length1)) << 9;
3708
3709 switch ( size ) {
3710
3711 case 512:
3712 asr_IObySize(sc,
3713 submitted_time, op_type,
3714 SIZE_512);
3715 break;
3716
3717 case 1024:
3718 asr_IObySize(sc,
3719 submitted_time, op_type,
3720 SIZE_1K);
3721 break;
3722
3723 case 2048:
3724 asr_IObySize(sc,
3725 submitted_time, op_type,
3726 SIZE_2K);
3727 break;
3728
3729 case 4096:
3730 asr_IObySize(sc,
3731 submitted_time, op_type,
3732 SIZE_4K);
3733 break;
3734
3735 case 8192:
3736 asr_IObySize(sc,
3737 submitted_time, op_type,
3738 SIZE_8K);
3739 break;
3740
3741 case 16384:
3742 asr_IObySize(sc,
3743 submitted_time, op_type,
3744 SIZE_16K);
3745 break;
3746
3747 case 32768:
3748 asr_IObySize(sc,
3749 submitted_time, op_type,
3750 SIZE_32K);
3751 break;
3752
3753 case 65536:
3754 asr_IObySize(sc,
3755 submitted_time, op_type,
3756 SIZE_64K);
3757 break;
3758
3759 default:
3760 if ( size > (1 << 16) ) {
3761 asr_IObySize(sc,
3762 submitted_time,
3763 op_type,
3764 SIZE_BIGGER);
3765 } else {
3766 asr_IObySize(sc,
3767 submitted_time,
3768 op_type,
3769 SIZE_OTHER);
3770 }
3771 break;
3772 }
3773 }
3774 }
3775 }
3776#endif
3777 /* Sense data in reply packet */
3778 if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
3779 u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);
3780
3781 if (size) {
3782 if (size > sizeof(ccb->csio.sense_data)) {
3783 size = sizeof(ccb->csio.sense_data);
3784 }
3785 if (size > I2O_SCSI_SENSE_DATA_SZ) {
3786 size = I2O_SCSI_SENSE_DATA_SZ;
3787 }
3788 if ((ccb->csio.sense_len)
3789 && (size > ccb->csio.sense_len)) {
3790 size = ccb->csio.sense_len;
3791 }
3792 bcopy ((caddr_t)Reply->SenseData,
3793 (caddr_t)&(ccb->csio.sense_data), size);
3794 }
3795 }
3796
3797 /*
3798 * Return Reply so that it can be used for the next command
3799 * since we have no more need for it now
3800 */
3801 sc->ha_Virt->FromFIFO = ReplyOffset;
3802
3803 if (ccb->ccb_h.path) {
3804 xpt_done ((union ccb *)ccb);
3805 } else {
3806 wakeup ((caddr_t)ccb);
3807 }
3808 }
3809#ifdef ASR_MEASURE_PERFORMANCE
3810 {
3811 u_int32_t result;
3812
3813 microtime(&junk);
3814 result = asr_time_delta(sc->ha_performance.intr_started, junk);
3815
3816 if (result != 0xffffffff) {
3817 if ( sc->ha_performance.max_intr_time < result ) {
3818 sc->ha_performance.max_intr_time = result;
3819 }
3820
3821 if ( (sc->ha_performance.min_intr_time == 0)
3822 || (sc->ha_performance.min_intr_time > result) ) {
3823 sc->ha_performance.min_intr_time = result;
3824 }
3825 }
3826 }
3827#endif
3828 return (processed);
3829} /* asr_intr */
3830
3831#undef QueueSize /* Grrrr */
3832#undef SG_Size /* Grrrr */
3833
3834/*
3835 * Meant to be included at the bottom of asr.c !!!
3836 */
3837
3838/*
3839 * Included here as hard coded. Done because other necessary include
3840 * files utilize C++ comment structures which make them a nuisance to
3841 * included here just to pick up these three typedefs.
3842 */
3843typedef U32 DPT_TAG_T;
3844typedef U32 DPT_MSG_T;
3845typedef U32 DPT_RTN_T;
3846
#undef SCSI_RESET	/* Conflicts with "scsi/scsiconf.h" definition */
3848#include "dev/asr/osd_unix.h"
3849
3850#define asr_unit(dev) minor(dev)
3851
3852STATIC INLINE Asr_softc_t *
3853ASR_get_sc (
3854 IN dev_t dev)
3855{
3856 int unit = asr_unit(dev);
3857 OUT Asr_softc_t * sc = Asr_softc;
3858
3859 while (sc && sc->ha_sim[0] && (cam_sim_unit(sc->ha_sim[0]) != unit)) {
3860 sc = sc->ha_next;
3861 }
3862 return (sc);
3863} /* ASR_get_sc */
3864
3865STATIC u_int8_t ASR_ctlr_held;
3866#if (!defined(UNREFERENCED_PARAMETER))
3867# define UNREFERENCED_PARAMETER(x) (void)(x)
3868#endif
3869
/*
 * Character-device open entry point for the control node.  Enforces
 * single-opener semantics via the global ASR_ctlr_held flag and requires
 * superuser credentials (suser_cred).  Returns ENODEV when no adapter
 * matches the device's unit, EBUSY when already held, or the
 * suser_cred() error.  flags/ifmt are unused.
 */
STATIC int
asr_open(
	IN dev_t dev,
	int32_t flags,
	int32_t ifmt,
	IN d_thread_t *td)
{
	int s;
	OUT int error;
	UNREFERENCED_PARAMETER(flags);
	UNREFERENCED_PARAMETER(ifmt);

	/* No adapter instance for this unit: nothing to open. */
	if (ASR_get_sc (dev) == (Asr_softc_t *)NULL) {
		return (ENODEV);
	}
	/* Opens must come from a process context (credential check below). */
	KKASSERT(td->td_proc);
	/* Block CAM interrupts while testing and taking the hold flag. */
	s = splcam ();
	if (ASR_ctlr_held) {
		error = EBUSY;
	} else if ((error = suser_cred(td->td_proc->p_ucred, 0)) == 0) {
		/* Superuser and not held: take the single-open slot. */
		++ASR_ctlr_held;
	}
	splx(s);
	return (error);
} /* asr_open */
3895
3896STATIC int
3897asr_close(
3898 dev_t dev,
3899 int flags,
3900 int ifmt,
41c20dac 3901 d_thread_t *td)
984263bc
MD
3902{
3903 UNREFERENCED_PARAMETER(dev);
3904 UNREFERENCED_PARAMETER(flags);
3905 UNREFERENCED_PARAMETER(ifmt);
41c20dac 3906 UNREFERENCED_PARAMETER(td);
984263bc
MD
3907
3908 ASR_ctlr_