Remove two unnecessary volatile qualifications.
[dragonfly.git] / sys / dev / raid / asr / asr.c
CommitLineData
984263bc 1/* $FreeBSD: src/sys/dev/asr/asr.c,v 1.3.2.2 2001/08/23 05:21:29 scottl Exp $ */
fabb8ceb 2/* $DragonFly: src/sys/dev/raid/asr/asr.c,v 1.7 2003/07/21 05:50:27 dillon Exp $ */
984263bc
MD
3/*
4 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
5 * Copyright (c) 2000-2001 Adaptec Corporation
6 * All rights reserved.
7 *
8 * TERMS AND CONDITIONS OF USE
9 *
10 * Redistribution and use in source form, with or without modification, are
11 * permitted provided that redistributions of source code must retain the
12 * above copyright notice, this list of conditions and the following disclaimer.
13 *
14 * This software is provided `as is' by Adaptec and any express or implied
15 * warranties, including, but not limited to, the implied warranties of
16 * merchantability and fitness for a particular purpose, are disclaimed. In no
17 * event shall Adaptec be liable for any direct, indirect, incidental, special,
18 * exemplary or consequential damages (including, but not limited to,
19 * procurement of substitute goods or services; loss of use, data, or profits;
20 * or business interruptions) however caused and on any theory of liability,
21 * whether in contract, strict liability, or tort (including negligence or
22 * otherwise) arising in any way out of the use of this driver software, even
23 * if advised of the possibility of such damage.
24 *
25 * SCSI I2O host adapter driver
26 *
27 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
28 * - The 2000S and 2005S do not initialize on some machines,
29 * increased timeout to 255ms from 50ms for the StatusGet
30 * command.
31 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
32 * - I knew this one was too good to be true. The error return
33 * on ioctl commands needs to be compared to CAM_REQ_CMP, not
34 * to the bit masked status.
35 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
36 * - The 2005S that was supported is affectionately called the
37 * Conjoined BAR Firmware. In order to support RAID-5 in a
38 * 16MB low-cost configuration, Firmware was forced to go
39 * to a Split BAR Firmware. This requires a separate IOP and
40 * Messaging base address.
41 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
42 * - Handle support for 2005S Zero Channel RAID solution.
43 * - System locked up if the Adapter locked up. Do not try
44 * to send other commands if the resetIOP command fails. The
45 * fail outstanding command discovery loop was flawed as the
46 * removal of the command from the list prevented discovering
47 * all the commands.
48 * - Comment changes to clarify driver.
49 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
50 * - We do not use the AC_FOUND_DEV event because of I2O.
51 * Removed asr_async.
52 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
53 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
54 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0
55 * mode as this is confused with competitor adapters in run
56 * mode.
57 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove
58 * to prevent operating system panic.
59 * - moved default major number to 154 from 97.
60 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
61 * - The controller is not actually an ASR (Adaptec SCSI RAID)
62 * series that is visible, it's more of an internal code name.
63 * remove any visible references within reason for now.
64 * - bus_ptr->LUN was not correctly zeroed when initially
65 * allocated causing a possible panic of the operating system
66 * during boot.
67 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
68 * - Code always fails for ASR_getTid affecting performance.
69 * - initiated a set of changes that resulted from a formal
70 * code inspection by Mark_Salyzyn@adaptec.com,
71 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
72 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
73 * Their findings were focussed on the LCT & TID handler, and
74 * all resulting changes were to improve code readability,
75 * consistency or have a positive effect on performance.
76 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
77 * - Passthrough returned an incorrect error.
78 * - Passthrough did not migrate the intrinsic scsi layer wakeup
79 * on command completion.
80 * - generate control device nodes using make_dev and delete_dev.
81 * - Performance affected by TID caching reallocing.
82 * - Made suggested changes by Justin_Gibbs@adaptec.com
83 * - use splcam instead of splbio.
84 * - use cam_imask instead of bio_imask.
85 * - use u_int8_t instead of u_char.
86 * - use u_int16_t instead of u_short.
87 * - use u_int32_t instead of u_long where appropriate.
88 * - use 64 bit context handler instead of 32 bit.
89 * - create_ccb should only allocate the worst case
90 * requirements for the driver since CAM may evolve
91 * making union ccb much larger than needed here.
92 * renamed create_ccb to asr_alloc_ccb.
93 * - go nutz justifying all debug prints as macros
94 * defined at the top and remove unsightly ifdefs.
95 * - INLINE STATIC viewed as confusing. Historically
96 * utilized to affect code performance and debug
97 * issues in OS, Compiler or OEM specific situations.
98 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
99 * - Ported from FreeBSD 2.2.X DPT I2O driver.
100 * changed struct scsi_xfer to union ccb/struct ccb_hdr
101 * changed variable name xs to ccb
102 * changed struct scsi_link to struct cam_path
103 * changed struct scsibus_data to struct cam_sim
104 * stopped using fordriver for holding on to the TID
105 * use proprietary packet creation instead of scsi_inquire
106 * CAM layer sends synchronize commands.
107 */
108
109#define ASR_VERSION 1
110#define ASR_REVISION '0'
111#define ASR_SUBREVISION '8'
112#define ASR_MONTH 8
113#define ASR_DAY 21
114#define ASR_YEAR 2001 - 1980
115
116/*
117 * Debug macros to reduce the unsightly ifdefs
118 */
119#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
120# define debug_asr_message(message) \
121 { \
122 u_int32_t * pointer = (u_int32_t *)message; \
123 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);\
124 u_int32_t counter = 0; \
125 \
126 while (length--) { \
127 printf ("%08lx%c", (u_long)*(pointer++), \
128 (((++counter & 7) == 0) || (length == 0)) \
129 ? '\n' \
130 : ' '); \
131 } \
132 }
133#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
134
135#if (defined(DEBUG_ASR))
136 /* Breaks on none STDC based compilers :-( */
137# define debug_asr_printf(fmt,args...) printf(fmt, ##args)
138# define debug_asr_dump_message(message) debug_asr_message(message)
139# define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path);
140 /* None fatal version of the ASSERT macro */
141# if (defined(__STDC__))
142# define ASSERT(phrase) if(!(phrase))printf(#phrase " at line %d file %s\n",__LINE__,__FILE__)
143# else
144# define ASSERT(phrase) if(!(phrase))printf("phrase" " at line %d file %s\n",__LINE__,__FILE__)
145# endif
146#else /* DEBUG_ASR */
147# define debug_asr_printf(fmt,args...)
148# define debug_asr_dump_message(message)
149# define debug_asr_print_path(ccb)
150# define ASSERT(x)
151#endif /* DEBUG_ASR */
152
153/*
154 * If DEBUG_ASR_CMD is defined:
155 * 0 - Display incoming SCSI commands
156 * 1 - add in a quick character before queueing.
157 * 2 - add in outgoing message frames.
158 */
159#if (defined(DEBUG_ASR_CMD))
160# define debug_asr_cmd_printf(fmt,args...) printf(fmt,##args)
161# define debug_asr_dump_ccb(ccb) \
162 { \
163 u_int8_t * cp = (unsigned char *)&(ccb->csio.cdb_io); \
164 int len = ccb->csio.cdb_len; \
165 \
166 while (len) { \
167 debug_asr_cmd_printf (" %02x", *(cp++)); \
168 --len; \
169 } \
170 }
171# if (DEBUG_ASR_CMD > 0)
172# define debug_asr_cmd1_printf debug_asr_cmd_printf
173# else
174# define debug_asr_cmd1_printf(fmt,args...)
175# endif
176# if (DEBUG_ASR_CMD > 1)
177# define debug_asr_cmd2_printf debug_asr_cmd_printf
178# define debug_asr_cmd2_dump_message(message) debug_asr_message(message)
179# else
180# define debug_asr_cmd2_printf(fmt,args...)
181# define debug_asr_cmd2_dump_message(message)
182# endif
183#else /* DEBUG_ASR_CMD */
184# define debug_asr_cmd_printf(fmt,args...)
185# define debug_asr_cmd_dump_ccb(ccb)
186# define debug_asr_cmd1_printf(fmt,args...)
187# define debug_asr_cmd2_printf(fmt,args...)
188# define debug_asr_cmd2_dump_message(message)
189#endif /* DEBUG_ASR_CMD */
190
191#if (defined(DEBUG_ASR_USR_CMD))
192# define debug_usr_cmd_printf(fmt,args...) printf(fmt,##args)
193# define debug_usr_cmd_dump_message(message) debug_usr_message(message)
194#else /* DEBUG_ASR_USR_CMD */
195# define debug_usr_cmd_printf(fmt,args...)
196# define debug_usr_cmd_dump_message(message)
197#endif /* DEBUG_ASR_USR_CMD */
198
199#define dsDescription_size 46 /* Snug as a bug in a rug */
200#include "dev/asr/dptsig.h"
201
/*
 * DPT driver signature block: identifies this driver (file type, version,
 * build date, supported processors and OS) to DPT/Adaptec management
 * tools that scan for it.  The description string is patched at runtime
 * (see note below) so the reported OS version matches the running system.
 */
static dpt_sig_S ASR_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
	OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL,
	ADF_ALL_SC5,
	0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
	ASR_MONTH, ASR_DAY, ASR_YEAR,
/*	 01234567890123456789012345678901234567890123456789	< 50 chars */
	"Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
	/*		 ^^^^^ asr_attach alters these to match OS */
};
213
214#include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */
215#include <sys/kernel.h>
216#include <sys/systm.h>
217#include <sys/malloc.h>
218#include <sys/proc.h>
219#include <sys/conf.h>
220#include <sys/disklabel.h>
221#include <sys/bus.h>
222#include <machine/resource.h>
223#include <machine/bus.h>
224#include <sys/rman.h>
225#include <sys/stat.h>
226
227#include <cam/cam.h>
228#include <cam/cam_ccb.h>
229#include <cam/cam_sim.h>
230#include <cam/cam_xpt_sim.h>
231#include <cam/cam_xpt_periph.h>
232
233#include <cam/scsi/scsi_all.h>
234#include <cam/scsi/scsi_message.h>
235
236#include <vm/vm.h>
237#include <vm/pmap.h>
238#include <machine/cputypes.h>
239#include <machine/clock.h>
240#include <i386/include/vmparam.h>
241
242#include <pci/pcivar.h>
243#include <pci/pcireg.h>
244
245#define STATIC static
246#define INLINE
247
248#if (defined(DEBUG_ASR) && (DEBUG_ASR > 0))
249# undef STATIC
250# define STATIC
251# undef INLINE
252# define INLINE
253#endif
254#define IN
255#define OUT
256#define INOUT
257
258#define osdSwap4(x) ((u_long)ntohl((u_long)(x)))
259#define KVTOPHYS(x) vtophys(x)
260#include "dev/asr/dptalign.h"
261#include "dev/asr/i2oexec.h"
262#include "dev/asr/i2obscsi.h"
263#include "dev/asr/i2odpt.h"
264#include "dev/asr/i2oadptr.h"
265#include "opt_asr.h"
266
267#include "dev/asr/sys_info.h"
268
269/* Configuration Definitions */
270
271#define SG_SIZE 58 /* Scatter Gather list Size */
272#define MAX_TARGET_ID 126 /* Maximum Target ID supported */
273#define MAX_LUN 255 /* Maximum LUN Supported */
274#define MAX_CHANNEL 7 /* Maximum Channel # Supported by driver */
275#define MAX_INBOUND 2000 /* Max CCBs, Also Max Queue Size */
276#define MAX_OUTBOUND 256 /* Maximum outbound frames/adapter */
277#define MAX_INBOUND_SIZE 512 /* Maximum inbound frame size */
278#define MAX_MAP 4194304L /* Maximum mapping size of IOP */
279 /* Also serves as the minimum map for */
280 /* the 2005S zero channel RAID product */
281
282/**************************************************************************
283** ASR Host Adapter structure - One Structure For Each Host Adapter That **
284** Is Configured Into The System. The Structure Supplies Configuration **
285** Information, Status Info, Queue Info And An Active CCB List Pointer. **
286***************************************************************************/
287
288/* I2O register set */
289typedef struct {
290 U8 Address[0x30];
291 volatile U32 Status;
292 volatile U32 Mask;
293# define Mask_InterruptsDisabled 0x08
294 U32 x[2];
295 volatile U32 ToFIFO; /* In Bound FIFO */
296 volatile U32 FromFIFO; /* Out Bound FIFO */
297} i2oRegs_t;
298
/*
 * A MIX of performance and space considerations for TID lookups
 */
typedef u_int16_t tid_t;

/* Per-target LUN -> TID lookup table. */
typedef struct {
	u_int32_t size;		/* up to MAX_LUN */
	tid_t	  TID[1];	/* placeholder for a variable-length trailing
				 * array — presumably sized at allocation;
				 * confirm at the alloc sites */
} lun2tid_t;

/* Per-bus target -> lun2tid_t lookup table. */
typedef struct {
	u_int32_t   size;	/* up to MAX_TARGET */
	lun2tid_t * LUN[1];	/* placeholder for a variable-length trailing
				 * array — presumably sized at allocation;
				 * confirm at the alloc sites */
} target2lun_t;
313
/*
 * To ensure that we only allocate and use the worst case ccb here, lets
 * make our own local ccb union. If asr_alloc_ccb is utilized for another
 * ccb type, ensure that you add the additional structures into our local
 * ccb union. To ensure strict type checking, we will utilize the local
 * ccb definition wherever possible.
 */
union asr_ccb {
	struct ccb_hdr		ccb_h;	/* For convenience */
	struct ccb_scsiio	csio;	/* SCSI I/O request ccb */
	struct ccb_setasync	csa;	/* async-callback registration ccb */
};
326
/*
 * Per-adapter (HBA) software state; one is allocated for each configured
 * board.  Holds the hardware mappings, the LCT/TID device tables, the
 * active ccb list and the configuration limits.  All instances are
 * chained together through ha_next.
 */
typedef struct Asr_softc {
	u_int16_t		ha_irq;
	void		      * ha_Base;	/* base port for each board */
	u_int8_t * volatile	ha_blinkLED;	/* blink-LED (fault) code location;
						 * volatile pointer — presumably
						 * updated asynchronously, confirm
						 * against the interrupt path */
	i2oRegs_t	      * ha_Virt;	/* Base address of IOP */
	U8		      * ha_Fvirt;	/* Base address of Frames */
	I2O_IOP_ENTRY		ha_SystemTable;
	LIST_HEAD(,ccb_hdr)	ha_ccb;		/* ccbs in use */
	struct cam_path	      * ha_path[MAX_CHANNEL+1];
	struct cam_sim	      * ha_sim[MAX_CHANNEL+1];
#if __FreeBSD_version >= 400000
	struct resource	      * ha_mem_res;
	struct resource	      * ha_mes_res;
	struct resource	      * ha_irq_res;
	void		      * ha_intr;
#endif
	PI2O_LCT		ha_LCT;		/* Complete list of devices */
	/* Shorthand accessors for the IdentityTag bytes of an LCT entry */
#	define le_type	 IdentityTag[0]
#	define I2O_BSA	    0x20
#	define I2O_FCA	    0x40
#	define I2O_SCSI	    0x00
#	define I2O_PORT	    0x80
#	define I2O_UNKNOWN  0x7F
#	define le_bus	 IdentityTag[1]
#	define le_target IdentityTag[2]
#	define le_lun	 IdentityTag[3]
	target2lun_t	      * ha_targets[MAX_CHANNEL+1]; /* per-bus TID maps */
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
	u_long			ha_Msgs_Phys;	/* physical address of ha_Msgs */

	u_int8_t		ha_in_reset;	/* adapter reset state */
#	define HA_OPERATIONAL	    0
#	define HA_IN_RESET	    1
#	define HA_OFF_LINE	    2
#	define HA_OFF_LINE_RECOVERY 3
	/* Configuration information */
	/* The target id maximums we take */
	u_int8_t		ha_MaxBus;	/* Maximum bus */
	u_int8_t		ha_MaxId;	/* Maximum target ID */
	u_int8_t		ha_MaxLun;	/* Maximum target LUN */
	u_int8_t		ha_SgSize;	/* Max SG elements */
	u_int8_t		ha_pciBusNum;
	u_int8_t		ha_pciDeviceNum;
	u_int8_t		ha_adapter_target[MAX_CHANNEL+1];
	u_int16_t		ha_QueueSize;	/* Max outstanding commands */
	u_int16_t		ha_Msgs_Count;

	/* Links into other parents and HBAs */
	struct Asr_softc      * ha_next;	/* HBA list */

#ifdef ASR_MEASURE_PERFORMANCE
#define MAX_TIMEQ_SIZE	256 // assumes MAX 256 scsi commands sent
	asr_perf_t		ha_performance;
	u_int32_t		ha_submitted_ccbs_count;

	// Queueing macros for a circular queue
#define TIMEQ_FREE_LIST_EMPTY(head, tail) (-1 == (head) && -1 == (tail))
#define TIMEQ_FREE_LIST_FULL(head, tail) ((((tail) + 1) % MAX_TIMEQ_SIZE) == (head))
#define ENQ_TIMEQ_FREE_LIST(item, Q, head, tail)			\
	if (!TIMEQ_FREE_LIST_FULL((head), (tail))) {			\
		if TIMEQ_FREE_LIST_EMPTY((head),(tail)) {		\
			(head) = (tail) = 0;				\
		}							\
		else (tail) = ((tail) + 1) % MAX_TIMEQ_SIZE;		\
		Q[(tail)] = (item);					\
	}								\
	else {								\
		debug_asr_printf("asr: Enqueueing when TimeQ Free List is full... This should not happen!\n"); \
	}
#define DEQ_TIMEQ_FREE_LIST(item, Q, head, tail)			\
	if (!TIMEQ_FREE_LIST_EMPTY((head), (tail))) {			\
		item = Q[(head)];					\
		if ((head) == (tail)) { (head) = (tail) = -1; }		\
		else (head) = ((head) + 1) % MAX_TIMEQ_SIZE;		\
	}								\
	else {								\
		(item) = -1;						\
		debug_asr_printf("asr: Dequeueing when TimeQ Free List is empty... This should not happen!\n"); \
	}

	// Circular queue of time stamps
	struct timeval		ha_timeQ[MAX_TIMEQ_SIZE];
	u_int32_t		ha_timeQFreeList[MAX_TIMEQ_SIZE];
	int			ha_timeQFreeHead;
	int			ha_timeQFreeTail;
#endif
} Asr_softc_t;
414
STATIC Asr_softc_t * Asr_softc;	/* head of the list of attached HBA softcs (linked via ha_next) */
416
417/*
418 * Prototypes of the routines we have in this object.
419 */
420
421/* Externally callable routines */
422#if __FreeBSD_version >= 400000
423#define PROBE_ARGS IN device_t tag
424#define PROBE_RET int
425#define PROBE_SET() u_long id = (pci_get_device(tag)<<16)|pci_get_vendor(tag)
426#define PROBE_RETURN(retval) if(retval){device_set_desc(tag,retval);return(0);}else{return(ENXIO);}
427#define ATTACH_ARGS IN device_t tag
428#define ATTACH_RET int
429#define ATTACH_SET() int unit = device_get_unit(tag)
430#define ATTACH_RETURN(retval) return(retval)
431#else
432#define PROBE_ARGS IN pcici_t tag, IN pcidi_t id
433#define PROBE_RET const char *
434#define PROBE_SET()
435#define PROBE_RETURN(retval) return(retval)
436#define ATTACH_ARGS IN pcici_t tag, IN int unit
437#define ATTACH_RET void
438#define ATTACH_SET()
439#define ATTACH_RETURN(retval) return
440#endif
441/* I2O HDM interface */
442STATIC PROBE_RET asr_probe __P((PROBE_ARGS));
443STATIC ATTACH_RET asr_attach __P((ATTACH_ARGS));
444/* DOMINO placeholder */
445STATIC PROBE_RET domino_probe __P((PROBE_ARGS));
446STATIC ATTACH_RET domino_attach __P((ATTACH_ARGS));
447/* MODE0 adapter placeholder */
448STATIC PROBE_RET mode0_probe __P((PROBE_ARGS));
449STATIC ATTACH_RET mode0_attach __P((ATTACH_ARGS));
450
451STATIC Asr_softc_t * ASR_get_sc __P((
452 IN dev_t dev));
453STATIC int asr_ioctl __P((
454 IN dev_t dev,
455 IN u_long cmd,
456 INOUT caddr_t data,
457 int flag,
a99c2fff 458 d_thread_t *td));
984263bc
MD
459STATIC int asr_open __P((
460 IN dev_t dev,
461 int32_t flags,
462 int32_t ifmt,
a99c2fff 463 IN d_thread_t *td));
984263bc
MD
464STATIC int asr_close __P((
465 dev_t dev,
466 int flags,
467 int ifmt,
a99c2fff 468 d_thread_t *td));
984263bc
MD
469STATIC int asr_intr __P((
470 IN Asr_softc_t * sc));
471STATIC void asr_timeout __P((
472 INOUT void * arg));
473STATIC int ASR_init __P((
474 IN Asr_softc_t * sc));
475STATIC INLINE int ASR_acquireLct __P((
476 INOUT Asr_softc_t * sc));
477STATIC INLINE int ASR_acquireHrt __P((
478 INOUT Asr_softc_t * sc));
479STATIC void asr_action __P((
480 IN struct cam_sim * sim,
481 IN union ccb * ccb));
482STATIC void asr_poll __P((
483 IN struct cam_sim * sim));
484
/*
 * Here is the auto-probe structure used to nest our tests appropriately
 * during the startup phase of the operating system.
 */
#if __FreeBSD_version >= 400000
/* newbus (FreeBSD 4.x+) registration for the asr driver */
STATIC device_method_t asr_methods[] = {
	DEVMETHOD(device_probe,	 asr_probe),
	DEVMETHOD(device_attach, asr_attach),
	{ 0, 0 }
};

STATIC driver_t asr_driver = {
	"asr",
	asr_methods,
	sizeof(Asr_softc_t)
};

STATIC devclass_t asr_devclass;

DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);

/* DOMINO chipset: claim-only placeholder driver (no softc, empty attach) */
STATIC device_method_t domino_methods[] = {
	DEVMETHOD(device_probe,	 domino_probe),
	DEVMETHOD(device_attach, domino_attach),
	{ 0, 0 }
};

STATIC driver_t domino_driver = {
	"domino",
	domino_methods,
	0
};

STATIC devclass_t domino_devclass;

DRIVER_MODULE(domino, pci, domino_driver, domino_devclass, 0, 0);

/* Mode0 adapters: likewise a claim-only placeholder driver */
STATIC device_method_t mode0_methods[] = {
	DEVMETHOD(device_probe,	 mode0_probe),
	DEVMETHOD(device_attach, mode0_attach),
	{ 0, 0 }
};

STATIC driver_t mode0_driver = {
	"mode0",
	mode0_methods,
	0
};

STATIC devclass_t mode0_devclass;

DRIVER_MODULE(mode0, pci, mode0_driver, mode0_devclass, 0, 0);
#else
/* pre-4.0 style PCI registration via DATA_SET linker sets */
STATIC u_long asr_pcicount = 0;
STATIC struct pci_device asr_pcidev = {
	"asr",
	asr_probe,
	asr_attach,
	&asr_pcicount,
	NULL
};
DATA_SET (asr_pciset, asr_pcidev);

STATIC u_long domino_pcicount = 0;
STATIC struct pci_device domino_pcidev = {
	"domino",
	domino_probe,
	domino_attach,
	&domino_pcicount,
	NULL
};
DATA_SET (domino_pciset, domino_pcidev);

STATIC u_long mode0_pcicount = 0;
STATIC struct pci_device mode0_pcidev = {
	"mode0",
	mode0_probe,
	mode0_attach,
	&mode0_pcicount,
	NULL
};
DATA_SET (mode0_pciset, mode0_pcidev);
#endif
568
/*
 * devsw for asr hba driver
 *
 * only ioctl is used. the sd driver provides all other access.
 * The remaining entry points are the standard no-op stubs.
 */
#define CDEV_MAJOR 154	/* prefered default character major */
STATIC struct cdevsw asr_cdevsw = {
	"asr",		/* name */
	CDEV_MAJOR,	/* maj */
	0,		/* flags */
	NULL,		/* port */
	0,		/* auto */

	asr_open,	/* open */
	asr_close,	/* close */
	noread,		/* read */
	nowrite,	/* write */
	asr_ioctl,	/* ioctl */
	nopoll,		/* poll */
	nommap,		/* mmap */
	nostrategy,	/* strategy */
	nodump,		/* dump */
	nopsize		/* psize */
};
593
594#ifdef ASR_MEASURE_PERFORMANCE
595STATIC u_int32_t asr_time_delta __P((IN struct timeval start,
596 IN struct timeval end));
597#endif
598
/*
 * Initialize the dynamic cdevsw hooks.
 *
 * Runs once (guarded by asr_devsw_installed): finds a free character
 * device major — preferring CDEV_MAJOR and scanning upward, then
 * wrapping to scan below it — registers asr_cdevsw under it, and
 * removes any stale minor-0 node so the adapter scan can recreate it.
 */
STATIC void
asr_drvinit (
	void * unused)
{
	static int asr_devsw_installed = 0;

	if (asr_devsw_installed) {
		return;
	}
	asr_devsw_installed++;
	/*
	 * Find a free spot (the report during driver load used by
	 * osd layer in engine to generate the controlling nodes).
	 */
	while ((asr_cdevsw.d_maj < NUMCDEVSW)
	 && (devsw(makedev(asr_cdevsw.d_maj,0)) != (struct cdevsw *)NULL)) {
		++asr_cdevsw.d_maj;
	}
	/* Nothing free above CDEV_MAJOR: wrap and search below it. */
	if (asr_cdevsw.d_maj >= NUMCDEVSW) for (
	  asr_cdevsw.d_maj = 0;
	  (asr_cdevsw.d_maj < CDEV_MAJOR)
	   && (devsw(makedev(asr_cdevsw.d_maj,0)) != (struct cdevsw *)NULL);
	  ++asr_cdevsw.d_maj);
	/*
	 * Come to papa
	 */
	cdevsw_add(&asr_cdevsw);
	/*
	 * delete any nodes that would attach to the primary adapter,
	 * let the adapter scans add them.
	 */
	destroy_dev(makedev(asr_cdevsw.d_maj,0));
} /* asr_drvinit */
635
636/* Must initialize before CAM layer picks up our HBA driver */
637SYSINIT(asrdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,asr_drvinit,NULL)
638
639/* I2O support routines */
640#define defAlignLong(STRUCT,NAME) char NAME[sizeof(STRUCT)]
641#define getAlignLong(STRUCT,NAME) ((STRUCT *)(NAME))
642
643/*
644 * Fill message with default.
645 */
646STATIC PI2O_MESSAGE_FRAME
647ASR_fillMessage (
648 IN char * Message,
649 IN u_int16_t size)
650{
651 OUT PI2O_MESSAGE_FRAME Message_Ptr;
652
653 Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message);
654 bzero ((void *)Message_Ptr, size);
655 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
656 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
657 (size + sizeof(U32) - 1) >> 2);
658 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
659 return (Message_Ptr);
660} /* ASR_fillMessage */
661
662#define EMPTY_QUEUE ((U32)-1L)
663
/*
 * Pop a free inbound message-frame offset from the adapter's ToFIFO.
 *
 * If the first read returns EMPTY_QUEUE the register is read one more
 * time before giving up — presumably to paper over a transient empty
 * indication from the hardware (confirm against the I2O/adapter spec).
 * Returns the frame offset, or EMPTY_QUEUE if none is available.
 */
STATIC INLINE U32
ASR_getMessage(
	IN i2oRegs_t * virt)
{
	OUT U32	MessageOffset;

	if ((MessageOffset = virt->ToFIFO) == EMPTY_QUEUE) {
		MessageOffset = virt->ToFIFO;
	}
	return (MessageOffset);
} /* ASR_getMessage */
675
/*
 * Issue a polled command: obtain a free inbound frame (waiting up to
 * ~15s), copy `Message' into it, mask adapter interrupts and post the
 * frame.  Returns the PREVIOUS interrupt Mask register value so the
 * caller can restore it once the reply arrives, or (U32)-1L if no
 * inbound frame could be obtained (interrupts then left untouched).
 */
STATIC U32
ASR_initiateCp (
	INOUT i2oRegs_t	  * virt,
	INOUT U8	  * fvirt,
	IN PI2O_MESSAGE_FRAME	Message)
{
	OUT U32		    Mask = -1L;
	U32		    MessageOffset;
	u_int		    Delay = 1500;

	/*
	 * ASR_initiateCp is only used for synchronous commands and will
	 * be made more resiliant to adapter delays since commands like
	 * resetIOP can cause the adapter to be deaf for a little time.
	 */
	while (((MessageOffset = ASR_getMessage(virt)) == EMPTY_QUEUE)
	 && (--Delay != 0)) {
		DELAY (10000);
	}
	if (MessageOffset != EMPTY_QUEUE) {
		/* Copy the frame into adapter frame memory. */
		bcopy (Message, fvirt + MessageOffset,
		  I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
		/*
		 * Disable the Interrupts
		 */
		virt->Mask = (Mask = virt->Mask) | Mask_InterruptsDisabled;
		virt->ToFIFO = MessageOffset;	/* post the frame */
	}
	return (Mask);
} /* ASR_initiateCp */
707
708/*
709 * Reset the adapter.
710 */
711STATIC U32
712ASR_resetIOP (
713 INOUT i2oRegs_t * virt,
714 INOUT U8 * fvirt)
715{
716 struct resetMessage {
717 I2O_EXEC_IOP_RESET_MESSAGE M;
718 U32 R;
719 };
720 defAlignLong(struct resetMessage,Message);
721 PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
722 OUT U32 * volatile Reply_Ptr;
723 U32 Old;
724
725 /*
726 * Build up our copy of the Message.
727 */
728 Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(Message,
729 sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
730 I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
731 /*
732 * Reset the Reply Status
733 */
734 *(Reply_Ptr = (U32 *)((char *)Message_Ptr
735 + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0;
736 I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
737 KVTOPHYS((void *)Reply_Ptr));
738 /*
739 * Send the Message out
740 */
741 if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
742 /*
743 * Wait for a response (Poll), timeouts are dangerous if
744 * the card is truly responsive. We assume response in 2s.
745 */
746 u_int8_t Delay = 200;
747
748 while ((*Reply_Ptr == 0) && (--Delay != 0)) {
749 DELAY (10000);
750 }
751 /*
752 * Re-enable the interrupts.
753 */
754 virt->Mask = Old;
755 ASSERT (*Reply_Ptr);
756 return (*Reply_Ptr);
757 }
758 ASSERT (Old != (U32)-1L);
759 return (0);
760} /* ASR_resetIOP */
761
762/*
763 * Get the curent state of the adapter
764 */
765STATIC INLINE PI2O_EXEC_STATUS_GET_REPLY
766ASR_getStatus (
767 INOUT i2oRegs_t * virt,
768 INOUT U8 * fvirt,
769 OUT PI2O_EXEC_STATUS_GET_REPLY buffer)
770{
771 defAlignLong(I2O_EXEC_STATUS_GET_MESSAGE,Message);
772 PI2O_EXEC_STATUS_GET_MESSAGE Message_Ptr;
773 U32 Old;
774
775 /*
776 * Build up our copy of the Message.
777 */
778 Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(Message,
779 sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
780 I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
781 I2O_EXEC_STATUS_GET);
782 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
783 KVTOPHYS((void *)buffer));
784 /* This one is a Byte Count */
785 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
786 sizeof(I2O_EXEC_STATUS_GET_REPLY));
787 /*
788 * Reset the Reply Status
789 */
790 bzero ((void *)buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
791 /*
792 * Send the Message out
793 */
794 if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
795 /*
796 * Wait for a response (Poll), timeouts are dangerous if
797 * the card is truly responsive. We assume response in 50ms.
798 */
799 u_int8_t Delay = 255;
800
801 while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
802 if (--Delay == 0) {
803 buffer = (PI2O_EXEC_STATUS_GET_REPLY)NULL;
804 break;
805 }
806 DELAY (1000);
807 }
808 /*
809 * Re-enable the interrupts.
810 */
811 virt->Mask = Old;
812 return (buffer);
813 }
814 return ((PI2O_EXEC_STATUS_GET_REPLY)NULL);
815} /* ASR_getStatus */
816
817/*
818 * Check if the device is a SCSI I2O HBA, and add it to the list.
819 */
820
/*
 * Probe for an ASR controller; if we find one, we will use it.
 * `id' is (PCI device id << 16) | vendor id — see PROBE_SET().
 */
STATIC PROBE_RET
asr_probe(PROBE_ARGS)
{
	PROBE_SET();
	/* Vendor 0x1044 (DPT/Adaptec), devices 0xA501 and 0xA511 */
	if ((id == 0xA5011044) || (id == 0xA5111044)) {
		PROBE_RETURN ("Adaptec Caching SCSI RAID");
	}
	PROBE_RETURN (NULL);
} /* asr_probe */
834
/*
 * Probe/Attach for DOMINO chipset.
 */
STATIC PROBE_RET
domino_probe(PROBE_ARGS)
{
	PROBE_SET();
	/* Vendor 0x1044 (DPT/Adaptec), device 0x1012 */
	if (id == 0x10121044) {
		PROBE_RETURN ("Adaptec Caching Memory Controller");
	}
	PROBE_RETURN (NULL);
} /* domino_probe */
847
/* Placeholder attach for the DOMINO chipset: claim it, no setup needed. */
STATIC ATTACH_RET
domino_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* domino_attach */
853
/*
 * Probe/Attach for MODE0 adapters.
 */
STATIC PROBE_RET
mode0_probe(PROBE_ARGS)
{
	PROBE_SET();

	/*
	 * If/When we can get a business case to commit to a
	 * Mode0 driver here, we can make all these tests more
	 * specific and robust. Mode0 adapters have their processors
	 * turned off, thus the chips are in a raw state.
	 */

	/* This is a PLX9054 */
	if (id == 0x905410B5) {
		PROBE_RETURN ("Adaptec Mode0 PM3757");
	}
	/* This is a PLX9080 */
	if (id == 0x908010B5) {
		PROBE_RETURN ("Adaptec Mode0 PM3754/PM3755");
	}
	/* This is a ZION 80303 */
	if (id == 0x53098086) {
		PROBE_RETURN ("Adaptec Mode0 3010S");
	}
	/* This is an i960RS */
	if (id == 0x39628086) {
		PROBE_RETURN ("Adaptec Mode0 2100S");
	}
	/* This is an i960RN */
	if (id == 0x19648086) {
		PROBE_RETURN ("Adaptec Mode0 PM2865/2400A/3200S/3400S");
	}
#if 0 /* this would match any generic i960 -- mjs */
	/* This is an i960RP (typically also on Motherboards) */
	if (id == 0x19608086) {
		PROBE_RETURN ("Adaptec Mode0 PM2554/PM1554/PM2654");
	}
#endif
	PROBE_RETURN (NULL);
} /* mode0_probe */
897
/* Placeholder attach for Mode0 adapters: claim the device, no setup. */
STATIC ATTACH_RET
mode0_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* mode0_attach */
903
904STATIC INLINE union asr_ccb *
905asr_alloc_ccb (
906 IN Asr_softc_t * sc)
907{
908 OUT union asr_ccb * new_ccb;
909
910 if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb),
911 M_DEVBUF, M_WAITOK)) != (union asr_ccb *)NULL) {
912 bzero (new_ccb, sizeof(*new_ccb));
913 new_ccb->ccb_h.pinfo.priority = 1;
914 new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
915 new_ccb->ccb_h.spriv_ptr0 = sc;
916 }
917 return (new_ccb);
918} /* asr_alloc_ccb */
919
/* Release a ccb previously obtained from asr_alloc_ccb. */
STATIC INLINE void
asr_free_ccb (
	IN union asr_ccb * free_ccb)
{
	free(free_ccb, M_DEVBUF);
} /* asr_free_ccb */
926
927/*
928 * Print inquiry data `carefully'
929 */
930STATIC void
931ASR_prstring (
932 u_int8_t * s,
933 int len)
934{
935 while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
936 printf ("%c", *(s++));
937 }
938} /* ASR_prstring */
939
940/*
941 * Prototypes
942 */
943STATIC INLINE int ASR_queue __P((
944 IN Asr_softc_t * sc,
945 IN PI2O_MESSAGE_FRAME Message));
/*
 * Send a message synchronously and without Interrupt to a ccb.
 *
 * Adapter interrupts are masked for the duration; completion is driven
 * by calling asr_intr() directly until the ccb leaves CAM_REQ_INPROG.
 * Returns the final ccb status (CAM_REQUEUE_REQ if no inbound frame
 * was available).
 */
STATIC int
ASR_queue_s (
	INOUT union asr_ccb	  * ccb,
	IN PI2O_MESSAGE_FRAME	    Message)
{
	int			    s;
	U32			    Mask;
	Asr_softc_t		  * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	/* Prevent interrupt service */
	s = splcam ();
	/* Mask adapter interrupts; saved Mask is restored below. */
	sc->ha_Virt->Mask = (Mask = sc->ha_Virt->Mask)
	  | Mask_InterruptsDisabled;

	if (ASR_queue (sc, Message) == EMPTY_QUEUE) {
		/* No inbound frame available: have CAM requeue. */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	}

	/*
	 * Wait for this board to report a finished instruction.
	 * asr_intr is polled directly since interrupts are masked.
	 */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		(void)asr_intr (sc);
	}

	/* Re-enable Interrupts */
	sc->ha_Virt->Mask = Mask;
	splx(s);

	return (ccb->ccb_h.status);
} /* ASR_queue_s */
987
988/*
989 * Send a message synchronously to a Asr_softc_t
990 */
991STATIC int
992ASR_queue_c (
993 IN Asr_softc_t * sc,
994 IN PI2O_MESSAGE_FRAME Message)
995{
996 union asr_ccb * ccb;
997 OUT int status;
998
999 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
1000 return (CAM_REQUEUE_REQ);
1001 }
1002
1003 status = ASR_queue_s (ccb, Message);
1004
1005 asr_free_ccb(ccb);
1006
1007 return (status);
1008} /* ASR_queue_c */
1009
/*
 * Add the specified ccb to the active queue and arm its timeout.
 * Must run at splcam: the list is also walked/modified from the
 * interrupt path (see V1.04 changelog note on ASR_ccbAdd locking).
 */
STATIC INLINE void
ASR_ccbAdd (
	IN Asr_softc_t	  * sc,
	INOUT union asr_ccb * ccb)
{
	int s;

	s = splcam();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flashes switching from write back to write thru.
			 */
			ccb->ccb_h.timeout = 6 * 60 * 1000;	/* six minutes */
		}
		/* timeout is in ms; convert to ticks for timeout(9). */
		ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
		  (ccb->ccb_h.timeout * hz) / 1000);
	}
	splx(s);
} /* ASR_ccbAdd */
1036
1037/*
1038 * Remove the specified ccb from the active queue.
1039 */
1040STATIC INLINE void
1041ASR_ccbRemove (
1042 IN Asr_softc_t * sc,
1043 INOUT union asr_ccb * ccb)
1044{
1045 int s;
1046
1047 s = splcam();
1048 untimeout(asr_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
1049 LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
1050 splx(s);
1051} /* ASR_ccbRemove */
1052
/*
 * Fail all the active commands, so they get re-issued by the operating
 * system.
 *
 * Each active ccb is removed from the queue, marked CAM_REQUEUE_REQ
 * with zero bytes transferred, and either completed through the CAM
 * layer or, for internal commands with no path, woken up directly.
 */
STATIC INLINE void
ASR_failActiveCommands (
	IN Asr_softc_t	* sc)
{
	struct ccb_hdr	* ccb;
	int s;

#if 0 /* Currently handled by callers, unnecessary paranoia currently */
	/* Left in for historical perspective. */
	defAlignLong(I2O_EXEC_LCT_NOTIFY_MESSAGE,Message);
	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;

	/* Send a blind LCT command to wait for the enableSys to complete */
	Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	  I2O_CLASS_MATCH_ANYCLASS);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
#endif

	s = splcam();
	/*
	 * We do not need to inform the CAM layer that we had a bus
	 * reset since we manage it on our own, this also prevents the
	 * SCSI_DELAY settling that would be required on other systems.
	 * The `SCSI_DELAY' has already been handled by the card via the
	 * acquisition of the LCT table while we are at CAM priority level.
	 *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
	 *  }
	 */
	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != (struct ccb_hdr *)NULL) {
		ASR_ccbRemove (sc, (union asr_ccb *)ccb);

		/* Force the OS to re-issue the command */
		ccb->status &= ~CAM_STATUS_MASK;
		ccb->status |= CAM_REQUEUE_REQ;
		/* Nothing Transferred */
		((struct ccb_scsiio *)ccb)->resid
		  = ((struct ccb_scsiio *)ccb)->dxfer_len;

		if (ccb->path) {
			xpt_done ((union ccb *)ccb);
		} else {
			/* Internal command: a sleeper in ASR_queue_s waits */
			wakeup ((caddr_t)ccb);
		}
	}
	splx(s);
} /* ASR_failActiveCommands */
1107
/*
 * The following command causes the HBA to reset the specific bus.
 *
 * Walks the LCT for the port entry matching `bus' and fires an
 * asynchronous I2O_HBA_BUS_RESET at its local TID; no completion is
 * awaited or checked.
 */
STATIC INLINE void
ASR_resetBus(
	IN Asr_softc_t	* sc,
	IN int		  bus)
{
	defAlignLong(I2O_HBA_BUS_RESET_MESSAGE,Message);
	I2O_HBA_BUS_RESET_MESSAGE	* Message_Ptr;
	PI2O_LCT_ENTRY			  Device;

	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
	  I2O_HBA_BUS_RESET);
	/* Locate the bus-adapter-port entry for the requested bus */
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Device) {
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus == bus)) {
			I2O_MESSAGE_FRAME_setTargetAddress(
			  &Message_Ptr->StdMessageFrame,
			  I2O_LCT_ENTRY_getLocalTID(Device));
			/* Asynchronous command, with no expectations */
			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
			break;
		}
	}
} /* ASR_resetBus */
1138
1139STATIC INLINE int
1140ASR_getBlinkLedCode (
1141 IN Asr_softc_t * sc)
1142{
1143 if ((sc != (Asr_softc_t *)NULL)
1144 && (sc->ha_blinkLED != (u_int8_t *)NULL)
1145 && (sc->ha_blinkLED[1] == 0xBC)) {
1146 return (sc->ha_blinkLED[0]);
1147 }
1148 return (0);
1149} /* ASR_getBlinkCode */
1150
/*
 * Determine the address of an TID lookup. Must be done at high priority
 * since the address can be changed by other threads of execution.
 *
 * Returns NULL pointer if not indexible (but will attempt to generate
 * an index if `new_entry' flag is set to TRUE).
 *
 * All addressible entries are to be guaranteed zero if never initialized.
 *
 * The lookup is a two-level table: sc->ha_targets[bus] -> per-target
 * array of lun lists -> tid_t slot.  Both levels are grown on demand
 * in power-of-two chunks to limit allocator fragmentation.
 */
STATIC INLINE tid_t *
ASR_getTidAddress(
	INOUT Asr_softc_t * sc,
	IN int		    bus,
	IN int		    target,
	IN int		    lun,
	IN int		    new_entry)
{
	target2lun_t	* bus_ptr;
	lun2tid_t	* target_ptr;
	unsigned	  new_size;

	/*
	 * Validity checking of incoming parameters. More of a bound
	 * expansion limit than an issue with the code dealing with the
	 * values.
	 *
	 * sc must be valid before it gets here, so that check could be
	 * dropped if speed a critical issue.
	 */
	if ((sc == (Asr_softc_t *)NULL)
	 || (bus > MAX_CHANNEL)
	 || (target > sc->ha_MaxId)
	 || (lun > sc->ha_MaxLun)) {
		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
		  (u_long)sc, bus, target, lun);
		return ((tid_t *)NULL);
	}
	/*
	 * See if there is an associated bus list.
	 *
	 * for performance, allocate in size of BUS_CHUNK chunks.
	 * BUS_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
#	define BUS_CHUNK 8
	new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
	if ((bus_ptr = sc->ha_targets[bus]) == (target2lun_t *)NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK))
		   == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to allocate bus list\n");
			return ((tid_t *)NULL);
		}
		bzero (bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * new_size));
		bus_ptr->size = new_size + 1;
	} else if (bus_ptr->size <= new_size) {
		target2lun_t * new_bus_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK))
		   == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to reallocate bus list\n");
			return ((tid_t *)NULL);
		}
		/*
		 * Zero and copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bzero (new_bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * new_size));
		bcopy (bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
		sc->ha_targets[bus] = new_bus_ptr;
		free (bus_ptr, M_TEMP);
		bus_ptr = new_bus_ptr;
		bus_ptr->size = new_size + 1;
	}
	/*
	 * We now have the bus list, lets get to the target list.
	 * Since most systems have only *one* lun, we do not allocate
	 * in chunks as above, here we allow one, then in chunk sizes.
	 * TARGET_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
#	define TARGET_CHUNK 8
	if ((new_size = lun) != 0) {
		new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
	}
	if ((target_ptr = bus_ptr->LUN[target]) == (lun2tid_t *)NULL) {
		/*
		 * Allocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK))
		   == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to allocate target list\n");
			return ((tid_t *)NULL);
		}
		bzero (target_ptr, sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * new_size));
		target_ptr->size = new_size + 1;
	} else if (target_ptr->size <= new_size) {
		lun2tid_t * new_target_ptr;

		/*
		 * Reallocate a new structure?
		 * Since one element in structure, the +1
		 * needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK))
		   == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to reallocate target list\n");
			return ((tid_t *)NULL);
		}
		/*
		 * Zero and copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bzero (new_target_ptr, sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * new_size));
		bcopy (target_ptr, new_target_ptr,
		  sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
		bus_ptr->LUN[target] = new_target_ptr;
		free (target_ptr, M_TEMP);
		target_ptr = new_target_ptr;
		target_ptr->size = new_size + 1;
	}
	/*
	 * Now, acquire the TID address from the LUN indexed list.
	 */
	return (&(target_ptr->TID[lun]));
} /* ASR_getTidAddress */
1306
1307/*
1308 * Get a pre-existing TID relationship.
1309 *
1310 * If the TID was never set, return (tid_t)-1.
1311 *
1312 * should use mutex rather than spl.
1313 */
1314STATIC INLINE tid_t
1315ASR_getTid (
1316 IN Asr_softc_t * sc,
1317 IN int bus,
1318 IN int target,
1319 IN int lun)
1320{
1321 tid_t * tid_ptr;
1322 int s;
1323 OUT tid_t retval;
1324
1325 s = splcam();
1326 if (((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, FALSE))
1327 == (tid_t *)NULL)
1328 /* (tid_t)0 or (tid_t)-1 indicate no TID */
1329 || (*tid_ptr == (tid_t)0)) {
1330 splx(s);
1331 return ((tid_t)-1);
1332 }
1333 retval = *tid_ptr;
1334 splx(s);
1335 return (retval);
1336} /* ASR_getTid */
1337
1338/*
1339 * Set a TID relationship.
1340 *
1341 * If the TID was not set, return (tid_t)-1.
1342 *
1343 * should use mutex rather than spl.
1344 */
1345STATIC INLINE tid_t
1346ASR_setTid (
1347 INOUT Asr_softc_t * sc,
1348 IN int bus,
1349 IN int target,
1350 IN int lun,
1351 INOUT tid_t TID)
1352{
1353 tid_t * tid_ptr;
1354 int s;
1355
1356 if (TID != (tid_t)-1) {
1357 if (TID == 0) {
1358 return ((tid_t)-1);
1359 }
1360 s = splcam();
1361 if ((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, TRUE))
1362 == (tid_t *)NULL) {
1363 splx(s);
1364 return ((tid_t)-1);
1365 }
1366 *tid_ptr = TID;
1367 splx(s);
1368 }
1369 return (TID);
1370} /* ASR_setTid */
1371
/*-------------------------------------------------------------------------*/
/*			    Function ASR_rescan				   */
/*-------------------------------------------------------------------------*/
/* The Parameters Passed To This Function Are :				   */
/*	Asr_softc_t *	 : HBA miniport driver's adapter data storage.	   */
/*									   */
/* This Function Will rescan the adapter and resynchronize any data	   */
/*									   */
/* Return : 0 For OK, Error Code Otherwise				   */
/*-------------------------------------------------------------------------*/

STATIC INLINE int
ASR_rescan(
	IN Asr_softc_t * sc)
{
	int bus;
	OUT int error;

	/*
	 * Re-acquire the LCT table and synchronize us to the adapter.
	 */
	if ((error = ASR_acquireLct(sc)) == 0) {
		error = ASR_acquireHrt(sc);
	}

	if (error != 0) {
		return error;
	}

	bus = sc->ha_MaxBus;
	/* Reset all existing cached TID lookups */
	do {
		int target, event = 0;

		/*
		 * Scan for all targets on this bus to see if they
		 * got affected by the rescan.
		 */
		for (target = 0; target <= sc->ha_MaxId; ++target) {
			int lun;

			/* Stay away from the controller ID */
			if (target == sc->ha_adapter_target[bus]) {
				continue;
			}
			for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
				PI2O_LCT_ENTRY Device;
				tid_t TID = (tid_t)-1;
				tid_t LastTID;

				/*
				 * See if the cached TID changed. Search for
				 * the device in our new LCT.
				 */
				for (Device = sc->ha_LCT->LCTEntry;
				  Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
				   + I2O_LCT_getTableSize(sc->ha_LCT));
				  ++Device) {
					if ((Device->le_type != I2O_UNKNOWN)
					 && (Device->le_bus == bus)
					 && (Device->le_target == target)
					 && (Device->le_lun == lun)
					 && (I2O_LCT_ENTRY_getUserTID(Device)
					  == 0xFFF)) {
						TID = I2O_LCT_ENTRY_getLocalTID(
						  Device);
						break;
					}
				}
				/*
				 * Indicate to the OS that the label needs
				 * to be recalculated, or that the specific
				 * open device is no longer valid (Merde)
				 * because the cached TID changed.
				 */
				LastTID = ASR_getTid (sc, bus, target, lun);
				if (LastTID != TID) {
					struct cam_path * path;

					/*
					 * If no per-device path can be made,
					 * fall back to accumulating events to
					 * broadcast on the bus path below.
					 */
					if (xpt_create_path(&path,
					  /*periph*/NULL,
					  cam_sim_path(sc->ha_sim[bus]),
					  target, lun) != CAM_REQ_CMP) {
						if (TID == (tid_t)-1) {
							event |= AC_LOST_DEVICE;
						} else {
							event |= AC_INQ_CHANGED
							       | AC_GETDEV_CHANGED;
						}
					} else {
						if (TID == (tid_t)-1) {
							/* Device disappeared */
							xpt_async(
							  AC_LOST_DEVICE,
							  path, NULL);
						} else if (LastTID == (tid_t)-1) {
							/* Newly found device */
							struct ccb_getdev ccb;

							xpt_setup_ccb(
							  &(ccb.ccb_h),
							  path, /*priority*/5);
							xpt_async(
							  AC_FOUND_DEVICE,
							  path,
							  &ccb);
						} else {
							/* TID moved: same slot,
							 * different device */
							xpt_async(
							  AC_INQ_CHANGED,
							  path, NULL);
							xpt_async(
							  AC_GETDEV_CHANGED,
							  path, NULL);
						}
					}
				}
				/*
				 * We have the option of clearing the
				 * cached TID for it to be rescanned, or to
				 * set it now even if the device never got
				 * accessed. We chose the later since we
				 * currently do not use the condition that
				 * the TID ever got cached.
				 */
				ASR_setTid (sc, bus, target, lun, TID);
			}
		}
		/*
		 * The xpt layer can not handle multiple events at the
		 * same call.
		 */
		if (event & AC_LOST_DEVICE) {
			xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
		}
		if (event & AC_INQ_CHANGED) {
			xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
		}
		if (event & AC_GETDEV_CHANGED) {
			xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
		}
	} while (--bus >= 0);
	return (error);
} /* ASR_rescan */
1513
1514/*-------------------------------------------------------------------------*/
1515/* Function ASR_reset */
1516/*-------------------------------------------------------------------------*/
1517/* The Parameters Passed To This Function Are : */
1518/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1519/* */
1520/* This Function Will reset the adapter and resynchronize any data */
1521/* */
1522/* Return : None */
1523/*-------------------------------------------------------------------------*/
1524
1525STATIC INLINE int
1526ASR_reset(
1527 IN Asr_softc_t * sc)
1528{
1529 int s, retVal;
1530
1531 s = splcam();
1532 if ((sc->ha_in_reset == HA_IN_RESET)
1533 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1534 splx (s);
1535 return (EBUSY);
1536 }
1537 /*
1538 * Promotes HA_OPERATIONAL to HA_IN_RESET,
1539 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1540 */
1541 ++(sc->ha_in_reset);
1542 if (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0) {
1543 debug_asr_printf ("ASR_resetIOP failed\n");
1544 /*
1545 * We really need to take this card off-line, easier said
1546 * than make sense. Better to keep retrying for now since if a
1547 * UART cable is connected the blinkLEDs the adapter is now in
1548 * a hard state requiring action from the monitor commands to
1549 * the HBA to continue. For debugging waiting forever is a
1550 * good thing. In a production system, however, one may wish
1551 * to instead take the card off-line ...
1552 */
1553# if 0 && (defined(HA_OFF_LINE))
1554 /*
1555 * Take adapter off-line.
1556 */
1557 printf ("asr%d: Taking adapter off-line\n",
1558 sc->ha_path[0]
1559 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1560 : 0);
1561 sc->ha_in_reset = HA_OFF_LINE;
1562 splx (s);
1563 return (ENXIO);
1564# else
1565 /* Wait Forever */
1566 while (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0);
1567# endif
1568 }
1569 retVal = ASR_init (sc);
1570 splx (s);
1571 if (retVal != 0) {
1572 debug_asr_printf ("ASR_init failed\n");
1573 sc->ha_in_reset = HA_OFF_LINE;
1574 return (ENXIO);
1575 }
1576 if (ASR_rescan (sc) != 0) {
1577 debug_asr_printf ("ASR_rescan failed\n");
1578 }
1579 ASR_failActiveCommands (sc);
1580 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1581 printf ("asr%d: Brining adapter back on-line\n",
1582 sc->ha_path[0]
1583 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1584 : 0);
1585 }
1586 sc->ha_in_reset = HA_OPERATIONAL;
1587 return (0);
1588} /* ASR_reset */
1589
/*
 * Device timeout handler.
 *
 * Escalation policy: if the adapter reports a blink-LED fault code,
 * reset the whole adapter; on the first timeout of a ccb, reset just
 * its SCSI bus and re-arm the watchdog; on a second timeout of the
 * same ccb, reset the whole adapter.
 */
STATIC void
asr_timeout(
	INOUT void  * arg)
{
	union asr_ccb	* ccb = (union asr_ccb *)arg;
	Asr_softc_t	* sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	int		  s;

	debug_asr_print_path(ccb);
	debug_asr_printf("timed out");

	/*
	 * Check if the adapter has locked up?
	 */
	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
		/* Reset Adapter */
		printf ("asr%d: Blink LED 0x%x resetting adapter\n",
		  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
		if (ASR_reset (sc) == ENXIO) {
			/* Try again later */
			ccb->ccb_h.timeout_ch = timeout(asr_timeout,
			  (caddr_t)ccb,
			  (ccb->ccb_h.timeout * hz) / 1000);
		}
		return;
	}
	/*
	 * Abort does not function on the ASR card!!! Walking away from
	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
	 * our best bet, followed by a complete adapter reset if that fails.
	 */
	s = splcam();
	/* Check if we already timed out once to raise the issue */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
		if (ASR_reset (sc) == ENXIO) {
			/* Adapter is off-line; retry the reset later */
			ccb->ccb_h.timeout_ch = timeout(asr_timeout,
			  (caddr_t)ccb,
			  (ccb->ccb_h.timeout * hz) / 1000);
		}
		splx(s);
		return;
	}
	debug_asr_printf ("\nresetting bus\n");
	/* If the BUS reset does not take, then an adapter reset is next! */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
	ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
	  (ccb->ccb_h.timeout * hz) / 1000);
	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
	xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
	splx(s);
} /* asr_timeout */
1646
/*
 * send a message asynchronously
 *
 * Grabs a free inbound message frame from the adapter, copies the
 * message into it and posts the frame offset to the inbound FIFO.
 * Returns the frame offset used, or EMPTY_QUEUE when no frame was
 * available (in which case an adapter reset is attempted if the
 * blink LED reports a fault).
 */
STATIC INLINE int
ASR_queue(
	IN Asr_softc_t	    * sc,
	IN PI2O_MESSAGE_FRAME Message)
{
	OUT U32		      MessageOffset;
	union asr_ccb	    * ccb;

	debug_asr_printf ("Host Command Dump:\n");
	debug_asr_dump_message (Message);

	/* Recover the ccb (if any) stashed in the initiator context */
	ccb = (union asr_ccb *)(long)
	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);

	if ((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE) {
#ifdef ASR_MEASURE_PERFORMANCE
		int	startTimeIndex;

		if (ccb) {
			++sc->ha_performance.command_count[
			  (int) ccb->csio.cdb_io.cdb_bytes[0]];
			DEQ_TIMEQ_FREE_LIST(startTimeIndex,
			  sc->ha_timeQFreeList,
			  sc->ha_timeQFreeHead,
			  sc->ha_timeQFreeTail);
			if (-1 != startTimeIndex) {
				microtime(&(sc->ha_timeQ[startTimeIndex]));
			}
			/* Time stamp the command before we send it out */
			((PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *) Message)->
			  PrivateMessageFrame.TransactionContext
			    = (I2O_TRANSACTION_CONTEXT) startTimeIndex;

			++sc->ha_submitted_ccbs_count;
			if (sc->ha_performance.max_submit_count
			  < sc->ha_submitted_ccbs_count) {
				sc->ha_performance.max_submit_count
				  = sc->ha_submitted_ccbs_count;
			}
		}
#endif
		/* Copy the message into the adapter frame at the offset */
		bcopy (Message, sc->ha_Fvirt + MessageOffset,
		  I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
		if (ccb) {
			ASR_ccbAdd (sc, ccb);
		}
		/* Post the command */
		sc->ha_Virt->ToFIFO = MessageOffset;
	} else {
		if (ASR_getBlinkLedCode(sc)) {
			/*
			 * Unlikely we can do anything if we can't grab a
			 * message frame :-(, but lets give it a try.
			 */
			(void)ASR_reset (sc);
		}
	}
	return (MessageOffset);
} /* ASR_queue */
1709
1710
/*
 * Simple Scatter Gather elements.
 *
 * SG(SGL,Index,Flags,Buffer,Size) fills in element `Index' of the
 * simple SG list `SGL': sets the byte count, ORs `Flags' into the
 * simple-address element flags, and stores the physical address of
 * `Buffer' (NULL stays NULL).  Expands to three statements; callers
 * must treat it as such.
 */
#define	SG(SGL,Index,Flags,Buffer,Size)				   \
	I2O_FLAGS_COUNT_setCount(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  Size);						   \
	I2O_FLAGS_COUNT_setFlags(				   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
	  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	   \
	I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		   \
	  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		   \
	  (Buffer == NULL) ? NULL : KVTOPHYS(Buffer))
1722
/*
 * Retrieve Parameter Group.
 *	Buffer must be allocated using defAlignLong macro.
 *
 * Issues a synchronous I2O UTIL_PARAMS_GET for group `Group' of the
 * device `TID', with a two-element SG list: one for the operations
 * request, one for the result buffer.  Returns a pointer into Buffer
 * at the returned parameter data, or NULL on failure.
 */
STATIC void *
ASR_getParams(
	IN Asr_softc_t * sc,
	IN tid_t	 TID,
	IN int		 Group,
	OUT void	 * Buffer,
	IN unsigned	 BufferSize)
{
	struct paramGetMessage {
		I2O_UTIL_PARAMS_GET_MESSAGE M;
		char F[
		  sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
		struct Operations {
			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
		} O;
	};
	defAlignLong(struct paramGetMessage, Message);
	struct Operations * Operations_Ptr;
	I2O_UTIL_PARAMS_GET_MESSAGE * Message_Ptr;
	struct ParamBuffer {
		I2O_PARAM_RESULTS_LIST_HEADER	    Header;
		I2O_PARAM_READ_OPERATION_RESULT	    Read;
		char				    Info[1];
	} * Buffer_Ptr;

	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	    + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
	  + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	bzero ((void *)Operations_Ptr, sizeof(struct Operations));
	/* One FIELD_GET operation requesting all (0xFFFF) fields of Group */
	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
	  &(Operations_Ptr->Header), 1);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
	  &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
	  &(Operations_Ptr->Template[0]), 0xFFFF);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
	  &(Operations_Ptr->Template[0]), Group);
	bzero ((void *)(Buffer_Ptr = getAlignLong(struct ParamBuffer, Buffer)),
	  BufferSize);

	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  I2O_VERSION_11
	  + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
	  TID);
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_UTIL_PARAMS_GET);
	/*
	 * Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Operations_Ptr, sizeof(struct Operations));
	SG(&(Message_Ptr->SGL), 1,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Buffer_Ptr, BufferSize);

	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
	 && (Buffer_Ptr->Header.ResultCount)) {
		return ((void *)(Buffer_Ptr->Info));
	}
	return ((void *)NULL);
} /* ASR_getParams */
1795
/*
 * Acquire the LCT information.
 *
 * Performed in two passes: a first LCT_NOTIFY with a table sized for
 * zero entries yields the real table size; a buffer of that size is
 * then allocated and fetched via a scatter/gather list built page by
 * page (kernel malloc is only guaranteed contiguous below a page).
 * Finally each entry's le_* fields are derived from its class and
 * device-info parameter groups.  Returns 0 on success.
 */
STATIC INLINE int
ASR_acquireLct (
	INOUT Asr_softc_t * sc)
{
	PI2O_EXEC_LCT_NOTIFY_MESSAGE	Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT		sg;
	int				MessageSizeInBytes;
	caddr_t				v;
	int				len;
	I2O_LCT				Table;
	PI2O_LCT_ENTRY			Entry;

	/*
	 * sc value assumed valid
	 */
	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc (
	  MessageSizeInBytes, M_TEMP, M_WAITOK))
	  == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
		return (ENOMEM);
	}
	(void)ASR_fillMessage((char *)Message_Ptr, MessageSizeInBytes);
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	  I2O_CLASS_MATCH_ANYCLASS);
	/*
	 * Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
	  sizeof(I2O_LCT));
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(&Table,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	/*
	 * Determine the size of the LCT table.
	 */
	if (sc->ha_LCT) {
		free (sc->ha_LCT, M_TEMP);
	}
	/*
	 * malloc only generates contiguous memory when less than a
	 * page is expected. We must break the request up into an SG list ...
	 */
	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
	 || (len > (128 * 1024))) {	/* Arbitrary */
		free (Message_Ptr, M_TEMP);
		return (EINVAL);
	}
	if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK))
	  == (PI2O_LCT)NULL) {
		free (Message_Ptr, M_TEMP);
		return (ENOMEM);
	}
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(sc->ha_LCT,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/*
	 * Convert the access to the LCT table into a SG list.
	 */
	sg = Message_Ptr->SGL.u.Simple;
	v = (caddr_t)(sc->ha_LCT);
	for (;;) {
		int next, base, span;

		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			int size;

			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		/* Construct the Flags */
		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		{
			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
			if (len <= 0) {
				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
				    | I2O_SGL_FLAGS_LAST_ELEMENT
				    | I2O_SGL_FLAGS_END_OF_BUFFER);
			}
			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
		}

		if (len <= 0) {
			break;
		}

		/*
		 * Incrementing requires resizing of the packet, since the
		 * message frame must grow by one SG element per extra span.
		 */
		++sg;
		MessageSizeInBytes += sizeof(*sg);
		I2O_MESSAGE_FRAME_setMessageSize(
		  &(Message_Ptr->StdMessageFrame),
		  I2O_MESSAGE_FRAME_getMessageSize(
		    &(Message_Ptr->StdMessageFrame))
		  + (sizeof(*sg) / sizeof(U32)));
		{
			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;

			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
			    malloc (MessageSizeInBytes, M_TEMP, M_WAITOK))
			    == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
				free (sc->ha_LCT, M_TEMP);
				sc->ha_LCT = (PI2O_LCT)NULL;
				free (Message_Ptr, M_TEMP);
				return (ENOMEM);
			}
			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
			bcopy ((caddr_t)Message_Ptr,
			  (caddr_t)NewMessage_Ptr, span);
			free (Message_Ptr, M_TEMP);
			sg = (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)NewMessage_Ptr) + span);
			Message_Ptr = NewMessage_Ptr;
		}
	}
	{	int retval;

		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
		free (Message_Ptr, M_TEMP);
		if (retval != CAM_REQ_CMP) {
			return (ENODEV);
		}
	}
	/* If the LCT table grew, lets truncate accesses */
	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
	}
	/* Classify each entry and fill in its bus/target/lun information */
	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Entry) {
		Entry->le_type = I2O_UNKNOWN;
		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {

		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
			Entry->le_type = I2O_BSA;
			break;

		case I2O_CLASS_SCSI_PERIPHERAL:
			Entry->le_type = I2O_SCSI;
			break;

		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
			Entry->le_type = I2O_FCA;
			break;

		case I2O_CLASS_BUS_ADAPTER_PORT:
			Entry->le_type = I2O_PORT | I2O_SCSI;
			/* FALLTHRU */
		case I2O_CLASS_FIBRE_CHANNEL_PORT:
			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
				Entry->le_type = I2O_PORT | I2O_FCA;
			}
		{	struct ControllerInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	    Header;
				I2O_PARAM_READ_OPERATION_RESULT	    Read;
				I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
			};
			defAlignLong(struct ControllerInfo, Buffer);
			PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
			    Buffer, sizeof(struct ControllerInfo)))
			== (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)NULL) {
				continue;
			}
			Entry->le_target
			  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
			    Info);
			Entry->le_lun = 0;
		}	/* FALLTHRU */
		default:
			continue;
		}
		{	struct DeviceInfo {
				I2O_PARAM_RESULTS_LIST_HEADER	Header;
				I2O_PARAM_READ_OPERATION_RESULT Read;
				I2O_DPT_DEVICE_INFO_SCALAR	Info;
			};
			defAlignLong (struct DeviceInfo, Buffer);
			PI2O_DPT_DEVICE_INFO_SCALAR	 Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_DPT_DEVICE_INFO_GROUP_NO,
			    Buffer, sizeof(struct DeviceInfo)))
			== (PI2O_DPT_DEVICE_INFO_SCALAR)NULL) {
				continue;
			}
			Entry->le_type
			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
			Entry->le_bus
			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
			if ((Entry->le_bus > sc->ha_MaxBus)
			 && (Entry->le_bus <= MAX_CHANNEL)) {
				sc->ha_MaxBus = Entry->le_bus;
			}
			Entry->le_target
			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
			Entry->le_lun
			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
		}
	}
	/*
	 * A zero return value indicates success.
	 */
	return (0);
} /* ASR_acquireLct */
2051
2052/*
2053 * Initialize a message frame.
2054 * We assume that the CDB has already been set up, so all we do here is
2055 * generate the Scatter Gather list.
2056 */
STATIC INLINE PI2O_MESSAGE_FRAME
ASR_init_message(
	IN union asr_ccb * ccb,
	OUT PI2O_MESSAGE_FRAME Message)
{
	int next, span, base, rw;
	OUT PI2O_MESSAGE_FRAME Message_Ptr;
	Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	PI2O_SGE_SIMPLE_ELEMENT sg;
	caddr_t v;
	vm_size_t size, len;
	U32 MessageSize;

	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
	bzero (Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message),
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT)));

	{
		/*
		 * Resolve the I2O target ID (TID) for this bus/target/lun.
		 * If it is not cached yet (ASR_getTid returns -1), scan the
		 * LCT for a matching entry that is unclaimed by another
		 * initiator (UserTID == 0xFFF) and cache the result.
		 */
		int target = ccb->ccb_h.target_id;
		int lun = ccb->ccb_h.target_lun;
		int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
		tid_t TID;

		if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
			PI2O_LCT_ENTRY Device;

			TID = (tid_t)0;
			for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
			  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
			  ++Device) {
				if ((Device->le_type != I2O_UNKNOWN)
				 && (Device->le_bus == bus)
				 && (Device->le_target == target)
				 && (Device->le_lun == lun)
				 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
					TID = I2O_LCT_ENTRY_getLocalTID(Device);
					ASR_setTid (sc, Device->le_bus,
					  Device->le_target, Device->le_lun,
					  TID);
					break;
				}
			}
		}
		/* No TID: the device is unknown to the adapter, bail out. */
		if (TID == (tid_t)0) {
			return ((PI2O_MESSAGE_FRAME)NULL);
		}
		I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
		  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
	}
	/*
	 * Fill in the standard frame header: version/offset nibbles, frame
	 * size in 32-bit words (without the SG list), function code and the
	 * private (DPT) SCSI SCB execute sub-function.
	 */
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
	  (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		/ sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	    I2O_SCB_FLAG_ENABLE_DISCONNECT
	  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator & Transaction context field.  The ccb pointer is
	 * stashed here and recovered when the reply comes back.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
	/*
	 * copy the cdb over
	 */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
	bcopy (&(ccb->csio.cdb_io),
	  ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB, ccb->csio.cdb_len);

	/*
	 * Given a buffer describing a transfer, set up a scatter/gather map
	 * in a ccb to map that SCSI transfer.
	 */

	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;

	/* Direction flags depend on whether any data is moved at all. */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	  (ccb->csio.dxfer_len)
	    ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
	            : (I2O_SCB_FLAG_XFER_FROM_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
	    :         (I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

	/*
	 * Given a transfer described by a `data', fill in the SG list.
	 */
	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];

	len = ccb->csio.dxfer_len;
	v = ccb->csio.data_ptr;
	ASSERT (ccb->csio.dxfer_len >= 0);
	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
	/* Build one simple SG element per physically contiguous run. */
	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		if (len == 0) {
			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
		}
		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
		++sg;
		MessageSize += sizeof(*sg) / sizeof(U32);
	}
	/* We always do the request sense ... */
	if ((span = ccb->csio.sense_len) == 0) {
		span = sizeof(ccb->csio.sense_data);
	}
	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &(ccb->csio.sense_data), span);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  MessageSize + (sizeof(*sg) / sizeof(U32)));
	return (Message_Ptr);
} /* ASR_init_message */
2209
2210/*
2211 * Reset the adapter.
2212 */
2213STATIC INLINE U32
2214ASR_initOutBound (
2215 INOUT Asr_softc_t * sc)
2216{
2217 struct initOutBoundMessage {
2218 I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
2219 U32 R;
2220 };
2221 defAlignLong(struct initOutBoundMessage,Message);
2222 PI2O_EXEC_OUTBOUND_INIT_MESSAGE Message_Ptr;
2223 OUT U32 * volatile Reply_Ptr;
2224 U32 Old;
2225
2226 /*
2227 * Build up our copy of the Message.
2228 */
2229 Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(Message,
2230 sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
2231 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2232 I2O_EXEC_OUTBOUND_INIT);
2233 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
2234 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
2235 sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
2236 /*
2237 * Reset the Reply Status
2238 */
2239 *(Reply_Ptr = (U32 *)((char *)Message_Ptr
2240 + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
2241 SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
2242 sizeof(U32));
2243 /*
2244 * Send the Message out
2245 */
2246 if ((Old = ASR_initiateCp (sc->ha_Virt, sc->ha_Fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
2247 u_long size, addr;
2248
2249 /*
2250 * Wait for a response (Poll).
2251 */
2252 while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
2253 /*
2254 * Re-enable the interrupts.
2255 */
2256 sc->ha_Virt->Mask = Old;
2257 /*
2258 * Populate the outbound table.
2259 */
2260 if (sc->ha_Msgs == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
2261
2262 /* Allocate the reply frames */
2263 size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
2264 * sc->ha_Msgs_Count;
2265
2266 /*
2267 * contigmalloc only works reliably at
2268 * initialization time.
2269 */
2270 if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
2271 contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
2272 0xFFFFFFFFul, (u_long)sizeof(U32), 0ul))
2273 != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
2274 (void)bzero ((char *)sc->ha_Msgs, size);
2275 sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
2276 }
2277 }
2278
2279 /* Initialize the outbound FIFO */
2280 if (sc->ha_Msgs != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL)
2281 for (size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
2282 size; --size) {
2283 sc->ha_Virt->FromFIFO = addr;
2284 addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
2285 }
2286 return (*Reply_Ptr);
2287 }
2288 return (0);
2289} /* ASR_initOutBound */
2290
2291/*
2292 * Set the system table
2293 */
STATIC INLINE int
ASR_setSysTab(
	IN Asr_softc_t * sc)
{
	PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
	PI2O_SET_SYSTAB_HEADER SystemTable;
	Asr_softc_t * ha;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	int retVal;

	/*
	 * Build the SysTab header; NumberEntries counts every attached
	 * adapter on the global Asr_softc list, not just `sc'.
	 */
	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc (
	  sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK))
	  == (PI2O_SET_SYSTAB_HEADER)NULL) {
		return (ENOMEM);
	}
	bzero (SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
	for (ha = Asr_softc; ha; ha = ha->ha_next) {
		++SystemTable->NumberEntries;
	}
	/*
	 * Message buffer sized for the frame plus one SG element per
	 * adapter plus three extra (header SG and the two trailing
	 * IOP/private memory placeholders).
	 */
	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc (
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
	  M_TEMP, M_WAITOK)) == (PI2O_EXEC_SYS_TAB_SET_MESSAGE)NULL) {
		free (SystemTable, M_TEMP);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((char *)Message_Ptr,
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_SYS_TAB_SET);
	/*
	 * Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
	  + ((I2O_MESSAGE_FRAME_getVersionOffset(
	      &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
	++sg;
	/* One SG element per adapter's IOP system table entry. */
	for (ha = Asr_softc; ha; ha = ha->ha_next) {
		SG(sg, 0,
		  ((ha->ha_next)
		    ? (I2O_SGL_FLAGS_DIR)
		    : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
		  &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
		++sg;
	}
	/* Two empty trailing buffers terminate the SG list. */
	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
	  | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	/* Returns the ASR_queue_c status; caller compares to CAM_REQ_CMP. */
	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	free (Message_Ptr, M_TEMP);
	free (SystemTable, M_TEMP);
	return (retVal);
} /* ASR_setSysTab */
2357
STATIC INLINE int
ASR_acquireHrt (
	INOUT Asr_softc_t * sc)
{
	defAlignLong(I2O_EXEC_HRT_GET_MESSAGE,Message);
	I2O_EXEC_HRT_GET_MESSAGE * Message_Ptr;
	struct {
		I2O_HRT	      Header;
		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
	} Hrt;
	u_int8_t NumberOfEntries;
	PI2O_HRT_ENTRY Entry;

	/*
	 * Fetch the Hardware Resource Table (HRT) from the IOP and use it
	 * to assign real bus numbers to the cached LCT entries, tracking
	 * the highest bus seen in sc->ha_MaxBus.
	 */
	bzero ((void *)&Hrt, sizeof (Hrt));
	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11
	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		   / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_HRT_GET);

	/*
	 * Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &Hrt, sizeof(Hrt));
	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
		return (ENODEV);
	}
	/* Clamp entry count to the channels we actually track. */
	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
	  > (MAX_CHANNEL + 1)) {
		NumberOfEntries = MAX_CHANNEL + 1;
	}
	for (Entry = Hrt.Header.HRTEntry;
	  NumberOfEntries != 0;
	  ++Entry, --NumberOfEntries) {
		PI2O_LCT_ENTRY Device;

		/*
		 * Match each HRT entry to LCT entries by TID (low 12 bits
		 * of the AdapterID); the bus number lives in bits 16+.
		 */
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		  ++Device) {
			if (I2O_LCT_ENTRY_getLocalTID(Device)
			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
				  Entry) >> 16;
				if ((Device->le_bus > sc->ha_MaxBus)
				 && (Device->le_bus <= MAX_CHANNEL)) {
					sc->ha_MaxBus = Device->le_bus;
				}
			}
		}
	}
	return (0);
} /* ASR_acquireHrt */
2416
2417/*
2418 * Enable the adapter.
2419 */
2420STATIC INLINE int
2421ASR_enableSys (
2422 IN Asr_softc_t * sc)
2423{
2424 defAlignLong(I2O_EXEC_SYS_ENABLE_MESSAGE,Message);
2425 PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr;
2426
2427 Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(Message,
2428 sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2429 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2430 I2O_EXEC_SYS_ENABLE);
2431 return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2432} /* ASR_enableSys */
2433
2434/*
2435 * Perform the stages necessary to initialize the adapter
2436 */
STATIC int
ASR_init(
	IN Asr_softc_t * sc)
{
	/*
	 * Runs the three bring-up stages in order, short-circuiting on the
	 * first failure; returns zero only when all stages succeed.
	 * NOTE(review): ASR_initOutBound signals success with a non-zero
	 * value, while the other two stages are compared against
	 * CAM_REQ_CMP -- the mixed conventions look intentional but are
	 * worth confirming against the callees.
	 */
	return ((ASR_initOutBound(sc) == 0)
	 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
	 || (ASR_enableSys(sc) != CAM_REQ_CMP));
} /* ASR_init */
2445
2446/*
2447 * Send a Synchronize Cache command to the target device.
2448 */
STATIC INLINE void
ASR_sync (
	IN Asr_softc_t * sc,
	IN int bus,
	IN int target,
	IN int lun)
{
	tid_t TID;

	/*
	 * We will not synchronize the device when there are outstanding
	 * commands issued by the OS (this is due to a locked up device,
	 * as the OS normally would flush all outstanding commands before
	 * issuing a shutdown or an adapter reset).
	 *
	 * NOTE(review): the guard below proceeds when LIST_FIRST(ha_ccb)
	 * is non-NULL, which reads as the opposite of the comment above --
	 * verify which list ha_ccb actually holds before changing either.
	 */
	if ((sc != (Asr_softc_t *)NULL)
	 && (LIST_FIRST(&(sc->ha_ccb)) != (struct ccb_hdr *)NULL)
	 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
	 && (TID != (tid_t)0)) {
		defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;

		/* Hand-build a private SCSI SCB execute frame. */
		bzero (Message_Ptr
		  = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
		  sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));

		I2O_MESSAGE_FRAME_setVersionOffset(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  I2O_VERSION_11
		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4));
		I2O_MESSAGE_FRAME_setMessageSize(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32));
		I2O_MESSAGE_FRAME_setInitiatorAddress (
		  (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
		I2O_MESSAGE_FRAME_setFunction(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
		I2O_MESSAGE_FRAME_setTargetAddress(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  I2O_SCSI_SCB_EXEC);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		    I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  DPT_ORGANIZATION_ID);
		/* 6-byte SYNCHRONIZE CACHE CDB; LUN in byte 1 bits 5-7. */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
		Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
		Message_Ptr->CDB[1] = (lun << 5);

		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

		/* Fire and forget; the result of the flush is not checked. */
		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	}
}
2518
2519STATIC INLINE void
2520ASR_synchronize (
2521 IN Asr_softc_t * sc)
2522{
2523 int bus, target, lun;
2524
2525 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2526 for (target = 0; target <= sc->ha_MaxId; ++target) {
2527 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2528 ASR_sync(sc,bus,target,lun);
2529 }
2530 }
2531 }
2532}
2533
2534/*
2535 * Reset the HBA, targets and BUS.
2536 * Currently this resets *all* the SCSI busses.
2537 */
STATIC INLINE void
asr_hbareset(
	IN Asr_softc_t * sc)
{
	/* Flush all device caches first, then reset the IOP itself. */
	ASR_synchronize (sc);
	(void)ASR_reset (sc);
} /* asr_hbareset */
2545
2546/*
2547 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2548 * limit and a reduction in error checking (in the pre 4.0 case).
2549 */
2550STATIC int
2551asr_pci_map_mem (
2552#if __FreeBSD_version >= 400000
2553 IN device_t tag,
2554#else
2555 IN pcici_t tag,
2556#endif
2557 IN Asr_softc_t * sc)
2558{
2559 int rid;
2560 u_int32_t p, l, s;
2561
2562#if __FreeBSD_version >= 400000
2563 /*
2564 * I2O specification says we must find first *memory* mapped BAR
2565 */
2566 for (rid = PCIR_MAPS;
2567 rid < (PCIR_MAPS + 4 * sizeof(u_int32_t));
2568 rid += sizeof(u_int32_t)) {
2569 p = pci_read_config(tag, rid, sizeof(p));
2570 if ((p & 1) == 0) {
2571 break;
2572 }
2573 }
2574 /*
2575 * Give up?
2576 */
2577 if (rid >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2578 rid = PCIR_MAPS;
2579 }
2580 p = pci_read_config(tag, rid, sizeof(p));
2581 pci_write_config(tag, rid, -1, sizeof(p));
2582 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2583 pci_write_config(tag, rid, p, sizeof(p));
2584 if (l > MAX_MAP) {
2585 l = MAX_MAP;
2586 }
2587 /*
2588 * The 2005S Zero Channel RAID solution is not a perfect PCI
2589 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
2590 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
2591 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
2592 * accessible via BAR0, the messaging registers are accessible
2593 * via BAR1. If the subdevice code is 50 to 59 decimal.
2594 */
2595 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s));
2596 if (s != 0xA5111044) {
2597 s = pci_read_config(tag, PCIR_SUBVEND_0, sizeof(s));
2598 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2599 && (ADPTDOMINATOR_SUB_ID_START <= s)
2600 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2601 l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
2602 }
2603 }
2604 p &= ~15;
2605 sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2606 p, p + l, l, RF_ACTIVE);
2607 if (sc->ha_mem_res == (struct resource *)NULL) {
2608 return (0);
2609 }
2610 sc->ha_Base = (void *)rman_get_start(sc->ha_mem_res);
2611 if (sc->ha_Base == (void *)NULL) {
2612 return (0);
2613 }
2614 sc->ha_Virt = (i2oRegs_t *) rman_get_virtual(sc->ha_mem_res);
2615 if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
2616 if ((rid += sizeof(u_int32_t))
2617 >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2618 return (0);
2619 }
2620 p = pci_read_config(tag, rid, sizeof(p));
2621 pci_write_config(tag, rid, -1, sizeof(p));
2622 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2623 pci_write_config(tag, rid, p, sizeof(p));
2624 if (l > MAX_MAP) {
2625 l = MAX_MAP;
2626 }
2627 p &= ~15;
2628 sc->ha_mes_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2629 p, p + l, l, RF_ACTIVE);
2630 if (sc->ha_mes_res == (struct resource *)NULL) {
2631 return (0);
2632 }
2633 if ((void *)rman_get_start(sc->ha_mes_res) == (void *)NULL) {
2634 return (0);
2635 }
2636 sc->ha_Fvirt = (U8 *) rman_get_virtual(sc->ha_mes_res);
2637 } else {
2638 sc->ha_Fvirt = (U8 *)(sc->ha_Virt);
2639 }
2640#else
2641 vm_size_t psize, poffs;
2642
2643 /*
2644 * I2O specification says we must find first *memory* mapped BAR
2645 */
2646 for (rid = PCI_MAP_REG_START;
2647 rid < (PCI_MAP_REG_START + 4 * sizeof(u_int32_t));
2648 rid += sizeof(u_int32_t)) {
2649 p = pci_conf_read (tag, rid);
2650 if ((p & 1) == 0) {
2651 break;
2652 }
2653 }
2654 if (rid >= (PCI_MAP_REG_START + 4 * sizeof(u_int32_t))) {
2655 rid = PCI_MAP_REG_START;
2656 }
2657 /*
2658 ** save old mapping, get size and type of memory
2659 **
2660 ** type is in the lowest four bits.
2661 ** If device requires 2^n bytes, the next
2662 ** n-4 bits are read as 0.
2663 */
2664
2665 sc->ha_Base = (void *)((p = pci_conf_read (tag, rid))
2666 & PCI_MAP_MEMORY_ADDRESS_MASK);
2667 pci_conf_write (tag, rid, 0xfffffffful);
2668 l = pci_conf_read (tag, rid);
2669 pci_conf_write (tag, rid, p);
2670
2671 /*
2672 ** check the type
2673 */
2674
2675 if (!((l & PCI_MAP_MEMORY_TYPE_MASK) == PCI_MAP_MEMORY_TYPE_32BIT_1M
2676 && ((u_long)sc->ha_Base & ~0xfffff) == 0)
2677 && ((l & PCI_MAP_MEMORY_TYPE_MASK) != PCI_MAP_MEMORY_TYPE_32BIT)) {
2678 debug_asr_printf (
2679 "asr_pci_map_mem failed: bad memory type=0x%x\n",
2680 (unsigned) l);
2681 return (0);
2682 };
2683
2684 /*
2685 ** get the size.
2686 */
2687
2688 psize = -(l & PCI_MAP_MEMORY_ADDRESS_MASK);
2689 if (psize > MAX_MAP) {
2690 psize = MAX_MAP;
2691 }
2692 /*
2693 * The 2005S Zero Channel RAID solution is not a perfect PCI
2694 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
2695 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
2696 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
2697 * accessible via BAR0, the messaging registers are accessible
2698 * via BAR1. If the subdevice code is 50 to 59 decimal.
2699 */
2700 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s));
2701 if (s != 0xA5111044) {
2702 s = pci_conf_read (tag, PCIR_SUBVEND_0)
2703 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2704 && (ADPTDOMINATOR_SUB_ID_START <= s)
2705 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2706 psize = MAX_MAP;
2707 }
2708 }
2709
2710 if ((sc->ha_Base == (void *)NULL)
2711 || (sc->ha_Base == (void *)PCI_MAP_MEMORY_ADDRESS_MASK)) {
2712 debug_asr_printf ("asr_pci_map_mem: not configured by bios.\n");
2713 return (0);
2714 };
2715
2716 /*
2717 ** Truncate sc->ha_Base to page boundary.
2718 ** (Or does pmap_mapdev the job?)
2719 */
2720
2721 poffs = (u_long)sc->ha_Base - trunc_page ((u_long)sc->ha_Base);
2722 sc->ha_Virt = (i2oRegs_t *)pmap_mapdev ((u_long)sc->ha_Base - poffs,
2723 psize + poffs);
2724
2725 if (sc->ha_Virt == (i2oRegs_t *)NULL) {
2726 return (0);
2727 }
2728
2729 sc->ha_Virt = (i2oRegs_t *)((u_long)sc->ha_Virt + poffs);
2730 if (s == 0xA5111044) {
2731 if ((rid += sizeof(u_int32_t))
2732 >= (PCI_MAP_REG_START + 4 * sizeof(u_int32_t))) {
2733 return (0);
2734 }
2735
2736 /*
2737 ** save old mapping, get size and type of memory
2738 **
2739 ** type is in the lowest four bits.
2740 ** If device requires 2^n bytes, the next
2741 ** n-4 bits are read as 0.
2742 */
2743
2744 if ((((p = pci_conf_read (tag, rid))
2745 & PCI_MAP_MEMORY_ADDRESS_MASK) == 0L)
2746 || ((p & PCI_MAP_MEMORY_ADDRESS_MASK)
2747 == PCI_MAP_MEMORY_ADDRESS_MASK)) {
2748 debug_asr_printf ("asr_pci_map_mem: not configured by bios.\n");
2749 }
2750 pci_conf_write (tag, rid, 0xfffffffful);
2751 l = pci_conf_read (tag, rid);
2752 pci_conf_write (tag, rid, p);
2753 p &= PCI_MAP_MEMORY_TYPE_MASK;
2754
2755 /*
2756 ** check the type
2757 */
2758
2759 if (!((l & PCI_MAP_MEMORY_TYPE_MASK)
2760 == PCI_MAP_MEMORY_TYPE_32BIT_1M
2761 && (p & ~0xfffff) == 0)
2762 && ((l & PCI_MAP_MEMORY_TYPE_MASK)
2763 != PCI_MAP_MEMORY_TYPE_32BIT)) {
2764 debug_asr_printf (
2765 "asr_pci_map_mem failed: bad memory type=0x%x\n",
2766 (unsigned) l);
2767 return (0);
2768 };
2769
2770 /*
2771 ** get the size.
2772 */
2773
2774 psize = -(l & PCI_MAP_MEMORY_ADDRESS_MASK);
2775 if (psize > MAX_MAP) {
2776 psize = MAX_MAP;
2777 }
2778
2779 /*
2780 ** Truncate p to page boundary.
2781 ** (Or does pmap_mapdev the job?)
2782 */
2783
2784 poffs = p - trunc_page (p);
2785 sc->ha_Fvirt = (U8 *)pmap_mapdev (p - poffs, psize + poffs);
2786
2787 if (sc->ha_Fvirt == (U8 *)NULL) {
2788 return (0);
2789 }
2790
2791 sc->ha_Fvirt = (U8 *)((u_long)sc->ha_Fvirt + poffs);
2792 } else {
2793 sc->ha_Fvirt = (U8 *)(sc->ha_Virt);
2794 }
2795#endif
2796 return (1);
2797} /* asr_pci_map_mem */
2798
2799/*
2800 * A simplified copy of the real pci_map_int with additional
2801 * registration requirements.
2802 */
STATIC int
asr_pci_map_int (
#if __FreeBSD_version >= 400000
	IN device_t tag,
#else
	IN pcici_t tag,
#endif
	IN Asr_softc_t * sc)
{
	/*
	 * Allocates and activates the adapter's shared IRQ, hooks up
	 * asr_intr as the handler and records the interrupt line in
	 * sc->ha_irq.  Returns 1 on success, 0 on failure.
	 */
#if __FreeBSD_version >= 400000
	int rid = 0;

	sc->ha_irq_res = bus_alloc_resource(tag, SYS_RES_IRQ, &rid,
	  0, ~0, 1, RF_ACTIVE | RF_SHAREABLE);
	if (sc->ha_irq_res == (struct resource *)NULL) {
		return (0);
	}
	if (bus_setup_intr(tag, sc->ha_irq_res, INTR_TYPE_CAM,
	  (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr))) {
		return (0);
	}
	sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char));
#else
	if (!pci_map_int(tag, (pci_inthand_t *)asr_intr,
	  (void *)sc, &cam_imask)) {
		return (0);
	}
	sc->ha_irq = pci_conf_read(tag, PCIR_INTLINE);
#endif
	return (1);
} /* asr_pci_map_int */
2834
2835/*
2836 * Attach the devices, and virtual devices to the driver list.
2837 */
2838STATIC ATTACH_RET
2839asr_attach (ATTACH_ARGS)
2840{
2841 Asr_softc_t * sc;
2842 struct scsi_inquiry_data * iq;
2843 ATTACH_SET();
2844
2845 if ((sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT)) == (Asr_softc_t *)NULL) {
2846 ATTACH_RETURN(ENOMEM);
2847 }
2848 if (Asr_softc == (Asr_softc_t *)NULL) {
2849 /*
2850 * Fixup the OS revision as saved in the dptsig for the
2851 * engine (dptioctl.h) to pick up.
2852 */
2853 bcopy (osrelease, &ASR_sig.dsDescription[16], 5);
2854 printf ("asr%d: major=%d\n", unit, asr_cdevsw.d_maj);
2855 }
2856 /*
2857 * Initialize the software structure
2858 */
2859 bzero (sc, sizeof(*sc));
2860 LIST_INIT(&(sc->ha_ccb));
2861# ifdef ASR_MEASURE_PERFORMANCE
2862 {
2863 u_int32_t i;
2864
2865 // initialize free list for timeQ
2866 sc->ha_timeQFreeHead = 0;
2867 sc->ha_timeQFreeTail = MAX_TIMEQ_SIZE - 1;
2868 for (i = 0; i < MAX_TIMEQ_SIZE; i++) {
2869 sc->ha_timeQFreeList[i] = i;
2870 }
2871 }
2872# endif
2873 /* Link us into the HA list */
2874 {
2875 Asr_softc_t **ha;
2876
2877 for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next));
2878 *(ha) = sc;
2879 }
2880 {
2881 PI2O_EXEC_STATUS_GET_REPLY status;
2882 int size;
2883
2884 /*
2885 * This is the real McCoy!
2886 */
2887 if (!asr_pci_map_mem(tag, sc)) {
2888 printf ("asr%d: could not map memory\n", unit);
2889 ATTACH_RETURN(ENXIO);
2890 }
2891 /* Enable if not formerly enabled */
2892#if __FreeBSD_version >= 400000
2893 pci_write_config (tag, PCIR_COMMAND,
2894 pci_read_config (tag, PCIR_COMMAND, sizeof(char))
2895 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2896 /* Knowledge is power, responsibility is direct */
2897 {
2898 struct pci_devinfo {
2899 STAILQ_ENTRY(pci_devinfo) pci_links;
2900 struct resource_list resources;
2901 pcicfgregs cfg;
2902 } * dinfo = device_get_ivars(tag);
2903 sc->ha_pciBusNum = dinfo->cfg.bus;
2904 sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3)
2905 | dinfo->cfg.func;
2906 }
2907#else
2908 pci_conf_write (tag, PCIR_COMMAND,
2909 pci_conf_read (tag, PCIR_COMMAND)
2910 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
2911 /* Knowledge is power, responsibility is direct */
2912 switch (pci_mechanism) {
2913
2914 case 1:
2915 sc->ha_pciBusNum = tag.cfg1 >> 16;
2916 sc->ha_pciDeviceNum = tag.cfg1 >> 8;
2917
2918 case 2:
2919 sc->ha_pciBusNum = tag.cfg2.forward;
2920 sc->ha_pciDeviceNum = ((tag.cfg2.enable >> 1) & 7)
2921 | (tag.cfg2.port >> 5);
2922 }
2923#endif
2924 /* Check if the device is there? */
2925 if ((ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt) == 0)
2926 || ((status = (PI2O_EXEC_STATUS_GET_REPLY)malloc (
2927 sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK))
2928 == (PI2O_EXEC_STATUS_GET_REPLY)NULL)
2929 || (ASR_getStatus(sc->ha_Virt, sc->ha_Fvirt, status) == NULL)) {
2930 printf ("asr%d: could not initialize hardware\n", unit);
2931 ATTACH_RETURN(ENODEV); /* Get next, maybe better luck */
2932 }
2933 sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2934 sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2935 sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2936 sc->ha_SystemTable.IopState = status->IopState;
2937 sc->ha_SystemTable.MessengerType = status->MessengerType;
2938 sc->ha_SystemTable.InboundMessageFrameSize
2939 = status->InboundMFrameSize;
2940 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow
2941 = (U32)(sc->ha_Base) + (U32)(&(((i2oRegs_t *)NULL)->ToFIFO));
2942
2943 if (!asr_pci_map_int(tag, (void *)sc)) {
2944 printf ("asr%d: could not map interrupt\n", unit);
2945 ATTACH_RETURN(ENXIO);
2946 }
2947
2948 /* Adjust the maximim inbound count */
2949 if (((sc->ha_QueueSize
2950 = I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status))
2951 > MAX_INBOUND)
2952 || (sc->ha_QueueSize == 0)) {
2953 sc->ha_QueueSize = MAX_INBOUND;
2954 }
2955
2956 /* Adjust the maximum outbound count */
2957 if (((sc->ha_Msgs_Count
2958 = I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status))
2959 > MAX_OUTBOUND)
2960 || (sc->ha_Msgs_Count == 0)) {
2961 sc->ha_Msgs_Count = MAX_OUTBOUND;
2962 }
2963 if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2964 sc->ha_Msgs_Count = sc->ha_QueueSize;
2965 }
2966
2967 /* Adjust the maximum SG size to adapter */
2968 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(
2969 status) << 2)) > MAX_INBOUND_SIZE) {
2970 size = MAX_INBOUND_SIZE;
2971 }
2972 free (status, M_TEMP);
2973 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2974 + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2975 }
2976
2977 /*
2978 * Only do a bus/HBA reset on the first time through. On this
2979 * first time through, we do not send a flush to the devices.
2980 */
2981 if (ASR_init(sc) == 0) {
2982 struct BufferInfo {
2983 I2O_PARAM_RESULTS_LIST_HEADER Header;
2984 I2O_PARAM_READ_OPERATION_RESULT Read;
2985 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2986 };
2987 defAlignLong (struct BufferInfo, Buffer);
2988 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2989# define FW_DEBUG_BLED_OFFSET 8
2990
2991 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2992 ASR_getParams(sc, 0,
2993 I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2994 Buffer, sizeof(struct BufferInfo)))
2995 != (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)NULL) {
2996 sc->ha_blinkLED = sc->ha_Fvirt
2997 + I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info)
2998 + FW_DEBUG_BLED_OFFSET;
2999 }
3000 if (ASR_acquireLct(sc) == 0) {
3001 (void)ASR_acquireHrt(sc);
3002 }
3003 } else {
3004 printf ("asr%d: failed to initialize\n", unit);
3005 ATTACH_RETURN(ENXIO);
3006 }
3007 /*
3008 * Add in additional probe responses for more channels. We
3009 * are reusing the variable `target' for a channel loop counter.
3010 * Done here because of we need both the acquireLct and
3011 * acquireHrt data.
3012 */
3013 { PI2O_LCT_ENTRY Device;
3014
3015 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
3016 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
3017 ++Device) {
3018 if (Device->le_type == I2O_UNKNOWN) {
3019 continue;
3020 }
3021 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
3022 if (Device->le_target > sc->ha_MaxId) {
3023 sc->ha_MaxId = Device->le_target;
3024 }
3025 if (Device->le_lun > sc->ha_MaxLun) {
3026 sc->ha_MaxLun = Device->le_lun;
3027 }
3028 }
3029 if (((Device->le_type & I2O_PORT) != 0)
3030 && (Device->le_bus <= MAX_CHANNEL)) {
3031 /* Do not increase MaxId for efficiency */
3032 sc->ha_adapter_target[Device->le_bus]
3033 = Device->le_target;
3034 }
3035 }
3036 }
3037
3038
3039 /*
3040 * Print the HBA model number as inquired from the card.
3041 */
3042
3043 printf ("asr%d:", unit);
3044
3045 if ((iq = (struct scsi_inquiry_data *)malloc (
3046 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK))
3047 != (struct scsi_inquiry_data *)NULL) {
3048 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
3049 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
3050 int posted = 0;
3051
3052 bzero (iq, sizeof(struct scsi_inquiry_data));
3053 bzero (Message_Ptr
3054 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
3055 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
3056 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
3057
3058 I2O_MESSAGE_FRAME_setVersionOffset(
3059 (PI2O_MESSAGE_FRAME)Message_Ptr,
3060 I2O_VERSION_11
3061 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
3062 - sizeof(I2O_SG_ELEMENT))
3063 / sizeof(U32)) << 4));
3064 I2O_MESSAGE_FRAME_setMessageSize(
3065 (PI2O_MESSAGE_FRAME)Message_Ptr,
3066 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
3067 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT))
3068 / sizeof(U32));
3069 I2O_MESSAGE_FRAME_setInitiatorAddress (
3070 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
3071 I2O_MESSAGE_FRAME_setFunction(
3072 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
3073 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
3074 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
3075 I2O_SCSI_SCB_EXEC);
3076 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
3077 I2O_SCB_FLAG_ENABLE_DISCONNECT
3078 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
3079 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
3080 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
3081 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
3082 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
3083 DPT_ORGANIZATION_ID);
3084 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
3085 Message_Ptr->CDB[0] = INQUIRY;
3086 Message_Ptr->CDB[4] = (unsigned char)sizeof(struct scsi_inquiry_data);
3087 if (Message_Ptr->CDB[4] == 0) {
3088 Message_Ptr->CDB[4] = 255;
3089 }
3090
3091 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
3092 (I2O_SCB_FLAG_XFER_FROM_DEVICE
3093 | I2O_SCB_FLAG_ENABLE_DISCONNECT
3094 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
3095 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
3096
3097 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
3098 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
3099 sizeof(struct scsi_inquiry_data));
3100 SG(&(Message_Ptr->SGL), 0,
3101 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
3102 iq, sizeof(struct scsi_inquiry_data));
3103 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
3104
3105 if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
3106 printf (" ");
3107 ASR_prstring (iq->vendor, 8);
3108 ++posted;
3109 }
3110 if (iq->product[0] && (iq->product[0] != ' ')) {
3111 printf (" ");
3112 ASR_prstring (iq->product, 16);
3113 ++posted;
3114 }
3115 if (iq->revision[0] && (iq->revision[0] != ' ')) {
3116 printf (" FW Rev. ");
3117 ASR_prstring (iq->revision, 4);
3118 ++posted;
3119 }
3120 free ((caddr_t)iq, M_TEMP);
3121 if (posted) {
3122 printf (",");
3123 }
3124 }
3125 printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
3126 (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
3127
3128 /*
3129 * fill in the prototype cam_path.
3130 */
3131 {
3132 int bus;
3133 union asr_ccb * ccb;
3134
3135 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
3136 printf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit);
3137 ATTACH_RETURN(ENOMEM);
3138 }
3139 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
3140 struct cam_devq * devq;
3141 int QueueSize = sc->ha_QueueSize;
3142
3143 if (QueueSize > MAX_INBOUND) {
3144 QueueSize = MAX_INBOUND;
3145 }
3146
3147 /*
3148 * Create the device queue for our SIM(s).
3149 */
3150 if ((devq = cam_simq_alloc(QueueSize)) == NULL) {
3151 continue;
3152 }
3153
3154 /*
3155 * Construct our first channel SIM entry
3156 */
3157 sc->ha_sim[bus] = cam_sim_alloc(
3158 asr_action, asr_poll, "asr", sc,
3159 unit, 1, QueueSize, devq);
3160 if (sc->ha_sim[bus] == NULL) {
3161 continue;
3162 }
3163
3164 if (xpt_bus_register(sc->ha_sim[bus], bus)
3165 != CAM_SUCCESS) {
3166 cam_sim_free(sc->ha_sim[bus],
3167 /*free_devq*/TRUE);
3168 sc->ha_sim[bus] = NULL;
3169 continue;
3170 }
3171
3172 if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
3173 cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
3174 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
3175 xpt_bus_deregister(
3176 cam_sim_path(sc->ha_sim[bus]));
3177 cam_sim_free(sc->ha_sim[bus],
3178 /*free_devq*/TRUE);
3179 sc->ha_sim[bus] = NULL;
3180 continue;
3181 }
3182 }
3183 asr_free_ccb (ccb);
3184 }
3185 /*
3186 * Generate the device node information
3187 */
3188 (void)make_dev(&asr_cdevsw, unit, 0, 0, S_IRWXU, "rasr%d", unit);
3189 destroy_dev(makedev(asr_cdevsw.d_maj,unit+1));
3190 ATTACH_RETURN(0);
3191} /* asr_attach */
3192
3193STATIC void
3194asr_poll(
3195 IN struct cam_sim *sim)
3196{
3197 asr_intr(cam_sim_softc(sim));
3198} /* asr_poll */
3199
/*
 * Main CAM action entry point: dispatch an incoming CCB according to
 * its function code.  Every non-queued path completes the CCB here via
 * xpt_done(); successfully queued XPT_SCSI_IO requests are completed
 * later by asr_intr() when the adapter posts the matching reply frame.
 */
STATIC void
asr_action(
	IN struct cam_sim * sim,
	IN union ccb * ccb)
{
	struct Asr_softc * sc;

	debug_asr_printf ("asr_action(%lx,%lx{%x})\n",
	  (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code);

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));

	/* Stash the softc in the CCB so the completion side can find it. */
	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {

	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		/* Stack-resident I2O message frame, aligned via defAlignLong. */
		struct Message {
			char M[MAX_INBOUND_SIZE];
		};
		defAlignLong(struct Message,Message);
		PI2O_MESSAGE_FRAME   Message_Ptr;

		/* Reject incoming commands while we are resetting the card */
		if (sc->ha_in_reset != HA_OPERATIONAL) {
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			if (sc->ha_in_reset >= HA_OFF_LINE) {
				/* HBA is now off-line */
				ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
			} else {
				/* HBA currently resetting, try again later. */
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			}
			debug_asr_cmd_printf (" e\n");
			xpt_done(ccb);
			debug_asr_cmd_printf (" q\n");
			break;
		}
		/* Sanity: CAM should only hand us in-progress requests. */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			printf(
			  "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ccb->csio.cdb_io.cdb_bytes[0],
			  cam_sim_bus(sim),
			  ccb->ccb_h.target_id,
			  ccb->ccb_h.target_lun);
		}
		debug_asr_cmd_printf ("(%d,%d,%d,%d)",
		  cam_sim_unit(sim),
		  cam_sim_bus(sim),
		  ccb->ccb_h.target_id,
		  ccb->ccb_h.target_lun);
		debug_asr_cmd_dump_ccb(ccb);

		/*
		 * ASR_init_message() translates the CCB into an I2O message
		 * frame; a NULL return indicates no TID maps to the addressed
		 * device (handled after this if-block).
		 */
		if ((Message_Ptr = ASR_init_message ((union asr_ccb *)ccb,
		  (PI2O_MESSAGE_FRAME)Message)) != (PI2O_MESSAGE_FRAME)NULL) {
			debug_asr_cmd2_printf ("TID=%x:\n",
			  PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
			debug_asr_cmd2_dump_message(Message_Ptr);
			debug_asr_cmd1_printf (" q");

			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
#ifdef ASR_MEASURE_PERFORMANCE
				++sc->ha_performance.command_too_busy;
#endif
				/* Inbound queue full: ask CAM to retry later. */
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
				debug_asr_cmd_printf (" E\n");
				xpt_done(ccb);
			}
			debug_asr_cmd_printf (" Q\n");
			break;
		}
		/*
		 * We will get here if there is no valid TID for the device
		 * referenced in the scsi command packet.
		 */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
		debug_asr_cmd_printf (" B\n");
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		/* Reset HBA device ... */
		asr_hbareset (sc);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	/*
	 * NOTE(review): REPORT_LUNS is a SCSI opcode, not an XPT function
	 * code; its presence in this func_code switch looks suspect --
	 * confirm the original intent before relying on it.
	 */
# if (defined(REPORT_LUNS))
	case REPORT_LUNS:
# endif
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* XXX Implement */
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct	ccb_trans_settings *cts;
		u_int	target_mask;

		cts = &(ccb->cts);
		/* NOTE(review): target_mask is computed but never used. */
		target_mask = 0x01 << ccb->ccb_h.target_id;
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			/* Report fixed wide/tagged negotiation capabilities. */
			cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
			cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			cts->sync_period = 6; /* 40MHz */
			cts->sync_offset = 15;

			cts->valid = CCB_TRANS_SYNC_RATE_VALID
				   | CCB_TRANS_SYNC_OFFSET_VALID
				   | CCB_TRANS_BUS_WIDTH_VALID
				   | CCB_TRANS_DISC_VALID
				   | CCB_TRANS_TQ_VALID;
			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		}
		xpt_done(ccb);
		break;
	}

	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;

		ccg = &(ccb->ccg);
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);

		/* Conventional CAM heads/sectors extrapolation by capacity. */
		if (size_mb > 4096) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else if (size_mb > 2048) {
			ccg->heads = 128;
			ccg->secs_per_track = 63;
		} else if (size_mb > 1024) {
			ccg->heads = 65;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
		ASR_resetBus (sc, cam_sim_bus(sim));
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &(ccb->cpi);

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		/* Not necessary to reset bus, done by HDM initialization */
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sc->ha_MaxId;
		cpi->max_lun = sc->ha_MaxLun;
		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		/* Anything else is not supported by this SIM. */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
} /* asr_action */
3407
3408#ifdef ASR_MEASURE_PERFORMANCE
3409#define WRITE_OP 1
3410#define READ_OP 2
3411#define min_submitR sc->ha_performance.read_by_size_min_time[index]
3412#define max_submitR sc->ha_performance.read_by_size_max_time[index]
3413#define min_submitW sc->ha_performance.write_by_size_min_time[index]
3414#define max_submitW sc->ha_performance.write_by_size_max_time[index]
3415
3416STATIC INLINE void
3417asr_IObySize(
3418 IN Asr_softc_t * sc,
3419 IN u_int32_t submitted_time,
3420 IN int op,
3421 IN int index)
3422{
3423 struct timeval submitted_timeval;
3424
3425 submitted_timeval.tv_sec = 0;
3426 submitted_timeval.tv_usec = submitted_time;
3427
3428 if ( op == READ_OP ) {
3429 ++sc->ha_performance.read_by_size_count[index];
3430
3431 if ( submitted_time != 0xffffffff ) {
3432 timevaladd(
3433 &(sc->ha_performance.read_by_size_total_time[index]),
3434 &submitted_timeval);
3435 if ( (min_submitR == 0)
3436 || (submitted_time < min_submitR) ) {
3437 min_submitR = submitted_time;
3438 }
3439
3440 if ( submitted_time > max_submitR ) {
3441 max_submitR = submitted_time;
3442 }
3443 }
3444 } else {
3445 ++sc->ha_performance.write_by_size_count[index];
3446 if ( submitted_time != 0xffffffff ) {
3447 timevaladd(
3448 &(sc->ha_performance.write_by_size_total_time[index]),
3449 &submitted_timeval);
3450 if ( (submitted_time < min_submitW)
3451 || (min_submitW == 0) ) {
3452 min_submitW = submitted_time;
3453 }
3454
3455 if ( submitted_time > max_submitW ) {
3456 max_submitW = submitted_time;
3457 }
3458 }
3459 }
3460} /* asr_IObySize */
3461#endif
3462
3463/*
3464 * Handle processing of current CCB as pointed to by the Status.
3465 */
/*
 * Interrupt service routine: drain the adapter's outbound (reply)
 * FIFO, translate each I2O reply into a CAM status, and complete the
 * associated CCB.  Returns non-zero if the adapter's status register
 * indicated the interrupt condition at least once (i.e. we serviced
 * something), zero otherwise.
 */
STATIC int
asr_intr (
	IN Asr_softc_t * sc)
{
	OUT int processed;

#ifdef ASR_MEASURE_PERFORMANCE
	struct timeval junk;

	/* Timestamp ISR entry for interrupt-latency statistics. */
	microtime(&junk);
	sc->ha_performance.intr_started = junk;
#endif

	/*
	 * Keep servicing while the controller status register reports the
	 * interrupt condition; `processed' records at least one pass.
	 */
	for (processed = 0;
	  sc->ha_Virt->Status & Mask_InterruptsDisabled;
	  processed = 1) {
		union asr_ccb * ccb;
		U32		ReplyOffset;
		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;

		/*
		 * Pop a reply MFA from the outbound FIFO.  The register is
		 * deliberately read twice -- presumably to give the hardware
		 * a second chance to post a frame (NOTE(review): confirm
		 * against the adapter programming reference).
		 */
		if (((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)
		 && ((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)) {
			break;
		}
		/* Translate the reply's bus offset into a kernel pointer. */
		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
		/*
		 * We do not need any (optional byteswapping) method access to
		 * the Initiator context field.
		 */
		ccb = (union asr_ccb *)(long)
		  I2O_MESSAGE_FRAME_getInitiatorContext64(
		    &(Reply->StdReplyFrame.StdMessageFrame));
		/* Failure replies need the preserved MFA returned as a NOP. */
		if (I2O_MESSAGE_FRAME_getMsgFlags(
		  &(Reply->StdReplyFrame.StdMessageFrame))
		  & I2O_MESSAGE_FLAGS_FAIL) {
			defAlignLong(I2O_UTIL_NOP_MESSAGE,Message);
			PI2O_UTIL_NOP_MESSAGE Message_Ptr;
			U32 MessageOffset;

			MessageOffset = (u_long)
			  I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
			/*
			 * Get the Original Message Frame's address, and get
			 * its Transaction Context into our space. (Currently
			 * unused at original authorship, but better to be
			 * safe than sorry). Straight copy means that we
			 * need not concern ourselves with the (optional
			 * byteswapping) method access.
			 */
			Reply->StdReplyFrame.TransactionContext
			  = ((PI2O_SINGLE_REPLY_MESSAGE_FRAME)
			    (sc->ha_Fvirt + MessageOffset))->TransactionContext;
			/*
			 * For 64 bit machines, we need to reconstruct the
			 * 64 bit context.
			 */
			ccb = (union asr_ccb *)(long)
			  I2O_MESSAGE_FRAME_getInitiatorContext64(
			    &(Reply->StdReplyFrame.StdMessageFrame));
			/*
			 * Unique error code for command failure.
			 */
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply->StdReplyFrame), (u_int16_t)-2);
			/*
			 * Modify the message frame to contain a NOP and
			 * re-issue it to the controller.
			 */
			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
			  Message, sizeof(I2O_UTIL_NOP_MESSAGE));
# if (I2O_UTIL_NOP != 0)
			I2O_MESSAGE_FRAME_setFunction (
			  &(Message_Ptr->StdMessageFrame),
			  I2O_UTIL_NOP);
# endif
			/*
			 * Copy the packet out to the Original Message
			 */
			bcopy ((caddr_t)Message_Ptr,
			  sc->ha_Fvirt + MessageOffset,
			  sizeof(I2O_UTIL_NOP_MESSAGE));
			/*
			 * Issue the NOP
			 */
			sc->ha_Virt->ToFIFO = MessageOffset;
		}

		/*
		 * Asynchronous command with no return requirements,
		 * and a generic handler for immunity against odd error
		 * returns from the adapter.
		 */
		if (ccb == (union asr_ccb *)NULL) {
			/*
			 * Return Reply so that it can be used for the
			 * next command
			 */
			sc->ha_Virt->FromFIFO = ReplyOffset;
			continue;
		}

		/* Welease Wadjah! (and stop timeouts) */
		ASR_ccbRemove (sc, ccb);

		/* Map the I2O detailed status code onto a CAM status. */
		switch (
		  I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
		    &(Reply->StdReplyFrame))) {

		case I2O_SCSI_DSC_SUCCESS:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQ_CMP;
			break;

		case I2O_SCSI_DSC_CHECK_CONDITION:
			/* Sense data is copied out of the reply frame below. */
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID;
			break;

		case I2O_SCSI_DSC_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_BUS_BUSY:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_SCSI_BUSY;
			break;

		case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_LUN_INVALID:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
			break;

		default:
			/* Unrecognized status: have CAM retry the request. */
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			break;
		}
		/* Residual = requested length minus bytes actually moved. */
		if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
			ccb->csio.resid -=
			  I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
			    Reply);
		}

#ifdef ASR_MEASURE_PERFORMANCE
		{
			struct timeval endTime;
			u_int32_t submitted_time;
			u_int32_t size;
			int op_type;
			int startTimeIndex;

			--sc->ha_submitted_ccbs_count;
			/* TransactionContext carried the timing-queue slot. */
			startTimeIndex
			  = (int)Reply->StdReplyFrame.TransactionContext;
			if (-1 != startTimeIndex) {
				/* Compute the time spent in device/adapter */
				microtime(&endTime);
				submitted_time = asr_time_delta(sc->ha_timeQ[
				  startTimeIndex], endTime);
				/* put the startTimeIndex back on free list */
				ENQ_TIMEQ_FREE_LIST(startTimeIndex,
				  sc->ha_timeQFreeList,
				  sc->ha_timeQFreeHead,
				  sc->ha_timeQFreeTail);
			} else {
				/* No slot was allocated: no timing sample. */
				submitted_time = 0xffffffff;
			}

#define maxctime sc->ha_performance.max_command_time[ccb->csio.cdb_io.cdb_bytes[0]]
#define minctime sc->ha_performance.min_command_time[ccb->csio.cdb_io.cdb_bytes[0]]
			if (submitted_time != 0xffffffff) {
				/* Track per-opcode min/max command times. */
				if ( maxctime < submitted_time ) {
					maxctime = submitted_time;
				}
				if ( (minctime == 0)
				 || (minctime > submitted_time) ) {
					minctime = submitted_time;
				}

				if ( sc->ha_performance.max_submit_time
				  < submitted_time ) {
					sc->ha_performance.max_submit_time
					  = submitted_time;
				}
				if ( sc->ha_performance.min_submit_time == 0
				 || sc->ha_performance.min_submit_time
				  > submitted_time) {
					sc->ha_performance.min_submit_time
					  = submitted_time;
				}

				/* Classify the CDB opcode as read or write. */
				switch ( ccb->csio.cdb_io.cdb_bytes[0] ) {

				case 0xa8:	/* 12-byte READ */
					/* FALLTHRU */
				case 0x08:	/* 6-byte READ */
					/* FALLTHRU */
				case 0x28:	/* 10-byte READ */
					op_type = READ_OP;
					break;

				case 0x0a:	/* 6-byte WRITE */
					/* FALLTHRU */
				case 0xaa:	/* 12-byte WRITE */
					/* FALLTHRU */
				case 0x2a:	/* 10-byte WRITE */
					op_type = WRITE_OP;
					break;

				default:
					op_type = 0;
					break;
				}

				if ( op_type != 0 ) {
					struct scsi_rw_big * cmd;

					/*
					 * NOTE(review): the CDB is decoded via
					 * the 10-byte layout regardless of the
					 * 6/12-byte opcodes above -- the size
					 * computed for those forms may be
					 * wrong; confirm.
					 */
					cmd = (struct scsi_rw_big *)
					  &(ccb->csio.cdb_io);

					/* Block count << 9 == bytes (512B). */
					size = (((u_int32_t) cmd->length2 << 8)
					  | ((u_int32_t) cmd->length1)) << 9;

					/* Bucket the transfer by its size. */
					switch ( size ) {

					case 512:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_512);
						break;

					case 1024:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_1K);
						break;

					case 2048:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_2K);
						break;

					case 4096:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_4K);
						break;

					case 8192:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_8K);
						break;

					case 16384:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_16K);
						break;

					case 32768:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_32K);
						break;

					case 65536:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_64K);
						break;

					default:
						if ( size > (1 << 16) ) {
							asr_IObySize(sc,
							  submitted_time,
							  op_type,
							  SIZE_BIGGER);
						} else {
							asr_IObySize(sc,
							  submitted_time,
							  op_type,
							  SIZE_OTHER);
						}
						break;
					}
				}
			}
		}
#endif
		/* Sense data in reply packet */
		if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
			u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);

			if (size) {
				/* Clamp to the smallest of all the limits. */
				if (size > sizeof(ccb->csio.sense_data)) {
					size = sizeof(ccb->csio.sense_data);
				}
				if (size > I2O_SCSI_SENSE_DATA_SZ) {
					size = I2O_SCSI_SENSE_DATA_SZ;
				}
				if ((ccb->csio.sense_len)
				 && (size > ccb->csio.sense_len)) {
					size = ccb->csio.sense_len;
				}
				bcopy ((caddr_t)Reply->SenseData,
				  (caddr_t)&(ccb->csio.sense_data), size);
			}
		}

		/*
		 * Return Reply so that it can be used for the next command
		 * since we have no more need for it now
		 */
		sc->ha_Virt->FromFIFO = ReplyOffset;

		/* Internal commands (no CAM path) sleep on the ccb instead. */
		if (ccb->ccb_h.path) {
			xpt_done ((union ccb *)ccb);
		} else {
			wakeup ((caddr_t)ccb);
		}
	}
#ifdef ASR_MEASURE_PERFORMANCE
	{
		u_int32_t result;

		/* Record min/max time spent inside this ISR invocation. */
		microtime(&junk);
		result = asr_time_delta(sc->ha_performance.intr_started, junk);

		if (result != 0xffffffff) {
			if ( sc->ha_performance.max_intr_time < result ) {
				sc->ha_performance.max_intr_time = result;
			}

			if ( (sc->ha_performance.min_intr_time == 0)
			 || (sc->ha_performance.min_intr_time > result) ) {
				sc->ha_performance.min_intr_time = result;
			}
		}
	}
#endif
	return (processed);
} /* asr_intr */
3832
3833#undef QueueSize /* Grrrr */
3834#undef SG_Size /* Grrrr */
3835
3836/*
3837 * Meant to be included at the bottom of asr.c !!!
3838 */
3839
3840/*
3841 * Included here as hard coded. Done because other necessary include
3842 * files utilize C++ comment structures which make them a nuisance to
3843 * included here just to pick up these three typedefs.
3844 */
3845typedef U32 DPT_TAG_T;
3846typedef U32 DPT_MSG_T;
3847typedef U32 DPT_RTN_T;
3848
#undef SCSI_RESET	/* Conflicts with "scsi/scsiconf.h" definition */
3850#include "dev/asr/osd_unix.h"
3851
3852#define asr_unit(dev) minor(dev)
3853
3854STATIC INLINE Asr_softc_t *
3855ASR_get_sc (
3856 IN dev_t dev)
3857{
3858 int unit = asr_unit(dev);
3859 OUT Asr_softc_t * sc = Asr_softc;
3860
3861 while (sc && sc->ha_sim[0] && (cam_sim_unit(sc->ha_sim[0]) != unit)) {
3862 sc = sc->ha_next;
3863 }
3864 return (sc);
3865} /* ASR_get_sc */
3866
3867STATIC u_int8_t ASR_ctlr_held;
3868#if (!defined(UNREFERENCED_PARAMETER))
3869# define UNREFERENCED_PARAMETER(x) (void)(x)
3870#endif
3871
3872STATIC int
3873asr_open(
3874 IN dev_t dev,
3875 int32_t flags,
3876 int32_t ifmt,
41c20dac 3877 IN d_thread_t *td)
984263bc
MD
3878{
3879 int s;
3880 OUT int error;
3881 UNREFERENCED_PARAMETER(flags);
3882 UNREFERENCED_PARAMETER(ifmt);
3883
3884 if (ASR_get_sc (dev) == (Asr_softc_t *)NULL) {
3885 return (ENODEV);
3886 }
dadab5e9 3887 KKASSERT(td->td_proc);
984263bc
MD
3888 s = splcam ();
3889 if (ASR_ctlr_held) {
3890 error = EBUSY;
dadab5e9 3891 } else if ((error = suser_cred(td->td_proc->p_ucred, 0)) == 0) {
984263bc
MD
3892 ++ASR_ctlr_held;
3893 }
3894 splx(s);