Hardwire i386 instead of using the broken machine variable
[dragonfly.git] / sys / dev / raid / asr / asr.c
CommitLineData
984263bc 1/* $FreeBSD: src/sys/dev/asr/asr.c,v 1.3.2.2 2001/08/23 05:21:29 scottl Exp $ */
f15db79e 2/* $DragonFly: src/sys/dev/raid/asr/asr.c,v 1.8 2003/07/23 02:30:15 dillon Exp $ */
984263bc
MD
3/*
4 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
5 * Copyright (c) 2000-2001 Adaptec Corporation
6 * All rights reserved.
7 *
8 * TERMS AND CONDITIONS OF USE
9 *
10 * Redistribution and use in source form, with or without modification, are
11 * permitted provided that redistributions of source code must retain the
12 * above copyright notice, this list of conditions and the following disclaimer.
13 *
14 * This software is provided `as is' by Adaptec and any express or implied
15 * warranties, including, but not limited to, the implied warranties of
16 * merchantability and fitness for a particular purpose, are disclaimed. In no
17 * event shall Adaptec be liable for any direct, indirect, incidental, special,
18 * exemplary or consequential damages (including, but not limited to,
19 * procurement of substitute goods or services; loss of use, data, or profits;
20 * or business interruptions) however caused and on any theory of liability,
21 * whether in contract, strict liability, or tort (including negligence or
22 * otherwise) arising in any way out of the use of this driver software, even
23 * if advised of the possibility of such damage.
24 *
25 * SCSI I2O host adapter driver
26 *
27 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
28 * - The 2000S and 2005S do not initialize on some machines,
29 * increased timeout to 255ms from 50ms for the StatusGet
30 * command.
31 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
32 * - I knew this one was too good to be true. The error return
33 * on ioctl commands needs to be compared to CAM_REQ_CMP, not
34 * to the bit masked status.
35 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
36 * - The 2005S that was supported is affectionately called the
37 * Conjoined BAR Firmware. In order to support RAID-5 in a
38 * 16MB low-cost configuration, Firmware was forced to go
39 * to a Split BAR Firmware. This requires a separate IOP and
40 * Messaging base address.
41 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
42 * - Handle support for 2005S Zero Channel RAID solution.
43 * - System locked up if the Adapter locked up. Do not try
44 * to send other commands if the resetIOP command fails. The
45 * fail outstanding command discovery loop was flawed as the
46 * removal of the command from the list prevented discovering
47 * all the commands.
48 * - Comment changes to clarify driver.
49 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
50 * - We do not use the AC_FOUND_DEV event because of I2O.
51 * Removed asr_async.
52 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
53 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
54 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0
55 * mode as this is confused with competitor adapters in run
56 * mode.
57 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove
58 * to prevent operating system panic.
59 * - moved default major number to 154 from 97.
60 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
61 * - The controller is not actually an ASR (Adaptec SCSI RAID)
62 * series that is visible, it's more of an internal code name.
63 * remove any visible references within reason for now.
64 * - bus_ptr->LUN was not correctly zeroed when initially
65 * allocated causing a possible panic of the operating system
66 * during boot.
67 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
68 * - Code always fails for ASR_getTid affecting performance.
69 * - initiated a set of changes that resulted from a formal
70 * code inspection by Mark_Salyzyn@adaptec.com,
71 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
72 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
73 * Their findings were focussed on the LCT & TID handler, and
74 * all resulting changes were to improve code readability,
75 * consistency or have a positive effect on performance.
76 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
77 * - Passthrough returned an incorrect error.
78 * - Passthrough did not migrate the intrinsic scsi layer wakeup
79 * on command completion.
80 * - generate control device nodes using make_dev and delete_dev.
81 * - Performance affected by TID caching reallocing.
82 * - Made suggested changes by Justin_Gibbs@adaptec.com
83 * - use splcam instead of splbio.
84 * - use cam_imask instead of bio_imask.
85 * - use u_int8_t instead of u_char.
86 * - use u_int16_t instead of u_short.
87 * - use u_int32_t instead of u_long where appropriate.
88 * - use 64 bit context handler instead of 32 bit.
89 * - create_ccb should only allocate the worst case
90 * requirements for the driver since CAM may evolve
91 * making union ccb much larger than needed here.
92 * renamed create_ccb to asr_alloc_ccb.
93 * - go nutz justifying all debug prints as macros
94 * defined at the top and remove unsightly ifdefs.
95 * - INLINE STATIC viewed as confusing. Historically
96 * utilized to affect code performance and debug
97 * issues in OS, Compiler or OEM specific situations.
98 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
99 * - Ported from FreeBSD 2.2.X DPT I2O driver.
100 * changed struct scsi_xfer to union ccb/struct ccb_hdr
101 * changed variable name xs to ccb
102 * changed struct scsi_link to struct cam_path
103 * changed struct scsibus_data to struct cam_sim
104 * stopped using fordriver for holding on to the TID
105 * use proprietary packet creation instead of scsi_inquire
106 * CAM layer sends synchronize commands.
107 */
108
109#define ASR_VERSION 1
110#define ASR_REVISION '0'
111#define ASR_SUBREVISION '8'
112#define ASR_MONTH 8
113#define ASR_DAY 21
114#define ASR_YEAR 2001 - 1980
115
116/*
117 * Debug macros to reduce the unsightly ifdefs
118 */
119#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
120# define debug_asr_message(message) \
121 { \
122 u_int32_t * pointer = (u_int32_t *)message; \
123 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);\
124 u_int32_t counter = 0; \
125 \
126 while (length--) { \
127 printf ("%08lx%c", (u_long)*(pointer++), \
128 (((++counter & 7) == 0) || (length == 0)) \
129 ? '\n' \
130 : ' '); \
131 } \
132 }
133#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
134
135#if (defined(DEBUG_ASR))
136 /* Breaks on none STDC based compilers :-( */
137# define debug_asr_printf(fmt,args...) printf(fmt, ##args)
138# define debug_asr_dump_message(message) debug_asr_message(message)
139# define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path);
140 /* None fatal version of the ASSERT macro */
141# if (defined(__STDC__))
142# define ASSERT(phrase) if(!(phrase))printf(#phrase " at line %d file %s\n",__LINE__,__FILE__)
143# else
144# define ASSERT(phrase) if(!(phrase))printf("phrase" " at line %d file %s\n",__LINE__,__FILE__)
145# endif
146#else /* DEBUG_ASR */
147# define debug_asr_printf(fmt,args...)
148# define debug_asr_dump_message(message)
149# define debug_asr_print_path(ccb)
150# define ASSERT(x)
151#endif /* DEBUG_ASR */
152
153/*
154 * If DEBUG_ASR_CMD is defined:
155 * 0 - Display incoming SCSI commands
156 * 1 - add in a quick character before queueing.
157 * 2 - add in outgoing message frames.
158 */
159#if (defined(DEBUG_ASR_CMD))
160# define debug_asr_cmd_printf(fmt,args...) printf(fmt,##args)
161# define debug_asr_dump_ccb(ccb) \
162 { \
163 u_int8_t * cp = (unsigned char *)&(ccb->csio.cdb_io); \
164 int len = ccb->csio.cdb_len; \
165 \
166 while (len) { \
167 debug_asr_cmd_printf (" %02x", *(cp++)); \
168 --len; \
169 } \
170 }
171# if (DEBUG_ASR_CMD > 0)
172# define debug_asr_cmd1_printf debug_asr_cmd_printf
173# else
174# define debug_asr_cmd1_printf(fmt,args...)
175# endif
176# if (DEBUG_ASR_CMD > 1)
177# define debug_asr_cmd2_printf debug_asr_cmd_printf
178# define debug_asr_cmd2_dump_message(message) debug_asr_message(message)
179# else
180# define debug_asr_cmd2_printf(fmt,args...)
181# define debug_asr_cmd2_dump_message(message)
182# endif
183#else /* DEBUG_ASR_CMD */
184# define debug_asr_cmd_printf(fmt,args...)
185# define debug_asr_cmd_dump_ccb(ccb)
186# define debug_asr_cmd1_printf(fmt,args...)
187# define debug_asr_cmd2_printf(fmt,args...)
188# define debug_asr_cmd2_dump_message(message)
189#endif /* DEBUG_ASR_CMD */
190
191#if (defined(DEBUG_ASR_USR_CMD))
192# define debug_usr_cmd_printf(fmt,args...) printf(fmt,##args)
193# define debug_usr_cmd_dump_message(message) debug_usr_message(message)
194#else /* DEBUG_ASR_USR_CMD */
195# define debug_usr_cmd_printf(fmt,args...)
196# define debug_usr_cmd_dump_message(message)
197#endif /* DEBUG_ASR_USR_CMD */
198
199#define dsDescription_size 46 /* Snug as a bug in a rug */
200#include "dev/asr/dptsig.h"
201
202static dpt_sig_S ASR_sig = {
203 { 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
204 PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
205 OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL,
206 ADF_ALL_SC5,
207 0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
208 ASR_MONTH, ASR_DAY, ASR_YEAR,
209/* 01234567890123456789012345678901234567890123456789 < 50 chars */
210 "Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
211 /* ^^^^^ asr_attach alters these to match OS */
212};
213
214#include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */
215#include <sys/kernel.h>
216#include <sys/systm.h>
217#include <sys/malloc.h>
218#include <sys/proc.h>
219#include <sys/conf.h>
220#include <sys/disklabel.h>
221#include <sys/bus.h>
222#include <machine/resource.h>
223#include <machine/bus.h>
224#include <sys/rman.h>
225#include <sys/stat.h>
f15db79e 226#include <sys/device.h>
984263bc
MD
227
228#include <cam/cam.h>
229#include <cam/cam_ccb.h>
230#include <cam/cam_sim.h>
231#include <cam/cam_xpt_sim.h>
232#include <cam/cam_xpt_periph.h>
233
234#include <cam/scsi/scsi_all.h>
235#include <cam/scsi/scsi_message.h>
236
237#include <vm/vm.h>
238#include <vm/pmap.h>
239#include <machine/cputypes.h>
240#include <machine/clock.h>
241#include <i386/include/vmparam.h>
242
243#include <pci/pcivar.h>
244#include <pci/pcireg.h>
245
246#define STATIC static
247#define INLINE
248
249#if (defined(DEBUG_ASR) && (DEBUG_ASR > 0))
250# undef STATIC
251# define STATIC
252# undef INLINE
253# define INLINE
254#endif
255#define IN
256#define OUT
257#define INOUT
258
259#define osdSwap4(x) ((u_long)ntohl((u_long)(x)))
260#define KVTOPHYS(x) vtophys(x)
261#include "dev/asr/dptalign.h"
262#include "dev/asr/i2oexec.h"
263#include "dev/asr/i2obscsi.h"
264#include "dev/asr/i2odpt.h"
265#include "dev/asr/i2oadptr.h"
266#include "opt_asr.h"
267
268#include "dev/asr/sys_info.h"
269
270/* Configuration Definitions */
271
272#define SG_SIZE 58 /* Scatter Gather list Size */
273#define MAX_TARGET_ID 126 /* Maximum Target ID supported */
274#define MAX_LUN 255 /* Maximum LUN Supported */
275#define MAX_CHANNEL 7 /* Maximum Channel # Supported by driver */
276#define MAX_INBOUND 2000 /* Max CCBs, Also Max Queue Size */
277#define MAX_OUTBOUND 256 /* Maximum outbound frames/adapter */
278#define MAX_INBOUND_SIZE 512 /* Maximum inbound frame size */
279#define MAX_MAP 4194304L /* Maximum mapping size of IOP */
280 /* Also serves as the minimum map for */
281 /* the 2005S zero channel RAID product */
282
283/**************************************************************************
284** ASR Host Adapter structure - One Structure For Each Host Adapter That **
285** Is Configured Into The System. The Structure Supplies Configuration **
286** Information, Status Info, Queue Info And An Active CCB List Pointer. **
287***************************************************************************/
288
289/* I2O register set */
/*
 * Memory-mapped I2O messaging unit register layout.
 * Only Status, Mask and the two FIFO registers are used by this driver;
 * the leading Address[] window is presumably other chip registers this
 * driver never touches -- TODO confirm against the adapter datasheet.
 */
typedef struct {
	U8           Address[0x30];
	volatile U32 Status;
	volatile U32 Mask;
# define Mask_InterruptsDisabled 0x08
	U32          x[2];
	volatile U32 ToFIFO;	/* In Bound FIFO */
	volatile U32 FromFIFO;	/* Out Bound FIFO */
} i2oRegs_t;
299
300/*
301 * A MIX of performance and space considerations for TID lookups
302 */
303typedef u_int16_t tid_t;
304
305typedef struct {
306 u_int32_t size; /* up to MAX_LUN */
307 tid_t TID[1];
308} lun2tid_t;
309
310typedef struct {
311 u_int32_t size; /* up to MAX_TARGET */
312 lun2tid_t * LUN[1];
313} target2lun_t;
314
315/*
316 * To ensure that we only allocate and use the worst case ccb here, lets
317 * make our own local ccb union. If asr_alloc_ccb is utilized for another
318 * ccb type, ensure that you add the additional structures into our local
319 * ccb union. To ensure strict type checking, we will utilize the local
320 * ccb definition wherever possible.
321 */
/* Local worst-case ccb union: only the ccb types this driver actually
 * allocates, so asr_alloc_ccb need not size for the full CAM union ccb. */
union asr_ccb {
	struct ccb_hdr      ccb_h;	/* For convenience */
	struct ccb_scsiio   csio;	/* SCSI I/O requests */
	struct ccb_setasync csa;	/* async callback registration */
};
327
/*
 * Per-adapter soft state: configuration, register mappings, the LCT
 * (logical configuration table) of attached devices, the active ccb
 * list, and CAM sim/path handles for each channel.
 */
typedef struct Asr_softc {
	u_int16_t		ha_irq;
	void *			ha_Base;	/* base port for each board */
	u_int8_t * volatile	ha_blinkLED;	/* non-NULL => adapter fault code */
	i2oRegs_t *		ha_Virt;	/* Base address of IOP */
	U8 *			ha_Fvirt;	/* Base address of Frames */
	I2O_IOP_ENTRY		ha_SystemTable;
	LIST_HEAD(,ccb_hdr)	ha_ccb;		/* ccbs in use */
	struct cam_path *	ha_path[MAX_CHANNEL+1];
	struct cam_sim *	ha_sim[MAX_CHANNEL+1];
#if __FreeBSD_version >= 400000
	struct resource *	ha_mem_res;	/* IOP register BAR */
	struct resource *	ha_mes_res;	/* message frame BAR (split-BAR f/w) */
	struct resource *	ha_irq_res;
	void *			ha_intr;	/* interrupt handler cookie */
#endif
	PI2O_LCT		ha_LCT;		/* Complete list of devices */
	/* Accessor shorthands into an LCT entry's IdentityTag bytes */
# define le_type	IdentityTag[0]
# define I2O_BSA	0x20
# define I2O_FCA	0x40
# define I2O_SCSI	0x00
# define I2O_PORT	0x80
# define I2O_UNKNOWN	0x7F
# define le_bus		IdentityTag[1]
# define le_target	IdentityTag[2]
# define le_lun		IdentityTag[3]
	target2lun_t *		ha_targets[MAX_CHANNEL+1];	/* TID lookup per bus */
	PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;	/* reply frame pool */
	u_long			ha_Msgs_Phys;	/* its physical (DMA) address */

	u_int8_t		ha_in_reset;	/* adapter recovery state machine */
# define HA_OPERATIONAL		0
# define HA_IN_RESET		1
# define HA_OFF_LINE		2
# define HA_OFF_LINE_RECOVERY	3
	/* Configuration information */
	/* The target id maximums we take */
	u_int8_t		ha_MaxBus;	/* Maximum bus */
	u_int8_t		ha_MaxId;	/* Maximum target ID */
	u_int8_t		ha_MaxLun;	/* Maximum target LUN */
	u_int8_t		ha_SgSize;	/* Max SG elements */
	u_int8_t		ha_pciBusNum;
	u_int8_t		ha_pciDeviceNum;
	u_int8_t		ha_adapter_target[MAX_CHANNEL+1];
	u_int16_t		ha_QueueSize;	/* Max outstanding commands */
	u_int16_t		ha_Msgs_Count;

	/* Links into other parents and HBAs */
	struct Asr_softc *	ha_next;	/* HBA list */

#ifdef ASR_MEASURE_PERFORMANCE
#define MAX_TIMEQ_SIZE	256 // assumes MAX 256 scsi commands sent
	asr_perf_t		ha_performance;
	u_int32_t		ha_submitted_ccbs_count;

	// Queueing macros for a circular queue
#define TIMEQ_FREE_LIST_EMPTY(head, tail) (-1 == (head) && -1 == (tail))
#define TIMEQ_FREE_LIST_FULL(head, tail) ((((tail) + 1) % MAX_TIMEQ_SIZE) == (head))
#define ENQ_TIMEQ_FREE_LIST(item, Q, head, tail) \
	if (!TIMEQ_FREE_LIST_FULL((head), (tail))) { \
		if TIMEQ_FREE_LIST_EMPTY((head),(tail)) { \
			(head) = (tail) = 0; \
		} \
		else (tail) = ((tail) + 1) % MAX_TIMEQ_SIZE; \
		Q[(tail)] = (item); \
	} \
	else { \
		debug_asr_printf("asr: Enqueueing when TimeQ Free List is full... This should not happen!\n"); \
	}
#define DEQ_TIMEQ_FREE_LIST(item, Q, head, tail) \
	if (!TIMEQ_FREE_LIST_EMPTY((head), (tail))) { \
		item = Q[(head)]; \
		if ((head) == (tail)) { (head) = (tail) = -1; } \
		else (head) = ((head) + 1) % MAX_TIMEQ_SIZE; \
	} \
	else { \
		(item) = -1; \
		debug_asr_printf("asr: Dequeueing when TimeQ Free List is empty... This should not happen!\n"); \
	}

	// Circular queue of time stamps
	struct timeval		ha_timeQ[MAX_TIMEQ_SIZE];
	u_int32_t		ha_timeQFreeList[MAX_TIMEQ_SIZE];
	int			ha_timeQFreeHead;
	int			ha_timeQFreeTail;
#endif
} Asr_softc_t;
415
416STATIC Asr_softc_t * Asr_softc;
417
418/*
419 * Prototypes of the routines we have in this object.
420 */
421
422/* Externally callable routines */
423#if __FreeBSD_version >= 400000
424#define PROBE_ARGS IN device_t tag
425#define PROBE_RET int
426#define PROBE_SET() u_long id = (pci_get_device(tag)<<16)|pci_get_vendor(tag)
427#define PROBE_RETURN(retval) if(retval){device_set_desc(tag,retval);return(0);}else{return(ENXIO);}
428#define ATTACH_ARGS IN device_t tag
429#define ATTACH_RET int
430#define ATTACH_SET() int unit = device_get_unit(tag)
431#define ATTACH_RETURN(retval) return(retval)
432#else
433#define PROBE_ARGS IN pcici_t tag, IN pcidi_t id
434#define PROBE_RET const char *
435#define PROBE_SET()
436#define PROBE_RETURN(retval) return(retval)
437#define ATTACH_ARGS IN pcici_t tag, IN int unit
438#define ATTACH_RET void
439#define ATTACH_SET()
440#define ATTACH_RETURN(retval) return
441#endif
442/* I2O HDM interface */
443STATIC PROBE_RET asr_probe __P((PROBE_ARGS));
444STATIC ATTACH_RET asr_attach __P((ATTACH_ARGS));
445/* DOMINO placeholder */
446STATIC PROBE_RET domino_probe __P((PROBE_ARGS));
447STATIC ATTACH_RET domino_attach __P((ATTACH_ARGS));
448/* MODE0 adapter placeholder */
449STATIC PROBE_RET mode0_probe __P((PROBE_ARGS));
450STATIC ATTACH_RET mode0_attach __P((ATTACH_ARGS));
451
452STATIC Asr_softc_t * ASR_get_sc __P((
453 IN dev_t dev));
454STATIC int asr_ioctl __P((
455 IN dev_t dev,
456 IN u_long cmd,
457 INOUT caddr_t data,
458 int flag,
a99c2fff 459 d_thread_t *td));
984263bc
MD
460STATIC int asr_open __P((
461 IN dev_t dev,
462 int32_t flags,
463 int32_t ifmt,
a99c2fff 464 IN d_thread_t *td));
984263bc
MD
465STATIC int asr_close __P((
466 dev_t dev,
467 int flags,
468 int ifmt,
a99c2fff 469 d_thread_t *td));
984263bc
MD
470STATIC int asr_intr __P((
471 IN Asr_softc_t * sc));
472STATIC void asr_timeout __P((
473 INOUT void * arg));
474STATIC int ASR_init __P((
475 IN Asr_softc_t * sc));
476STATIC INLINE int ASR_acquireLct __P((
477 INOUT Asr_softc_t * sc));
478STATIC INLINE int ASR_acquireHrt __P((
479 INOUT Asr_softc_t * sc));
480STATIC void asr_action __P((
481 IN struct cam_sim * sim,
482 IN union ccb * ccb));
483STATIC void asr_poll __P((
484 IN struct cam_sim * sim));
485
486/*
487 * Here is the auto-probe structure used to nest our tests appropriately
488 * during the startup phase of the operating system.
489 */
490#if __FreeBSD_version >= 400000
491STATIC device_method_t asr_methods[] = {
492 DEVMETHOD(device_probe, asr_probe),
493 DEVMETHOD(device_attach, asr_attach),
494 { 0, 0 }
495};
496
497STATIC driver_t asr_driver = {
498 "asr",
499 asr_methods,
500 sizeof(Asr_softc_t)
501};
502
503STATIC devclass_t asr_devclass;
504
505DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);
506
507STATIC device_method_t domino_methods[] = {
508 DEVMETHOD(device_probe, domino_probe),
509 DEVMETHOD(device_attach, domino_attach),
510 { 0, 0 }
511};
512
513STATIC driver_t domino_driver = {
514 "domino",
515 domino_methods,
516 0
517};
518
519STATIC devclass_t domino_devclass;
520
521DRIVER_MODULE(domino, pci, domino_driver, domino_devclass, 0, 0);
522
523STATIC device_method_t mode0_methods[] = {
524 DEVMETHOD(device_probe, mode0_probe),
525 DEVMETHOD(device_attach, mode0_attach),
526 { 0, 0 }
527};
528
529STATIC driver_t mode0_driver = {
530 "mode0",
531 mode0_methods,
532 0
533};
534
535STATIC devclass_t mode0_devclass;
536
537DRIVER_MODULE(mode0, pci, mode0_driver, mode0_devclass, 0, 0);
538#else
539STATIC u_long asr_pcicount = 0;
540STATIC struct pci_device asr_pcidev = {
541 "asr",
542 asr_probe,
543 asr_attach,
544 &asr_pcicount,
545 NULL
546};
547DATA_SET (asr_pciset, asr_pcidev);
548
549STATIC u_long domino_pcicount = 0;
550STATIC struct pci_device domino_pcidev = {
551 "domino",
552 domino_probe,
553 domino_attach,
554 &domino_pcicount,
555 NULL
556};
557DATA_SET (domino_pciset, domino_pcidev);
558
559STATIC u_long mode0_pcicount = 0;
560STATIC struct pci_device mode0_pcidev = {
561 "mode0",
562 mode0_probe,
563 mode0_attach,
564 &mode0_pcicount,
565 NULL
566};
567DATA_SET (mode0_pciset, mode0_pcidev);
568#endif
569
570/*
571 * devsw for asr hba driver
572 *
573 * only ioctl is used. the sd driver provides all other access.
574 */
575#define CDEV_MAJOR 154 /* prefered default character major */
576STATIC struct cdevsw asr_cdevsw = {
fabb8ceb
MD
577 "asr", /* name */
578 CDEV_MAJOR, /* maj */
579 0, /* flags */
580 NULL, /* port */
581 0, /* auto */
582
984263bc
MD
583 asr_open, /* open */
584 asr_close, /* close */
585 noread, /* read */
586 nowrite, /* write */
587 asr_ioctl, /* ioctl */
588 nopoll, /* poll */
589 nommap, /* mmap */
590 nostrategy, /* strategy */
984263bc 591 nodump, /* dump */
fabb8ceb 592 nopsize /* psize */
984263bc
MD
593};
594
595#ifdef ASR_MEASURE_PERFORMANCE
596STATIC u_int32_t asr_time_delta __P((IN struct timeval start,
597 IN struct timeval end));
598#endif
599
600/*
601 * Initialize the dynamic cdevsw hooks.
602 */
/*
 * Install the asr cdevsw, hunting for a free character-device major
 * number starting at CDEV_MAJOR (154).  Runs once (guarded by a static
 * flag) from SYSINIT before CAM attaches the HBA.
 */
STATIC void
asr_drvinit (
	void * unused)
{
	static int asr_devsw_installed = 0;

	if (asr_devsw_installed) {
		return;
	}
	asr_devsw_installed++;
	/*
	 * Find a free spot (the report during driver load used by
	 * osd layer in engine to generate the controlling nodes).
	 */
	while ((asr_cdevsw.d_maj < NUMCDEVSW)
	 && (dev_dport(makedev(asr_cdevsw.d_maj,0)) != NULL)) {
		++asr_cdevsw.d_maj;
	}
	/* Preferred range exhausted: wrap and scan [0, CDEV_MAJOR) instead */
	if (asr_cdevsw.d_maj >= NUMCDEVSW) for (
	  asr_cdevsw.d_maj = 0;
	  (asr_cdevsw.d_maj < CDEV_MAJOR)
	   && (dev_dport(makedev(asr_cdevsw.d_maj,0)) != NULL);
	  ++asr_cdevsw.d_maj);
	/*
	 * Come to papa
	 */
	cdevsw_add(&asr_cdevsw);
	/*
	 * delete any nodes that would attach to the primary adapter,
	 * let the adapter scans add them.
	 */
	destroy_dev(makedev(asr_cdevsw.d_maj,0));
} /* asr_drvinit */
636
637/* Must initialize before CAM layer picks up our HBA driver */
638SYSINIT(asrdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,asr_drvinit,NULL)
639
640/* I2O support routines */
641#define defAlignLong(STRUCT,NAME) char NAME[sizeof(STRUCT)]
642#define getAlignLong(STRUCT,NAME) ((STRUCT *)(NAME))
643
644/*
645 * Fill message with default.
646 */
647STATIC PI2O_MESSAGE_FRAME
648ASR_fillMessage (
649 IN char * Message,
650 IN u_int16_t size)
651{
652 OUT PI2O_MESSAGE_FRAME Message_Ptr;
653
654 Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message);
655 bzero ((void *)Message_Ptr, size);
656 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
657 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
658 (size + sizeof(U32) - 1) >> 2);
659 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
660 return (Message_Ptr);
661} /* ASR_fillMessage */
662
663#define EMPTY_QUEUE ((U32)-1L)
664
665STATIC INLINE U32
666ASR_getMessage(
667 IN i2oRegs_t * virt)
668{
669 OUT U32 MessageOffset;
670
671 if ((MessageOffset = virt->ToFIFO) == EMPTY_QUEUE) {
672 MessageOffset = virt->ToFIFO;
673 }
674 return (MessageOffset);
675} /* ASR_getMessage */
676
677/* Issue a polled command */
/*
 * Post a message frame to the adapter, polling for a free inbound
 * frame first.  On success, interrupts are left DISABLED (the caller
 * polls for the reply and must restore the returned mask); the
 * previous interrupt mask is returned.  Returns (U32)-1 if no frame
 * became available.
 */
STATIC U32
ASR_initiateCp (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt,
	IN PI2O_MESSAGE_FRAME Message)
{
	OUT U32 Mask = -1L;	/* failure sentinel until a frame is posted */
	U32 MessageOffset;
	u_int Delay = 1500;	/* up to 1500 * 10ms = 15s for a free frame */

	/*
	 * ASR_initiateCp is only used for synchronous commands and will
	 * be made more resilient to adapter delays since commands like
	 * resetIOP can cause the adapter to be deaf for a little time.
	 */
	while (((MessageOffset = ASR_getMessage(virt)) == EMPTY_QUEUE)
	 && (--Delay != 0)) {
		DELAY (10000);
	}
	if (MessageOffset != EMPTY_QUEUE) {
		/* Copy the message into the adapter's inbound frame area */
		bcopy (Message, fvirt + MessageOffset,
		  I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
		/*
		 * Disable the Interrupts
		 */
		virt->Mask = (Mask = virt->Mask) | Mask_InterruptsDisabled;
		/* Hand the frame to the adapter; this starts execution */
		virt->ToFIFO = MessageOffset;
	}
	return (Mask);
} /* ASR_initiateCp */
708
709/*
710 * Reset the adapter.
711 */
/*
 * Reset the adapter.
 * Builds an ExecIopReset message whose status word (the trailing U32
 * `R' of the local struct) is DMA-written by the adapter; we poll that
 * word for up to 2 seconds.  Returns the adapter's reset status, or 0
 * if the message could not be posted at all.
 */
STATIC U32
ASR_resetIOP (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt)
{
	/* Message frame plus an adjacent reply status word the IOP DMAs to */
	struct resetMessage {
		I2O_EXEC_IOP_RESET_MESSAGE M;
		U32                        R;
	};
	defAlignLong(struct resetMessage,Message);
	PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
	OUT U32 * volatile Reply_Ptr;	/* volatile: written by the adapter */
	U32 Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
	/*
	 * Reset the Reply Status
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_IOP_RESET_MESSAGE))) = 0;
	/* Tell the IOP the physical address to DMA the status word to */
	I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
	  KVTOPHYS((void *)Reply_Ptr));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response in 2s.
		 */
		u_int8_t Delay = 200;	/* 200 * 10ms = 2s */

		while ((*Reply_Ptr == 0) && (--Delay != 0)) {
			DELAY (10000);
		}
		/*
		 * Re-enable the interrupts.
		 */
		virt->Mask = Old;
		ASSERT (*Reply_Ptr);
		return (*Reply_Ptr);
	}
	/* Could not post the message: ASR_initiateCp timed out */
	ASSERT (Old != (U32)-1L);
	return (0);
} /* ASR_resetIOP */
762
763/*
764 * Get the curent state of the adapter
765 */
/*
 * Get the current state of the adapter via an ExecStatusGet message.
 * The adapter DMAs its reply into `buffer'; we poll the reply's
 * SyncByte until it goes non-zero.  Returns `buffer' on success, or
 * NULL if the poll timed out or the message could not be posted.
 */
STATIC INLINE PI2O_EXEC_STATUS_GET_REPLY
ASR_getStatus (
	INOUT i2oRegs_t * virt,
	INOUT U8 * fvirt,
	OUT PI2O_EXEC_STATUS_GET_REPLY buffer)
{
	defAlignLong(I2O_EXEC_STATUS_GET_MESSAGE,Message);
	PI2O_EXEC_STATUS_GET_MESSAGE Message_Ptr;
	U32 Old;

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
	I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
	  I2O_EXEC_STATUS_GET);
	/* Physical address the adapter DMAs its reply to */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
	  KVTOPHYS((void *)buffer));
	/* This one is a Byte Count */
	I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
	  sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Reset the Reply Status
	 */
	bzero ((void *)buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (virt, fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		/*
		 * Wait for a response (Poll), timeouts are dangerous if
		 * the card is truly responsive. We assume response within
		 * 255ms (255 polls of 1ms each); raised from 50ms because
		 * some 2000S/2005S adapters initialize slowly (see the
		 * V1.08 changelog above).
		 */
		u_int8_t Delay = 255;

		/* volatile access: SyncByte is DMA-written by the adapter */
		while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
			if (--Delay == 0) {
				buffer = (PI2O_EXEC_STATUS_GET_REPLY)NULL;
				break;
			}
			DELAY (1000);
		}
		/*
		 * Re-enable the interrupts.
		 */
		virt->Mask = Old;
		return (buffer);
	}
	return ((PI2O_EXEC_STATUS_GET_REPLY)NULL);
} /* ASR_getStatus */
817
818/*
819 * Check if the device is a SCSI I2O HBA, and add it to the list.
820 */
821
822/*
823 * Probe for ASR controller. If we find it, we will use it.
824 * virtual adapters.
825 */
826STATIC PROBE_RET
827asr_probe(PROBE_ARGS)
828{
829 PROBE_SET();
830 if ((id == 0xA5011044) || (id == 0xA5111044)) {
831 PROBE_RETURN ("Adaptec Caching SCSI RAID");
832 }
833 PROBE_RETURN (NULL);
834} /* asr_probe */
835
836/*
837 * Probe/Attach for DOMINO chipset.
838 */
839STATIC PROBE_RET
840domino_probe(PROBE_ARGS)
841{
842 PROBE_SET();
843 if (id == 0x10121044) {
844 PROBE_RETURN ("Adaptec Caching Memory Controller");
845 }
846 PROBE_RETURN (NULL);
847} /* domino_probe */
848
/* Placeholder attach: claim the DOMINO device so nothing else does,
 * but perform no initialization. */
STATIC ATTACH_RET
domino_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* domino_attach */
854
855/*
856 * Probe/Attach for MODE0 adapters.
857 */
858STATIC PROBE_RET
859mode0_probe(PROBE_ARGS)
860{
861 PROBE_SET();
862
863 /*
864 * If/When we can get a business case to commit to a
865 * Mode0 driver here, we can make all these tests more
866 * specific and robust. Mode0 adapters have their processors
867 * turned off, this the chips are in a raw state.
868 */
869
870 /* This is a PLX9054 */
871 if (id == 0x905410B5) {
872 PROBE_RETURN ("Adaptec Mode0 PM3757");
873 }
874 /* This is a PLX9080 */
875 if (id == 0x908010B5) {
876 PROBE_RETURN ("Adaptec Mode0 PM3754/PM3755");
877 }
878 /* This is a ZION 80303 */
879 if (id == 0x53098086) {
880 PROBE_RETURN ("Adaptec Mode0 3010S");
881 }
882 /* This is an i960RS */
883 if (id == 0x39628086) {
884 PROBE_RETURN ("Adaptec Mode0 2100S");
885 }
886 /* This is an i960RN */
887 if (id == 0x19648086) {
888 PROBE_RETURN ("Adaptec Mode0 PM2865/2400A/3200S/3400S");
889 }
890#if 0 /* this would match any generic i960 -- mjs */
891 /* This is an i960RP (typically also on Motherboards) */
892 if (id == 0x19608086) {
893 PROBE_RETURN ("Adaptec Mode0 PM2554/PM1554/PM2654");
894 }
895#endif
896 PROBE_RETURN (NULL);
897} /* mode0_probe */
898
/* Placeholder attach: claim Mode0 hardware without initializing it. */
STATIC ATTACH_RET
mode0_attach (ATTACH_ARGS)
{
	ATTACH_RETURN (0);
} /* mode0_attach */
904
905STATIC INLINE union asr_ccb *
906asr_alloc_ccb (
907 IN Asr_softc_t * sc)
908{
909 OUT union asr_ccb * new_ccb;
910
911 if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb),
912 M_DEVBUF, M_WAITOK)) != (union asr_ccb *)NULL) {
913 bzero (new_ccb, sizeof(*new_ccb));
914 new_ccb->ccb_h.pinfo.priority = 1;
915 new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
916 new_ccb->ccb_h.spriv_ptr0 = sc;
917 }
918 return (new_ccb);
919} /* asr_alloc_ccb */
920
/* Release a ccb obtained from asr_alloc_ccb. */
STATIC INLINE void
asr_free_ccb (
	IN union asr_ccb * free_ccb)
{
	free(free_ccb, M_DEVBUF);
} /* asr_free_ccb */
927
928/*
929 * Print inquiry data `carefully'
930 */
931STATIC void
932ASR_prstring (
933 u_int8_t * s,
934 int len)
935{
936 while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
937 printf ("%c", *(s++));
938 }
939} /* ASR_prstring */
940
941/*
942 * Prototypes
943 */
944STATIC INLINE int ASR_queue __P((
945 IN Asr_softc_t * sc,
946 IN PI2O_MESSAGE_FRAME Message));
947/*
948 * Send a message synchronously and without Interrupt to a ccb.
949 */
/*
 * Send a message synchronously and without Interrupt to a ccb.
 * Interrupts are masked at both the spl level and the adapter, the
 * message is queued, and asr_intr() is polled until the ccb leaves
 * CAM_REQ_INPROG.  Returns the final ccb status.
 */
STATIC int
ASR_queue_s (
	INOUT union asr_ccb * ccb,
	IN PI2O_MESSAGE_FRAME Message)
{
	int s;
	U32 Mask;
	Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);

	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	/* Prevent interrupt service */
	s = splcam ();
	sc->ha_Virt->Mask = (Mask = sc->ha_Virt->Mask)
	  | Mask_InterruptsDisabled;

	if (ASR_queue (sc, Message) == EMPTY_QUEUE) {
		/* Queue full: mark for requeue; the poll loop below exits
		 * immediately since the status is no longer INPROG. */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	}

	/*
	 * Wait for this board to report a finished instruction.
	 */
	while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
		(void)asr_intr (sc);	/* poll the reply FIFO directly */
	}

	/* Re-enable Interrupts */
	sc->ha_Virt->Mask = Mask;
	splx(s);

	return (ccb->ccb_h.status);
} /* ASR_queue_s */
988
989/*
990 * Send a message synchronously to a Asr_softc_t
991 */
992STATIC int
993ASR_queue_c (
994 IN Asr_softc_t * sc,
995 IN PI2O_MESSAGE_FRAME Message)
996{
997 union asr_ccb * ccb;
998 OUT int status;
999
1000 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
1001 return (CAM_REQUEUE_REQ);
1002 }
1003
1004 status = ASR_queue_s (ccb, Message);
1005
1006 asr_free_ccb(ccb);
1007
1008 return (status);
1009} /* ASR_queue_c */
1010
1011/*
1012 * Add the specified ccb to the active queue
1013 */
/*
 * Add the specified ccb to the active queue and arm its timeout.
 * Must exclude the interrupt handler (splcam) while the list is
 * modified -- see the V1.04 changelog: unlocked list updates here
 * caused panics.
 */
STATIC INLINE void
ASR_ccbAdd (
	IN Asr_softc_t * sc,
	INOUT union asr_ccb * ccb)
{
	int s;

	s = splcam();
	LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
			/*
			 * RAID systems can take considerable time to
			 * complete some commands given the large cache
			 * flashes switching from write back to write thru.
			 */
			ccb->ccb_h.timeout = 6 * 60 * 1000;	/* 6 minutes */
		}
		/* timeout() wants ticks; ccb timeout is in milliseconds */
		ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
		  (ccb->ccb_h.timeout * hz) / 1000);
	}
	splx(s);
} /* ASR_ccbAdd */
1037
1038/*
1039 * Remove the specified ccb from the active queue.
1040 */
1041STATIC INLINE void
1042ASR_ccbRemove (
1043 IN Asr_softc_t * sc,
1044 INOUT union asr_ccb * ccb)
1045{
1046 int s;
1047
1048 s = splcam();
1049 untimeout(asr_timeout, (caddr_t)ccb, ccb->ccb_h.timeout_ch);
1050 LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
1051 splx(s);
1052} /* ASR_ccbRemove */
1053
1054/*
1055 * Fail all the active commands, so they get re-issued by the operating
1056 * system.
1057 */
/*
 * ASR_failActiveCommands - flush every ccb on the active list, marking
 * each CAM_REQUEUE_REQ with a full residual (nothing transferred) so
 * the upper layers re-issue it.  Requests carrying a CAM path complete
 * via xpt_done(); pathless internal requests get a wakeup() in case a
 * submitter is sleeping on the ccb address.
 */
STATIC INLINE void
ASR_failActiveCommands (
	IN Asr_softc_t * sc)
{
	struct ccb_hdr * ccb;
	int s;

#if 0 /* Currently handled by callers, unnecessary paranoia currently */
	/* Left in for historical perspective. */
	defAlignLong(I2O_EXEC_LCT_NOTIFY_MESSAGE,Message);
	PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;

	/* Send a blind LCT command to wait for the enableSys to complete */
	Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	  I2O_CLASS_MATCH_ANYCLASS);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
#endif

	s = splcam();
	/*
	 * We do not need to inform the CAM layer that we had a bus
	 * reset since we manage it on our own, this also prevents the
	 * SCSI_DELAY settling that would be required on other systems.
	 * The `SCSI_DELAY' has already been handled by the card via the
	 * acquisition of the LCT table while we are at CAM priority level.
	 *  for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
	 *	xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
	 *  }
	 */
	while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != (struct ccb_hdr *)NULL) {
		/* Cancels the watchdog and unlinks the ccb. */
		ASR_ccbRemove (sc, (union asr_ccb *)ccb);

		ccb->status &= ~CAM_STATUS_MASK;
		ccb->status |= CAM_REQUEUE_REQ;
		/* Nothing transferred */
		((struct ccb_scsiio *)ccb)->resid
		  = ((struct ccb_scsiio *)ccb)->dxfer_len;

		if (ccb->path) {
			xpt_done ((union ccb *)ccb);
		} else {
			/* Internal (pathless) command; wake any waiter. */
			wakeup ((caddr_t)ccb);
		}
	}
	splx(s);
} /* ASR_failActiveCommands */
1108
1109/*
1110 * The following command causes the HBA to reset the specific bus
1111 */
/*
 * ASR_resetBus - ask the HBA firmware to reset one SCSI bus.  Walks the
 * cached LCT for the bus-adapter-port entry matching `bus' to obtain
 * its TID, then fires an I2O_HBA_BUS_RESET at it asynchronously.  If no
 * port entry exists for `bus' nothing is sent.
 */
STATIC INLINE void
ASR_resetBus(
	IN Asr_softc_t * sc,
	IN int bus)
{
	defAlignLong(I2O_HBA_BUS_RESET_MESSAGE,Message);
	I2O_HBA_BUS_RESET_MESSAGE * Message_Ptr;
	PI2O_LCT_ENTRY Device;

	Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_HBA_BUS_RESET_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
	  I2O_HBA_BUS_RESET);
	/* Table size is in U32 units, hence the (U32 *) end pointer. */
	for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Device) {
		if (((Device->le_type & I2O_PORT) != 0)
		 && (Device->le_bus == bus)) {
			I2O_MESSAGE_FRAME_setTargetAddress(
			  &Message_Ptr->StdMessageFrame,
			  I2O_LCT_ENTRY_getLocalTID(Device));
			/* Asynchronous command, with no expectations */
			(void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
			break;
		}
	}
} /* ASR_resetBus */
1139
1140STATIC INLINE int
1141ASR_getBlinkLedCode (
1142 IN Asr_softc_t * sc)
1143{
1144 if ((sc != (Asr_softc_t *)NULL)
1145 && (sc->ha_blinkLED != (u_int8_t *)NULL)
1146 && (sc->ha_blinkLED[1] == 0xBC)) {
1147 return (sc->ha_blinkLED[0]);
1148 }
1149 return (0);
1150} /* ASR_getBlinkCode */
1151
1152/*
1153 * Determine the address of an TID lookup. Must be done at high priority
1154 * since the address can be changed by other threads of execution.
1155 *
1156 * Returns NULL pointer if not indexible (but will attempt to generate
1157 * an index if `new_entry' flag is set to TRUE).
1158 *
1159 * All addressible entries are to be guaranteed zero if never initialized.
1160 */
/*
 * ASR_getTidAddress - return the address of the cached TID slot for
 * (bus,target,lun), growing the per-bus target table and per-target
 * LUN table on demand when `new_entry' is TRUE.  Returns NULL when the
 * coordinates are out of range or the tables are absent/too small and
 * growth was not requested.  Caller must hold splcam (see callers).
 *
 * NOTE(review): the malloc NULL checks below look defensive only —
 * M_WAITOK allocations are expected not to fail — but they are kept
 * verbatim; confirm against the kernel malloc(9) contract in use.
 */
STATIC INLINE tid_t *
ASR_getTidAddress(
	INOUT Asr_softc_t * sc,
	IN int bus,
	IN int target,
	IN int lun,
	IN int new_entry)
{
	target2lun_t * bus_ptr;
	lun2tid_t * target_ptr;
	unsigned new_size;

	/*
	 * Validity checking of incoming parameters. More of a bound
	 * expansion limit than an issue with the code dealing with the
	 * values.
	 *
	 *	sc must be valid before it gets here, so that check could be
	 *	dropped if speed a critical issue.
	 */
	if ((sc == (Asr_softc_t *)NULL)
	 || (bus > MAX_CHANNEL)
	 || (target > sc->ha_MaxId)
	 || (lun > sc->ha_MaxLun)) {
		debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
		  (u_long)sc, bus, target, lun);
		return ((tid_t *)NULL);
	}
	/*
	 * See if there is an associated bus list.
	 *
	 * for performance, allocate in size of BUS_CHUNK chunks.
	 * BUS_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
# define BUS_CHUNK 8
	new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
	if ((bus_ptr = sc->ha_targets[bus]) == (target2lun_t *)NULL) {
		/*
		 * Allocate a new structure?
		 *	Since one element in structure, the +1
		 *	needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK))
		   == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to allocate bus list\n");
			return ((tid_t *)NULL);
		}
		bzero (bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * new_size));
		bus_ptr->size = new_size + 1;
	} else if (bus_ptr->size <= new_size) {
		target2lun_t * new_bus_ptr;

		/*
		 * Reallocate a new structure?
		 *	Since one element in structure, the +1
		 *	needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_bus_ptr = (target2lun_t *)malloc (
		    sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
		    M_TEMP, M_WAITOK))
		   == (target2lun_t *)NULL)) {
			debug_asr_printf("failed to reallocate bus list\n");
			return ((tid_t *)NULL);
		}
		/*
		 * Zero and copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bzero (new_bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * new_size));
		bcopy (bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
		  + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
		sc->ha_targets[bus] = new_bus_ptr;
		free (bus_ptr, M_TEMP);
		bus_ptr = new_bus_ptr;
		bus_ptr->size = new_size + 1;
	}
	/*
	 * We now have the bus list, lets get to the target list.
	 * Since most systems have only *one* lun, we do not allocate
	 * in chunks as above, here we allow one, then in chunk sizes.
	 * TARGET_CHUNK must be a power of two. This is to reduce
	 * fragmentation effects on the allocations.
	 */
# define TARGET_CHUNK 8
	if ((new_size = lun) != 0) {
		new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
	}
	if ((target_ptr = bus_ptr->LUN[target]) == (lun2tid_t *)NULL) {
		/*
		 * Allocate a new structure?
		 *	Since one element in structure, the +1
		 *	needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK))
		   == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to allocate target list\n");
			return ((tid_t *)NULL);
		}
		bzero (target_ptr, sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * new_size));
		target_ptr->size = new_size + 1;
	} else if (target_ptr->size <= new_size) {
		lun2tid_t * new_target_ptr;

		/*
		 * Reallocate a new structure?
		 *	Since one element in structure, the +1
		 *	needed for size has been abstracted.
		 */
		if ((new_entry == FALSE)
		 || ((new_target_ptr = (lun2tid_t *)malloc (
		    sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
		    M_TEMP, M_WAITOK))
		   == (lun2tid_t *)NULL)) {
			debug_asr_printf("failed to reallocate target list\n");
			return ((tid_t *)NULL);
		}
		/*
		 * Zero and copy the whole thing, safer, simpler coding
		 * and not really performance critical at this point.
		 */
		bzero (new_target_ptr, sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * new_size));
		bcopy (target_ptr, new_target_ptr,
		  sizeof(*target_ptr)
		  + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
		bus_ptr->LUN[target] = new_target_ptr;
		free (target_ptr, M_TEMP);
		target_ptr = new_target_ptr;
		target_ptr->size = new_size + 1;
	}
	/*
	 * Now, acquire the TID address from the LUN indexed list.
	 */
	return (&(target_ptr->TID[lun]));
} /* ASR_getTidAddress */
1307
1308/*
1309 * Get a pre-existing TID relationship.
1310 *
1311 * If the TID was never set, return (tid_t)-1.
1312 *
1313 * should use mutex rather than spl.
1314 */
1315STATIC INLINE tid_t
1316ASR_getTid (
1317 IN Asr_softc_t * sc,
1318 IN int bus,
1319 IN int target,
1320 IN int lun)
1321{
1322 tid_t * tid_ptr;
1323 int s;
1324 OUT tid_t retval;
1325
1326 s = splcam();
1327 if (((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, FALSE))
1328 == (tid_t *)NULL)
1329 /* (tid_t)0 or (tid_t)-1 indicate no TID */
1330 || (*tid_ptr == (tid_t)0)) {
1331 splx(s);
1332 return ((tid_t)-1);
1333 }
1334 retval = *tid_ptr;
1335 splx(s);
1336 return (retval);
1337} /* ASR_getTid */
1338
1339/*
1340 * Set a TID relationship.
1341 *
1342 * If the TID was not set, return (tid_t)-1.
1343 *
1344 * should use mutex rather than spl.
1345 */
1346STATIC INLINE tid_t
1347ASR_setTid (
1348 INOUT Asr_softc_t * sc,
1349 IN int bus,
1350 IN int target,
1351 IN int lun,
1352 INOUT tid_t TID)
1353{
1354 tid_t * tid_ptr;
1355 int s;
1356
1357 if (TID != (tid_t)-1) {
1358 if (TID == 0) {
1359 return ((tid_t)-1);
1360 }
1361 s = splcam();
1362 if ((tid_ptr = ASR_getTidAddress (sc, bus, target, lun, TRUE))
1363 == (tid_t *)NULL) {
1364 splx(s);
1365 return ((tid_t)-1);
1366 }
1367 *tid_ptr = TID;
1368 splx(s);
1369 }
1370 return (TID);
1371} /* ASR_setTid */
1372
1373/*-------------------------------------------------------------------------*/
1374/* Function ASR_rescan */
1375/*-------------------------------------------------------------------------*/
1376/* The Parameters Passed To This Function Are : */
1377/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1378/* */
1379/* This Function Will rescan the adapter and resynchronize any data */
1380/* */
1381/* Return : 0 For OK, Error Code Otherwise */
1382/*-------------------------------------------------------------------------*/
1383
1384STATIC INLINE int
1385ASR_rescan(
1386 IN Asr_softc_t * sc)
1387{
1388 int bus;
1389 OUT int error;
1390
1391 /*
1392 * Re-acquire the LCT table and synchronize us to the adapter.
1393 */
1394 if ((error = ASR_acquireLct(sc)) == 0) {
1395 error = ASR_acquireHrt(sc);
1396 }
1397
1398 if (error != 0) {
1399 return error;
1400 }
1401
1402 bus = sc->ha_MaxBus;
1403 /* Reset all existing cached TID lookups */
1404 do {
1405 int target, event = 0;
1406
1407 /*
1408 * Scan for all targets on this bus to see if they
1409 * got affected by the rescan.
1410 */
1411 for (target = 0; target <= sc->ha_MaxId; ++target) {
1412 int lun;
1413
1414 /* Stay away from the controller ID */
1415 if (target == sc->ha_adapter_target[bus]) {
1416 continue;
1417 }
1418 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
1419 PI2O_LCT_ENTRY Device;
1420 tid_t TID = (tid_t)-1;
1421 tid_t LastTID;
1422
1423 /*
1424 * See if the cached TID changed. Search for
1425 * the device in our new LCT.
1426 */
1427 for (Device = sc->ha_LCT->LCTEntry;
1428 Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
1429 + I2O_LCT_getTableSize(sc->ha_LCT));
1430 ++Device) {
1431 if ((Device->le_type != I2O_UNKNOWN)
1432 && (Device->le_bus == bus)
1433 && (Device->le_target == target)
1434 && (Device->le_lun == lun)
1435 && (I2O_LCT_ENTRY_getUserTID(Device)
1436 == 0xFFF)) {
1437 TID = I2O_LCT_ENTRY_getLocalTID(
1438 Device);
1439 break;
1440 }
1441 }
1442 /*
1443 * Indicate to the OS that the label needs
1444 * to be recalculated, or that the specific
1445 * open device is no longer valid (Merde)
1446 * because the cached TID changed.
1447 */
1448 LastTID = ASR_getTid (sc, bus, target, lun);
1449 if (LastTID != TID) {
1450 struct cam_path * path;
1451
1452 if (xpt_create_path(&path,
1453 /*periph*/NULL,
1454 cam_sim_path(sc->ha_sim[bus]),
1455 target, lun) != CAM_REQ_CMP) {
1456 if (TID == (tid_t)-1) {
1457 event |= AC_LOST_DEVICE;
1458 } else {
1459 event |= AC_INQ_CHANGED
1460 | AC_GETDEV_CHANGED;
1461 }
1462 } else {
1463 if (TID == (tid_t)-1) {
1464 xpt_async(
1465 AC_LOST_DEVICE,
1466 path, NULL);
1467 } else if (LastTID == (tid_t)-1) {
1468 struct ccb_getdev ccb;
1469
1470 xpt_setup_ccb(
1471 &(ccb.ccb_h),
1472 path, /*priority*/5);
1473 xpt_async(
1474 AC_FOUND_DEVICE,
1475 path,
1476 &ccb);
1477 } else {
1478 xpt_async(
1479 AC_INQ_CHANGED,
1480 path, NULL);
1481 xpt_async(
1482 AC_GETDEV_CHANGED,
1483 path, NULL);
1484 }
1485 }
1486 }
1487 /*
1488 * We have the option of clearing the
1489 * cached TID for it to be rescanned, or to
1490 * set it now even if the device never got
1491 * accessed. We chose the later since we
1492 * currently do not use the condition that
1493 * the TID ever got cached.
1494 */
1495 ASR_setTid (sc, bus, target, lun, TID);
1496 }
1497 }
1498 /*
1499 * The xpt layer can not handle multiple events at the
1500 * same call.
1501 */
1502 if (event & AC_LOST_DEVICE) {
1503 xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
1504 }
1505 if (event & AC_INQ_CHANGED) {
1506 xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
1507 }
1508 if (event & AC_GETDEV_CHANGED) {
1509 xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
1510 }
1511 } while (--bus >= 0);
1512 return (error);
1513} /* ASR_rescan */
1514
1515/*-------------------------------------------------------------------------*/
1516/* Function ASR_reset */
1517/*-------------------------------------------------------------------------*/
1518/* The Parameters Passed To This Function Are : */
1519/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1520/* */
1521/* This Function Will reset the adapter and resynchronize any data */
1522/* */
1523/* Return : None */
1524/*-------------------------------------------------------------------------*/
1525
1526STATIC INLINE int
1527ASR_reset(
1528 IN Asr_softc_t * sc)
1529{
1530 int s, retVal;
1531
1532 s = splcam();
1533 if ((sc->ha_in_reset == HA_IN_RESET)
1534 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1535 splx (s);
1536 return (EBUSY);
1537 }
1538 /*
1539 * Promotes HA_OPERATIONAL to HA_IN_RESET,
1540 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1541 */
1542 ++(sc->ha_in_reset);
1543 if (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0) {
1544 debug_asr_printf ("ASR_resetIOP failed\n");
1545 /*
1546 * We really need to take this card off-line, easier said
1547 * than make sense. Better to keep retrying for now since if a
1548 * UART cable is connected the blinkLEDs the adapter is now in
1549 * a hard state requiring action from the monitor commands to
1550 * the HBA to continue. For debugging waiting forever is a
1551 * good thing. In a production system, however, one may wish
1552 * to instead take the card off-line ...
1553 */
1554# if 0 && (defined(HA_OFF_LINE))
1555 /*
1556 * Take adapter off-line.
1557 */
1558 printf ("asr%d: Taking adapter off-line\n",
1559 sc->ha_path[0]
1560 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1561 : 0);
1562 sc->ha_in_reset = HA_OFF_LINE;
1563 splx (s);
1564 return (ENXIO);
1565# else
1566 /* Wait Forever */
1567 while (ASR_resetIOP (sc->ha_Virt, sc->ha_Fvirt) == 0);
1568# endif
1569 }
1570 retVal = ASR_init (sc);
1571 splx (s);
1572 if (retVal != 0) {
1573 debug_asr_printf ("ASR_init failed\n");
1574 sc->ha_in_reset = HA_OFF_LINE;
1575 return (ENXIO);
1576 }
1577 if (ASR_rescan (sc) != 0) {
1578 debug_asr_printf ("ASR_rescan failed\n");
1579 }
1580 ASR_failActiveCommands (sc);
1581 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1582 printf ("asr%d: Brining adapter back on-line\n",
1583 sc->ha_path[0]
1584 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1585 : 0);
1586 }
1587 sc->ha_in_reset = HA_OPERATIONAL;
1588 return (0);
1589} /* ASR_reset */
1590
1591/*
1592 * Device timeout handler.
1593 */
/*
 * asr_timeout - watchdog for a queued ccb (armed in ASR_ccbAdd).
 * Escalation policy: if the adapter reports a Blink LED fault code,
 * reset the whole adapter; on the first timeout of a ccb, reset only
 * its SCSI bus and re-arm the watchdog; on a second timeout, reset the
 * adapter.  Note `s' is reused: first as the blink-LED code, then as
 * the saved spl level.
 */
STATIC void
asr_timeout(
	INOUT void * arg)
{
	union asr_ccb * ccb = (union asr_ccb *)arg;
	Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	int s;

	debug_asr_print_path(ccb);
	debug_asr_printf("timed out");

	/*
	 * Check if the adapter has locked up?
	 */
	if ((s = ASR_getBlinkLedCode(sc)) != 0) {
		/* Reset Adapter */
		printf ("asr%d: Blink LED 0x%x resetting adapter\n",
		  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
		if (ASR_reset (sc) == ENXIO) {
			/* Try again later */
			ccb->ccb_h.timeout_ch = timeout(asr_timeout,
			  (caddr_t)ccb,
			  (ccb->ccb_h.timeout * hz) / 1000);
		}
		return;
	}
	/*
	 * Abort does not function on the ASR card!!! Walking away from
	 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
	 * our best bet, followed by a complete adapter reset if that fails.
	 */
	s = splcam();
	/* Check if we already timed out once to raise the issue */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
		debug_asr_printf (" AGAIN\nreinitializing adapter\n");
		if (ASR_reset (sc) == ENXIO) {
			/* Off-line; retry the reset later. */
			ccb->ccb_h.timeout_ch = timeout(asr_timeout,
			  (caddr_t)ccb,
			  (ccb->ccb_h.timeout * hz) / 1000);
		}
		splx(s);
		return;
	}
	debug_asr_printf ("\nresetting bus\n");
	/* If the BUS reset does not take, then an adapter reset is next! */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
	ccb->ccb_h.timeout_ch = timeout(asr_timeout, (caddr_t)ccb,
	  (ccb->ccb_h.timeout * hz) / 1000);
	ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
	xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
	splx(s);
} /* asr_timeout */
1647
1648/*
1649 * send a message asynchronously
1650 */
/*
 * ASR_queue - post `Message' to the adapter's inbound FIFO without
 * waiting for completion.  Returns the message frame offset used, or
 * EMPTY_QUEUE when no inbound frame was available (in which case a
 * Blink-LED fault triggers a full adapter reset as a last resort).
 * If the message carries a ccb in its initiator context, the ccb is
 * placed on the active list (arming its watchdog) before the post.
 */
STATIC INLINE int
ASR_queue(
	IN Asr_softc_t * sc,
	IN PI2O_MESSAGE_FRAME Message)
{
	OUT U32 MessageOffset;
	union asr_ccb * ccb;

	debug_asr_printf ("Host Command Dump:\n");
	debug_asr_dump_message (Message);

	/* Recover the ccb (if any) stashed by the sender. */
	ccb = (union asr_ccb *)(long)
	  I2O_MESSAGE_FRAME_getInitiatorContext64(Message);

	if ((MessageOffset = ASR_getMessage(sc->ha_Virt)) != EMPTY_QUEUE) {
#ifdef ASR_MEASURE_PERFORMANCE
		int startTimeIndex;

		if (ccb) {
			++sc->ha_performance.command_count[
			  (int) ccb->csio.cdb_io.cdb_bytes[0]];
			DEQ_TIMEQ_FREE_LIST(startTimeIndex,
			  sc->ha_timeQFreeList,
			  sc->ha_timeQFreeHead,
			  sc->ha_timeQFreeTail);
			if (-1 != startTimeIndex) {
				microtime(&(sc->ha_timeQ[startTimeIndex]));
			}
			/* Time stamp the command before we send it out */
			((PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *) Message)->
			  PrivateMessageFrame.TransactionContext
			    = (I2O_TRANSACTION_CONTEXT) startTimeIndex;

			++sc->ha_submitted_ccbs_count;
			if (sc->ha_performance.max_submit_count
			  < sc->ha_submitted_ccbs_count) {
				sc->ha_performance.max_submit_count
				  = sc->ha_submitted_ccbs_count;
			}
		}
#endif
		/* Copy the frame into the adapter's message window. */
		bcopy (Message, sc->ha_Fvirt + MessageOffset,
		  I2O_MESSAGE_FRAME_getMessageSize(Message) << 2);
		if (ccb) {
			ASR_ccbAdd (sc, ccb);
		}
		/* Post the command */
		sc->ha_Virt->ToFIFO = MessageOffset;
	} else {
		if (ASR_getBlinkLedCode(sc)) {
			/*
			 * Unlikely we can do anything if we can't grab a
			 * message frame :-(, but lets give it a try.
			 */
			(void)ASR_reset (sc);
		}
	}
	return (MessageOffset);
} /* ASR_queue */
1710
1711
1712/* Simple Scatter Gather elements */
/*
 * SG - fill simple scatter/gather element `Index' of the SG list `SGL'
 * with the given Flags, Buffer address and byte Size (the simple-
 * address-element flag is always OR'd in).  Wrapped in do { } while (0)
 * so the multi-statement expansion behaves as a single statement inside
 * un-braced if/else bodies.  NB: `Buffer' is evaluated twice; do not
 * pass an expression with side effects.
 */
#define SG(SGL,Index,Flags,Buffer,Size)					\
	do {								\
		I2O_FLAGS_COUNT_setCount(				\
		  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
		  Size);						\
		I2O_FLAGS_COUNT_setFlags(				\
		  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags));	\
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(		\
		  &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]),		\
		  (Buffer == NULL) ? NULL : KVTOPHYS(Buffer));		\
	} while (0)
1723
1724/*
1725 * Retrieve Parameter Group.
1726 * Buffer must be allocated using defAlignLong macro.
1727 */
/*
 * ASR_getParams - issue an I2O UtilParamsGet for parameter `Group' of
 * the device `TID', placing the reply in `Buffer' (which must have been
 * allocated with the defAlignLong macro and be `BufferSize' bytes).
 *
 * Returns a pointer to the raw parameter data inside Buffer on success,
 * or NULL when the request failed or produced no results.
 */
STATIC void *
ASR_getParams(
	IN Asr_softc_t * sc,
	IN tid_t TID,
	IN int Group,
	OUT void * Buffer,
	IN unsigned BufferSize)
{
	struct paramGetMessage {
		I2O_UTIL_PARAMS_GET_MESSAGE M;
		char F[
		  sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
		struct Operations {
			I2O_PARAM_OPERATIONS_LIST_HEADER Header;
			I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
		} O;
	};
	defAlignLong(struct paramGetMessage, Message);
	struct Operations * Operations_Ptr;
	I2O_UTIL_PARAMS_GET_MESSAGE * Message_Ptr;
	struct ParamBuffer {
		I2O_PARAM_RESULTS_LIST_HEADER Header;
		I2O_PARAM_READ_OPERATION_RESULT Read;
		char Info[1];
	} * Buffer_Ptr;

	/* Message followed by room for two simple SG elements. */
	Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	    + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	Operations_Ptr = (struct Operations *)((char *)Message_Ptr
	  + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
	bzero ((void *)Operations_Ptr, sizeof(struct Operations));
	/* One FIELD_GET operation requesting all fields of `Group'. */
	I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
	  &(Operations_Ptr->Header), 1);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
	  &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
	  &(Operations_Ptr->Template[0]), 0xFFFF);
	I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
	  &(Operations_Ptr->Template[0]), Group);
	bzero ((void *)(Buffer_Ptr = getAlignLong(struct ParamBuffer, Buffer)),
	  BufferSize);

	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  I2O_VERSION_11
	  + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
	  TID);
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_UTIL_PARAMS_GET);
	/*
	 * Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Operations_Ptr, sizeof(struct Operations));
	SG(&(Message_Ptr->SGL), 1,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  Buffer_Ptr, BufferSize);

	if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
	 && (Buffer_Ptr->Header.ResultCount)) {
		return ((void *)(Buffer_Ptr->Info));
	}
	return ((void *)NULL);
} /* ASR_getParams */
1796
1797/*
1798 * Acquire the LCT information.
1799 */
/*
 * ASR_acquireLct - fetch the adapter's Logical Configuration Table.
 * Two-pass: first a blind LCT_NOTIFY into a stack I2O_LCT to learn the
 * table size, then a second LCT_NOTIFY into a freshly malloc'd buffer
 * described by a page-by-page SG list (kernel malloc memory is only
 * guaranteed physically contiguous within a page for large requests).
 * On success sc->ha_LCT is populated and each entry's le_type/le_bus/
 * le_target/le_lun fields are derived from the entry class and DPT
 * device-info parameter group.  Returns 0 on success, or ENOMEM /
 * EINVAL / ENODEV on failure.
 */
STATIC INLINE int
ASR_acquireLct (
	INOUT Asr_softc_t * sc)
{
	PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	int MessageSizeInBytes;
	caddr_t v;
	int len;
	I2O_LCT Table;
	PI2O_LCT_ENTRY Entry;

	/*
	 * sc value assumed valid
	 */
	MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
	if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc (
	  MessageSizeInBytes, M_TEMP, M_WAITOK))
	  == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
		return (ENOMEM);
	}
	(void)ASR_fillMessage((char *)Message_Ptr, MessageSizeInBytes);
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) - sizeof(I2O_SG_ELEMENT))
	    / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_LCT_NOTIFY);
	I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
	  I2O_CLASS_MATCH_ANYCLASS);
	/*
	 * Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
	  sizeof(I2O_LCT));
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(&Table,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	/*
	 * Determine the size of the LCT table.
	 */
	if (sc->ha_LCT) {
		free (sc->ha_LCT, M_TEMP);
	}
	/*
	 * malloc only generates contiguous memory when less than a
	 * page is expected. We must break the request up into an SG list ...
	 */
	if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
	 || (len > (128 * 1024))) {	/* Arbitrary */
		free (Message_Ptr, M_TEMP);
		return (EINVAL);
	}
	if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK))
	  == (PI2O_LCT)NULL) {
		free (Message_Ptr, M_TEMP);
		return (ENOMEM);
	}
	/*
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	I2O_LCT_setTableSize(sc->ha_LCT,
	  (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
	/*
	 * Convert the access to the LCT table into a SG list.
	 */
	sg = Message_Ptr->SGL.u.Simple;
	v = (caddr_t)(sc->ha_LCT);
	for (;;) {
		int next, base, span;

		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			int size;

			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		/* Construct the Flags */
		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		{
			int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
			if (len <= 0) {
				/* Final element: mark end of the list. */
				rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
				    | I2O_SGL_FLAGS_LAST_ELEMENT
				    | I2O_SGL_FLAGS_END_OF_BUFFER);
			}
			I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
		}

		if (len <= 0) {
			break;
		}

		/*
		 * Incrementing requires resizing of the packet.
		 */
		++sg;
		MessageSizeInBytes += sizeof(*sg);
		I2O_MESSAGE_FRAME_setMessageSize(
		  &(Message_Ptr->StdMessageFrame),
		  I2O_MESSAGE_FRAME_getMessageSize(
		    &(Message_Ptr->StdMessageFrame))
		  + (sizeof(*sg) / sizeof(U32)));
		{
			PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;

			if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
			    malloc (MessageSizeInBytes, M_TEMP, M_WAITOK))
			    == (PI2O_EXEC_LCT_NOTIFY_MESSAGE)NULL) {
				free (sc->ha_LCT, M_TEMP);
				sc->ha_LCT = (PI2O_LCT)NULL;
				free (Message_Ptr, M_TEMP);
				return (ENOMEM);
			}
			/* Carry the partial message over to the new copy. */
			span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
			bcopy ((caddr_t)Message_Ptr,
			  (caddr_t)NewMessage_Ptr, span);
			free (Message_Ptr, M_TEMP);
			sg = (PI2O_SGE_SIMPLE_ELEMENT)
			  (((caddr_t)NewMessage_Ptr) + span);
			Message_Ptr = NewMessage_Ptr;
		}
	}
	{	int retval;

		retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
		free (Message_Ptr, M_TEMP);
		if (retval != CAM_REQ_CMP) {
			return (ENODEV);
		}
	}
	/* If the LCT table grew, lets truncate accesses */
	if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
		I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
	}
	/*
	 * Derive driver-private le_* fields for every entry.  Note the
	 * deliberate switch fallthroughs: port-class entries fetch the
	 * controller info and then fall into `default: continue', so
	 * only the non-port classes reach the DeviceInfo block below.
	 */
	for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
	  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
	  ++Entry) {
		Entry->le_type = I2O_UNKNOWN;
		switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {

		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
			Entry->le_type = I2O_BSA;
			break;

		case I2O_CLASS_SCSI_PERIPHERAL:
			Entry->le_type = I2O_SCSI;
			break;

		case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
			Entry->le_type = I2O_FCA;
			break;

		case I2O_CLASS_BUS_ADAPTER_PORT:
			Entry->le_type = I2O_PORT | I2O_SCSI;
			/* FALLTHRU */
		case I2O_CLASS_FIBRE_CHANNEL_PORT:
			if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
			  I2O_CLASS_FIBRE_CHANNEL_PORT) {
				Entry->le_type = I2O_PORT | I2O_FCA;
			}
			{	struct ControllerInfo {
					I2O_PARAM_RESULTS_LIST_HEADER Header;
					I2O_PARAM_READ_OPERATION_RESULT Read;
					I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
				};
				defAlignLong(struct ControllerInfo, Buffer);
				PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;

				Entry->le_bus = 0xff;
				Entry->le_target = 0xff;
				Entry->le_lun = 0xff;

				if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
				  ASR_getParams(sc,
				    I2O_LCT_ENTRY_getLocalTID(Entry),
				    I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
				    Buffer, sizeof(struct ControllerInfo)))
				  == (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)NULL) {
					continue;
				}
				Entry->le_target
				  = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
				    Info);
				Entry->le_lun = 0;
			}	/* FALLTHRU */
		default:
			continue;
		}
		{	struct DeviceInfo {
				I2O_PARAM_RESULTS_LIST_HEADER Header;
				I2O_PARAM_READ_OPERATION_RESULT Read;
				I2O_DPT_DEVICE_INFO_SCALAR Info;
			};
			defAlignLong (struct DeviceInfo, Buffer);
			PI2O_DPT_DEVICE_INFO_SCALAR Info;

			Entry->le_bus = 0xff;
			Entry->le_target = 0xff;
			Entry->le_lun = 0xff;

			if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
			  ASR_getParams(sc,
			    I2O_LCT_ENTRY_getLocalTID(Entry),
			    I2O_DPT_DEVICE_INFO_GROUP_NO,
			    Buffer, sizeof(struct DeviceInfo)))
			  == (PI2O_DPT_DEVICE_INFO_SCALAR)NULL) {
				continue;
			}
			Entry->le_type
			  |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
			Entry->le_bus
			  = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
			/* Track the highest bus number actually in use. */
			if ((Entry->le_bus > sc->ha_MaxBus)
			 && (Entry->le_bus <= MAX_CHANNEL)) {
				sc->ha_MaxBus = Entry->le_bus;
			}
			Entry->le_target
			  = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
			Entry->le_lun
			  = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
		}
	}
	/*
	 * A zero return value indicates success.
	 */
	return (0);
} /* ASR_acquireLct */
2052
2053/*
2054 * Initialize a message frame.
2055 * We assume that the CDB has already been set up, so all we do here is
2056 * generate the Scatter Gather list.
2057 */
STATIC INLINE PI2O_MESSAGE_FRAME
ASR_init_message(
	IN union asr_ccb * ccb,
	OUT PI2O_MESSAGE_FRAME Message)
{
	/*
	 * Convert a CAM ccb into an I2O PRIVATE_SCSI_SCB_EXECUTE_MESSAGE
	 * in the caller-supplied frame `Message'.  The CDB is assumed to be
	 * valid already; this routine resolves the target TID, fills in the
	 * frame header, copies the CDB, and builds the scatter/gather list
	 * (data buffer followed by a mandatory sense-data element).
	 *
	 * Returns the aligned message pointer, or NULL if no I2O TID could
	 * be resolved for the addressed bus/target/lun.
	 */
	int next, span, base, rw;
	OUT PI2O_MESSAGE_FRAME Message_Ptr;
	Asr_softc_t * sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
	PI2O_SGE_SIMPLE_ELEMENT sg;
	caddr_t v;	/* current kernel virtual address within the transfer */
	vm_size_t size, len;	/* len: bytes of the transfer still unmapped */
	U32 MessageSize;

	/* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
	bzero (Message_Ptr = getAlignLong(I2O_MESSAGE_FRAME, Message),
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT)));

	{
		int target = ccb->ccb_h.target_id;
		int lun = ccb->ccb_h.target_lun;
		int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
		tid_t TID;

		/*
		 * Resolve bus/target/lun to an I2O TID.  On a cache miss,
		 * scan the LCT for a matching, unclaimed (UserTID == 0xFFF)
		 * entry and cache the result via ASR_setTid.
		 */
		if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
			PI2O_LCT_ENTRY Device;

			TID = (tid_t)0;
			for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
			  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
			  ++Device) {
				if ((Device->le_type != I2O_UNKNOWN)
				 && (Device->le_bus == bus)
				 && (Device->le_target == target)
				 && (Device->le_lun == lun)
				 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
					TID = I2O_LCT_ENTRY_getLocalTID(Device);
					ASR_setTid (sc, Device->le_bus,
					  Device->le_target, Device->le_lun,
					  TID);
					break;
				}
			}
		}
		/* No addressable I2O device behind this path: give up. */
		if (TID == (tid_t)0) {
			return ((PI2O_MESSAGE_FRAME)NULL);
		}
		I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
		  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
	}
	/*
	 * VersionOffset encodes the SGL offset (in U32s) in the high nibble
	 * area; MessageSize below is the frame size without the SG list.
	 */
	I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
	  (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		/ sizeof(U32)) << 4));
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
	I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
	I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
	I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	    I2O_SCB_FLAG_ENABLE_DISCONNECT
	  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
	/*
	 * We do not need any (optional byteswapping) method access to
	 * the Initiator & Transaction context field.
	 */
	I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);

	I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
	  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
	/*
	 * copy the cdb over
	 */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
	bcopy (&(ccb->csio.cdb_io),
	  ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB, ccb->csio.cdb_len);

	/*
	 * Given a buffer describing a transfer, set up a scatter/gather map
	 * in a ccb to map that SCSI transfer.
	 */

	/* rw is non-zero (I2O_SGL_FLAGS_DIR) for writes to the device. */
	rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;

	/* Re-set the SCB flags with the transfer direction folded in. */
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
	  (ccb->csio.dxfer_len)
	    ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
		    : (I2O_SCB_FLAG_XFER_FROM_DEVICE
		     | I2O_SCB_FLAG_ENABLE_DISCONNECT
		     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
	    : (I2O_SCB_FLAG_ENABLE_DISCONNECT
	     | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
	     | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

	/*
	 * Given a transfer described by a `data', fill in the SG list.
	 */
	sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];

	len = ccb->csio.dxfer_len;
	v = ccb->csio.data_ptr;
	ASSERT (ccb->csio.dxfer_len >= 0);
	MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
	PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
	  (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
	/*
	 * Walk the (virtually contiguous) buffer, emitting one simple SG
	 * element per physically contiguous run, up to SG_SIZE elements.
	 * NOTE(review): if the buffer needs more than SG_SIZE elements,
	 * the loop simply stops with len > 0 — presumably a caller handles
	 * or avoids that case; confirm against the callers of this routine.
	 */
	while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
	  Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
		span = 0;
		next = base = KVTOPHYS(v);
		I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);

		/* How far can we go contiguously */
		while ((len > 0) && (base == next)) {
			next = trunc_page(base) + PAGE_SIZE;
			size = next - base;
			if (size > len) {
				size = len;
			}
			span += size;
			v += size;
			len -= size;
			base = KVTOPHYS(v);
		}

		I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
		if (len == 0) {
			rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
		}
		I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
		  I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
		++sg;
		MessageSize += sizeof(*sg) / sizeof(U32);
	}
	/* We always do the request sense ... */
	if ((span = ccb->csio.sense_len) == 0) {
		span = sizeof(ccb->csio.sense_data);
	}
	SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &(ccb->csio.sense_data), span);
	I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
	  MessageSize + (sizeof(*sg) / sizeof(U32)));
	return (Message_Ptr);
} /* ASR_init_message */
2210
2211/*
2212 * Reset the adapter.
2213 */
STATIC INLINE U32
ASR_initOutBound (
	INOUT Asr_softc_t * sc)
{
	/*
	 * Initialize the adapter's outbound (reply) message queue via an
	 * ExecOutboundInit request: allocate the reply-frame pool on first
	 * use and post every frame's physical address to the FromFIFO.
	 *
	 * Returns the final reply status word on success, or 0 if the
	 * request could not be initiated.
	 */
	struct initOutBoundMessage {
		I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
		U32 R;	/* reply status word, polled below */
	};
	defAlignLong(struct initOutBoundMessage,Message);
	PI2O_EXEC_OUTBOUND_INIT_MESSAGE Message_Ptr;
	OUT U32 * volatile Reply_Ptr;	/* volatile: updated by the IOP via DMA */
	U32 Old;	/* saved interrupt mask from ASR_initiateCp */

	/*
	 * Build up our copy of the Message.
	 */
	Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_OUTBOUND_INIT);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
	I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
	  sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
	/*
	 * Reset the Reply Status
	 */
	*(Reply_Ptr = (U32 *)((char *)Message_Ptr
	  + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
	SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
	  sizeof(U32));
	/*
	 * Send the Message out
	 */
	if ((Old = ASR_initiateCp (sc->ha_Virt, sc->ha_Fvirt, (PI2O_MESSAGE_FRAME)Message_Ptr)) != (U32)-1L) {
		u_long size, addr;

		/*
		 * Wait for a response (Poll).
		 * NOTE(review): busy-wait with no timeout; a hung IOP that
		 * never writes the status word would spin here forever.
		 */
		while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
		/*
		 * Re-enable the interrupts.
		 */
		sc->ha_Virt->Mask = Old;
		/*
		 * Populate the outbound table.
		 */
		if (sc->ha_Msgs == (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {

			/* Allocate the reply frames */
			size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  * sc->ha_Msgs_Count;

			/*
			 * contigmalloc only works reliably at
			 * initialization time.
			 */
			if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
			  contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
			    0xFFFFFFFFul, (u_long)sizeof(U32), 0ul))
			  != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL) {
				(void)bzero ((char *)sc->ha_Msgs, size);
				sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
			}
		}

		/* Initialize the outbound FIFO */
		if (sc->ha_Msgs != (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)NULL)
		for (size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
		  size; --size) {
			/* Hand each reply frame's physical address to the IOP. */
			sc->ha_Virt->FromFIFO = addr;
			addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
		}
		return (*Reply_Ptr);
	}
	return (0);
} /* ASR_initOutBound */
2291
2292/*
2293 * Set the system table
2294 */
STATIC INLINE int
ASR_setSysTab(
	IN Asr_softc_t * sc)
{
	/*
	 * Send an ExecSysTabSet message describing every attached adapter
	 * (the whole Asr_softc list, not just `sc') to the IOP.  The SGL
	 * consists of: the systab header, one element per adapter's
	 * ha_SystemTable, and two empty trailing elements.
	 *
	 * Returns ENOMEM on allocation failure, otherwise the status from
	 * ASR_queue_c (CAM_REQ_CMP on success).
	 */
	PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
	PI2O_SET_SYSTAB_HEADER SystemTable;
	Asr_softc_t * ha;
	PI2O_SGE_SIMPLE_ELEMENT sg;
	int retVal;

	if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc (
	  sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK))
	  == (PI2O_SET_SYSTAB_HEADER)NULL) {
		return (ENOMEM);
	}
	bzero (SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
	/* Count every controller on the global softc list. */
	for (ha = Asr_softc; ha; ha = ha->ha_next) {
		++SystemTable->NumberEntries;
	}
	/* 3 extra SG elements: systab header + two empty terminators. */
	if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc (
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
	  M_TEMP, M_WAITOK)) == (PI2O_EXEC_SYS_TAB_SET_MESSAGE)NULL) {
		free (SystemTable, M_TEMP);
		return (ENOMEM);
	}
	(void)ASR_fillMessage((char *)Message_Ptr,
	  sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	   + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11 +
	  (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_SYS_TAB_SET);
	/*
	 * Call the LCT table to determine the number of device entries
	 * to reserve space for.
	 * since this code is reused in several systems, code efficiency
	 * is greater by using a shift operation rather than a divide by
	 * sizeof(u_int32_t).
	 */
	/* Locate the SGL from the offset nibble encoded in VersionOffset. */
	sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
	  + ((I2O_MESSAGE_FRAME_getVersionOffset(
	      &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
	SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
	++sg;
	for (ha = Asr_softc; ha; ha = ha->ha_next) {
		SG(sg, 0,
		  ((ha->ha_next)
		    ? (I2O_SGL_FLAGS_DIR)
		    : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
		  &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
		++sg;
	}
	/* Two empty elements terminate the SG list. */
	SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
	  | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
	retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
	free (Message_Ptr, M_TEMP);
	free (SystemTable, M_TEMP);
	return (retVal);
} /* ASR_setSysTab */
2358
STATIC INLINE int
ASR_acquireHrt (
	INOUT Asr_softc_t * sc)
{
	/*
	 * Fetch the Hardware Resource Table (HRT) from the IOP and use the
	 * AdapterID of each HRT entry to fill in the bus number (le_bus) of
	 * the matching LCT entries, tracking the highest bus in ha_MaxBus.
	 *
	 * Returns 0 on success, ENODEV if the HRT request fails.
	 */
	defAlignLong(I2O_EXEC_HRT_GET_MESSAGE,Message);
	I2O_EXEC_HRT_GET_MESSAGE * Message_Ptr;
	struct {
		I2O_HRT Header;
		I2O_HRT_ENTRY Entry[MAX_CHANNEL];
	} Hrt;	/* on-stack reply buffer for the HRT */
	u_int8_t NumberOfEntries;
	PI2O_HRT_ENTRY Entry;

	bzero ((void *)&Hrt, sizeof (Hrt));
	Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(Message,
	  sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
	  + sizeof(I2O_SGE_SIMPLE_ELEMENT));
	I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
	  (I2O_VERSION_11
	  + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
		   / sizeof(U32)) << 4)));
	I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
	  I2O_EXEC_HRT_GET);

	/*
	 * Set up the buffers as scatter gather elements.
	 */
	SG(&(Message_Ptr->SGL), 0,
	  I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
	  &Hrt, sizeof(Hrt));
	if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
		return (ENODEV);
	}
	/* Clamp the entry count to what our buffer can actually hold. */
	if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
	  > (MAX_CHANNEL + 1)) {
		NumberOfEntries = MAX_CHANNEL + 1;
	}
	for (Entry = Hrt.Header.HRTEntry;
	  NumberOfEntries != 0;
	  ++Entry, --NumberOfEntries) {
		PI2O_LCT_ENTRY Device;

		/*
		 * AdapterID layout used here: low 12 bits match the LCT
		 * LocalTID, bits 16+ carry the bus number.
		 */
		for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
		  (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
		  ++Device) {
			if (I2O_LCT_ENTRY_getLocalTID(Device)
			  == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
				Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
				  Entry) >> 16;
				if ((Device->le_bus > sc->ha_MaxBus)
				 && (Device->le_bus <= MAX_CHANNEL)) {
					sc->ha_MaxBus = Device->le_bus;
				}
			}
		}
	}
	return (0);
} /* ASR_acquireHrt */
2417
2418/*
2419 * Enable the adapter.
2420 */
2421STATIC INLINE int
2422ASR_enableSys (
2423 IN Asr_softc_t * sc)
2424{
2425 defAlignLong(I2O_EXEC_SYS_ENABLE_MESSAGE,Message);
2426 PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr;
2427
2428 Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(Message,
2429 sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2430 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2431 I2O_EXEC_SYS_ENABLE);
2432 return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2433} /* ASR_enableSys */
2434
2435/*
2436 * Perform the stages necessary to initialize the adapter
2437 */
2438STATIC int
2439ASR_init(
2440 IN Asr_softc_t * sc)
2441{
2442 return ((ASR_initOutBound(sc) == 0)
2443 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2444 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2445} /* ASR_init */
2446
2447/*
2448 * Send a Synchronize Cache command to the target device.
2449 */
STATIC INLINE void
ASR_sync (
	IN Asr_softc_t * sc,
	IN int bus,
	IN int target,
	IN int lun)
{
	/*
	 * Issue a SCSI SYNCHRONIZE CACHE (flush) to one bus/target/lun via
	 * a private SCB-execute message.  Silently does nothing when the
	 * device has no resolved TID or when no ccbs are on ha_ccb.
	 */
	tid_t TID;

	/*
	 * We will not synchronize the device when there are outstanding
	 * commands issued by the OS (this is due to a locked up device,
	 * as the OS normally would flush all outstanding commands before
	 * issuing a shutdown or an adapter reset).
	 */
	if ((sc != (Asr_softc_t *)NULL)
	 && (LIST_FIRST(&(sc->ha_ccb)) != (struct ccb_hdr *)NULL)
	 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
	 && (TID != (tid_t)0)) {
		defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
		PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;

		bzero (Message_Ptr
		  = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
		  sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));

		/* Standard I2O frame header for a private SCB execute. */
		I2O_MESSAGE_FRAME_setVersionOffset(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  I2O_VERSION_11
		    | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		    - sizeof(I2O_SG_ELEMENT))
			/ sizeof(U32)) << 4));
		I2O_MESSAGE_FRAME_setMessageSize(
		  (PI2O_MESSAGE_FRAME)Message_Ptr,
		  (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
		  - sizeof(I2O_SG_ELEMENT))
		  / sizeof(U32));
		I2O_MESSAGE_FRAME_setInitiatorAddress (
		  (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
		I2O_MESSAGE_FRAME_setFunction(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
		I2O_MESSAGE_FRAME_setTargetAddress(
		  (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
		I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  I2O_SCSI_SCB_EXEC);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		    I2O_SCB_FLAG_ENABLE_DISCONNECT
		  | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		  | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
		I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
		  (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
		  DPT_ORGANIZATION_ID);
		/* 6-byte CDB: SYNCHRONIZE CACHE, lun in byte 1 bits 5-7. */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
		Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
		Message_Ptr->CDB[1] = (lun << 5);

		/* Second setSCBFlags call adds the transfer direction. */
		PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
		  (I2O_SCB_FLAG_XFER_FROM_DEVICE
		    | I2O_SCB_FLAG_ENABLE_DISCONNECT
		    | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
		    | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));

		(void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);

	}
}
2519
2520STATIC INLINE void
2521ASR_synchronize (
2522 IN Asr_softc_t * sc)
2523{
2524 int bus, target, lun;
2525
2526 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2527 for (target = 0; target <= sc->ha_MaxId; ++target) {
2528 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2529 ASR_sync(sc,bus,target,lun);
2530 }
2531 }
2532 }
2533}
2534
2535/*
2536 * Reset the HBA, targets and BUS.
2537 * Currently this resets *all* the SCSI busses.
2538 */
2539STATIC INLINE void
2540asr_hbareset(
2541 IN Asr_softc_t * sc)
2542{
2543 ASR_synchronize (sc);
2544 (void)ASR_reset (sc);
2545} /* asr_hbareset */
2546
2547/*
2548 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2549 * limit and a reduction in error checking (in the pre 4.0 case).
2550 */
2551STATIC int
2552asr_pci_map_mem (
2553#if __FreeBSD_version >= 400000
2554 IN device_t tag,
2555#else
2556 IN pcici_t tag,
2557#endif
2558 IN Asr_softc_t * sc)
2559{
2560 int rid;
2561 u_int32_t p, l, s;
2562
2563#if __FreeBSD_version >= 400000
2564 /*
2565 * I2O specification says we must find first *memory* mapped BAR
2566 */
2567 for (rid = PCIR_MAPS;
2568 rid < (PCIR_MAPS + 4 * sizeof(u_int32_t));
2569 rid += sizeof(u_int32_t)) {
2570 p = pci_read_config(tag, rid, sizeof(p));
2571 if ((p & 1) == 0) {
2572 break;
2573 }
2574 }
2575 /*
2576 * Give up?
2577 */
2578 if (rid >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2579 rid = PCIR_MAPS;
2580 }
2581 p = pci_read_config(tag, rid, sizeof(p));
2582 pci_write_config(tag, rid, -1, sizeof(p));
2583 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2584 pci_write_config(tag, rid, p, sizeof(p));
2585 if (l > MAX_MAP) {
2586 l = MAX_MAP;
2587 }
2588 /*
2589 * The 2005S Zero Channel RAID solution is not a perfect PCI
2590 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
2591 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
2592 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
2593 * accessible via BAR0, the messaging registers are accessible
2594 * via BAR1. If the subdevice code is 50 to 59 decimal.
2595 */
2596 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s));
2597 if (s != 0xA5111044) {
2598 s = pci_read_config(tag, PCIR_SUBVEND_0, sizeof(s));
2599 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2600 && (ADPTDOMINATOR_SUB_ID_START <= s)
2601 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2602 l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
2603 }
2604 }
2605 p &= ~15;
2606 sc->ha_mem_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2607 p, p + l, l, RF_ACTIVE);
2608 if (sc->ha_mem_res == (struct resource *)NULL) {
2609 return (0);
2610 }
2611 sc->ha_Base = (void *)rman_get_start(sc->ha_mem_res);
2612 if (sc->ha_Base == (void *)NULL) {
2613 return (0);
2614 }
2615 sc->ha_Virt = (i2oRegs_t *) rman_get_virtual(sc->ha_mem_res);
2616 if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
2617 if ((rid += sizeof(u_int32_t))
2618 >= (PCIR_MAPS + 4 * sizeof(u_int32_t))) {
2619 return (0);
2620 }
2621 p = pci_read_config(tag, rid, sizeof(p));
2622 pci_write_config(tag, rid, -1, sizeof(p));
2623 l = 0 - (pci_read_config(tag, rid, sizeof(l)) & ~15);
2624 pci_write_config(tag, rid, p, sizeof(p));
2625 if (l > MAX_MAP) {
2626 l = MAX_MAP;
2627 }
2628 p &= ~15;
2629 sc->ha_mes_res = bus_alloc_resource(tag, SYS_RES_MEMORY, &rid,
2630 p, p + l, l, RF_ACTIVE);
2631 if (sc->ha_mes_res == (struct resource *)NULL) {
2632 return (0);
2633 }
2634 if ((void *)rman_get_start(sc->ha_mes_res) == (void *)NULL) {
2635 return (0);
2636 }
2637 sc->ha_Fvirt = (U8 *) rman_get_virtual(sc->ha_mes_res);
2638 } else {
2639 sc->ha_Fvirt = (U8 *)(sc->ha_Virt);
2640 }
2641#else
2642 vm_size_t psize, poffs;
2643
2644 /*
2645 * I2O specification says we must find first *memory* mapped BAR
2646 */
2647 for (rid = PCI_MAP_REG_START;
2648 rid < (PCI_MAP_REG_START + 4 * sizeof(u_int32_t));
2649 rid += sizeof(u_int32_t)) {
2650 p = pci_conf_read (tag, rid);
2651 if ((p & 1) == 0) {
2652 break;
2653 }
2654 }
2655 if (rid >= (PCI_MAP_REG_START + 4 * sizeof(u_int32_t))) {
2656 rid = PCI_MAP_REG_START;
2657 }
2658 /*
2659 ** save old mapping, get size and type of memory
2660 **
2661 ** type is in the lowest four bits.
2662 ** If device requires 2^n bytes, the next
2663 ** n-4 bits are read as 0.
2664 */
2665
2666 sc->ha_Base = (void *)((p = pci_conf_read (tag, rid))
2667 & PCI_MAP_MEMORY_ADDRESS_MASK);
2668 pci_conf_write (tag, rid, 0xfffffffful);
2669 l = pci_conf_read (tag, rid);
2670 pci_conf_write (tag, rid, p);
2671
2672 /*
2673 ** check the type
2674 */
2675
2676 if (!((l & PCI_MAP_MEMORY_TYPE_MASK) == PCI_MAP_MEMORY_TYPE_32BIT_1M
2677 && ((u_long)sc->ha_Base & ~0xfffff) == 0)
2678 && ((l & PCI_MAP_MEMORY_TYPE_MASK) != PCI_MAP_MEMORY_TYPE_32BIT)) {
2679 debug_asr_printf (
2680 "asr_pci_map_mem failed: bad memory type=0x%x\n",
2681 (unsigned) l);
2682 return (0);
2683 };
2684
2685 /*
2686 ** get the size.
2687 */
2688
2689 psize = -(l & PCI_MAP_MEMORY_ADDRESS_MASK);
2690 if (psize > MAX_MAP) {
2691 psize = MAX_MAP;
2692 }
2693 /*
2694 * The 2005S Zero Channel RAID solution is not a perfect PCI
2695 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
2696 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
2697 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
2698 * accessible via BAR0, the messaging registers are accessible
2699 * via BAR1. If the subdevice code is 50 to 59 decimal.
2700 */
2701 s = pci_read_config(tag, PCIR_DEVVENDOR, sizeof(s));
2702 if (s != 0xA5111044) {
2703 s = pci_conf_read (tag, PCIR_SUBVEND_0)
2704 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2705 && (ADPTDOMINATOR_SUB_ID_START <= s)
2706 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2707 psize = MAX_MAP;
2708 }
2709 }
2710
2711 if ((sc->ha_Base == (void *)NULL)
2712 || (sc->ha_Base == (void *)PCI_MAP_MEMORY_ADDRESS_MASK)) {
2713 debug_asr_printf ("asr_pci_map_mem: not configured by bios.\n");
2714 return (0);
2715 };
2716
2717 /*
2718 ** Truncate sc->ha_Base to page boundary.
2719 ** (Or does pmap_mapdev the job?)
2720 */
2721
2722 poffs = (u_long)sc->ha_Base - trunc_page ((u_long)sc->ha_Base);
2723 sc->ha_Virt = (i2oRegs_t *)pmap_mapdev ((u_long)sc->ha_Base - poffs,
2724 psize + poffs);
2725
2726 if (sc->ha_Virt == (i2oRegs_t *)NULL) {
2727 return (0);
2728 }
2729
2730 sc->ha_Virt = (i2oRegs_t *)((u_long)sc->ha_Virt + poffs);
2731 if (s == 0xA5111044) {
2732 if ((rid += sizeof(u_int32_t))
2733 >= (PCI_MAP_REG_START + 4 * sizeof(u_int32_t))) {
2734 return (0);
2735 }
2736
2737 /*
2738 ** save old mapping, get size and type of memory
2739 **
2740 ** type is in the lowest four bits.
2741 ** If device requires 2^n bytes, the next
2742 ** n-4 bits are read as 0.
2743 */
2744
2745 if ((((p = pci_conf_read (tag, rid))
2746 & PCI_MAP_MEMORY_ADDRESS_MASK) == 0L)
2747 || ((p & PCI_MAP_MEMORY_ADDRESS_MASK)
2748 == PCI_MAP_MEMORY_ADDRESS_MASK)) {
2749 debug_asr_printf ("asr_pci_map_mem: not configured by bios.\n");
2750 }
2751 pci_conf_write (tag, rid, 0xfffffffful);
2752 l = pci_conf_read (tag, rid);
2753 pci_conf_write (tag, rid, p);
2754 p &= PCI_MAP_MEMORY_TYPE_MASK;
2755
2756 /*
2757 ** check the type
2758 */
2759
2760 if (!((l & PCI_MAP_MEMORY_TYPE_MASK)
2761 == PCI_MAP_MEMORY_TYPE_32BIT_1M
2762 && (p & ~0xfffff) == 0)
2763 && ((l & PCI_MAP_MEMORY_TYPE_MASK)
2764 != PCI_MAP_MEMORY_TYPE_32BIT)) {
2765 debug_asr_printf (
2766 "asr_pci_map_mem failed: bad memory type=0x%x\n",
2767 (unsigned) l);
2768 return (0);
2769 };
2770
2771 /*
2772 ** get the size.
2773 */
2774
2775 psize = -(l & PCI_MAP_MEMORY_ADDRESS_MASK);
2776 if (psize > MAX_MAP) {
2777 psize = MAX_MAP;
2778 }
2779
2780 /*
2781 ** Truncate p to page boundary.
2782 ** (Or does pmap_mapdev the job?)
2783 */
2784
2785 poffs = p - trunc_page (p);
2786 sc->ha_Fvirt = (U8 *)pmap_mapdev (p - poffs, psize + poffs);
2787
2788 if (sc->ha_Fvirt == (U8 *)NULL) {
2789 return (0);
2790 }
2791
2792 sc->ha_Fvirt = (U8 *)((u_long)sc->ha_Fvirt + poffs);
2793 } else {
2794 sc->ha_Fvirt = (U8 *)(sc->ha_Virt);
2795 }
2796#endif
2797 return (1);
2798} /* asr_pci_map_mem */
2799
2800/*
2801 * A simplified copy of the real pci_map_int with additional
2802 * registration requirements.
2803 */
STATIC int
asr_pci_map_int (
#if __FreeBSD_version >= 400000
	IN device_t tag,
#else
	IN pcici_t tag,
#endif
	IN Asr_softc_t * sc)
{
	/*
	 * Allocate and hook up the adapter's (shared) PCI interrupt to
	 * asr_intr, and record the interrupt line in sc->ha_irq.
	 * Returns 1 on success, 0 on failure.
	 */
#if __FreeBSD_version >= 400000
	int rid = 0;

	sc->ha_irq_res = bus_alloc_resource(tag, SYS_RES_IRQ, &rid,
	  0, ~0, 1, RF_ACTIVE | RF_SHAREABLE);
	if (sc->ha_irq_res == (struct resource *)NULL) {
		return (0);
	}
	if (bus_setup_intr(tag, sc->ha_irq_res, INTR_TYPE_CAM,
	  (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr))) {
		return (0);
	}
	sc->ha_irq = pci_read_config(tag, PCIR_INTLINE, sizeof(char));
#else
	/* Pre-4.0: register through the legacy pci_map_int interface. */
	if (!pci_map_int(tag, (pci_inthand_t *)asr_intr,
	  (void *)sc, &cam_imask)) {
		return (0);
	}
	sc->ha_irq = pci_conf_read(tag, PCIR_INTLINE);
#endif
	return (1);
} /* asr_pci_map_int */
2835
2836/*
2837 * Attach the devices, and virtual devices to the driver list.
2838 */
2839STATIC ATTACH_RET
2840asr_attach (ATTACH_ARGS)
2841{
2842 Asr_softc_t * sc;
2843 struct scsi_inquiry_data * iq;
2844 ATTACH_SET();
2845
2846 if ((sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT)) == (Asr_softc_t *)NULL) {
2847 ATTACH_RETURN(ENOMEM);
2848 }
2849 if (Asr_softc == (Asr_softc_t *)NULL) {
2850 /*
2851 * Fixup the OS revision as saved in the dptsig for the
2852 * engine (dptioctl.h) to pick up.
2853 */
2854 bcopy (osrelease, &ASR_sig.dsDescription[16], 5);
2855 printf ("asr%d: major=%d\n", unit, asr_cdevsw.d_maj);
2856 }
2857 /*
2858 * Initialize the software structure
2859 */
2860 bzero (sc, sizeof(*sc));
2861 LIST_INIT(&(sc->ha_ccb));
2862# ifdef ASR_MEASURE_PERFORMANCE
2863 {
2864 u_int32_t i;
2865
2866 // initialize free list for timeQ
2867 sc->ha_timeQFreeHead = 0;
2868 sc->ha_timeQFreeTail = MAX_TIMEQ_SIZE - 1;
2869 for (i = 0; i < MAX_TIMEQ_SIZE; i++) {
2870 sc->ha_timeQFreeList[i] = i;
2871 }
2872 }
2873# endif
2874 /* Link us into the HA list */
2875 {
2876 Asr_softc_t **ha;
2877
2878 for (ha = &Asr_softc; *ha; ha = &((*ha)->ha_next));
2879 *(ha) = sc;
2880 }
2881 {
2882 PI2O_EXEC_STATUS_GET_REPLY status;
2883 int size;
2884
2885 /*
2886 * This is the real McCoy!
2887 */
2888 if (!asr_pci_map_mem(tag, sc)) {
2889 printf ("asr%d: could not map memory\n", unit);
2890 ATTACH_RETURN(ENXIO);
2891 }
2892 /* Enable if not formerly enabled */
2893#if __FreeBSD_version >= 400000
2894 pci_write_config (tag, PCIR_COMMAND,
2895 pci_read_config (tag, PCIR_COMMAND, sizeof(char))
2896 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2897 /* Knowledge is power, responsibility is direct */
2898 {
2899 struct pci_devinfo {
2900 STAILQ_ENTRY(pci_devinfo) pci_links;
2901 struct resource_list resources;
2902 pcicfgregs cfg;
2903 } * dinfo = device_get_ivars(tag);
2904 sc->ha_pciBusNum = dinfo->cfg.bus;
2905 sc->ha_pciDeviceNum = (dinfo->cfg.slot << 3)
2906 | dinfo->cfg.func;
2907 }
2908#else
2909 pci_conf_write (tag, PCIR_COMMAND,
2910 pci_conf_read (tag, PCIR_COMMAND)
2911 | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
2912 /* Knowledge is power, responsibility is direct */
2913 switch (pci_mechanism) {
2914
2915 case 1:
2916 sc->ha_pciBusNum = tag.cfg1 >> 16;
2917 sc->ha_pciDeviceNum = tag.cfg1 >> 8;
2918
2919 case 2:
2920 sc->ha_pciBusNum = tag.cfg2.forward;
2921 sc->ha_pciDeviceNum = ((tag.cfg2.enable >> 1) & 7)
2922 | (tag.cfg2.port >> 5);
2923 }
2924#endif
2925 /* Check if the device is there? */
2926 if ((ASR_resetIOP(sc->ha_Virt, sc->ha_Fvirt) == 0)
2927 || ((status = (PI2O_EXEC_STATUS_GET_REPLY)malloc (
2928 sizeof(I2O_EXEC_STATUS_GET_REPLY), M_TEMP, M_WAITOK))
2929 == (PI2O_EXEC_STATUS_GET_REPLY)NULL)
2930 || (ASR_getStatus(sc->ha_Virt, sc->ha_Fvirt, status) == NULL)) {
2931 printf ("asr%d: could not initialize hardware\n", unit);
2932 ATTACH_RETURN(ENODEV); /* Get next, maybe better luck */
2933 }
2934 sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2935 sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2936 sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2937 sc->ha_SystemTable.IopState = status->IopState;
2938 sc->ha_SystemTable.MessengerType = status->MessengerType;
2939 sc->ha_SystemTable.InboundMessageFrameSize
2940 = status->InboundMFrameSize;
2941 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow
2942 = (U32)(sc->ha_Base) + (U32)(&(((i2oRegs_t *)NULL)->ToFIFO));
2943
2944 if (!asr_pci_map_int(tag, (void *)sc)) {
2945 printf ("asr%d: could not map interrupt\n", unit);
2946 ATTACH_RETURN(ENXIO);
2947 }
2948
2949 /* Adjust the maximim inbound count */
2950 if (((sc->ha_QueueSize
2951 = I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status))
2952 > MAX_INBOUND)
2953 || (sc->ha_QueueSize == 0)) {
2954 sc->ha_QueueSize = MAX_INBOUND;
2955 }
2956
2957 /* Adjust the maximum outbound count */
2958 if (((sc->ha_Msgs_Count
2959 = I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status))
2960 > MAX_OUTBOUND)
2961 || (sc->ha_Msgs_Count == 0)) {
2962 sc->ha_Msgs_Count = MAX_OUTBOUND;
2963 }
2964 if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2965 sc->ha_Msgs_Count = sc->ha_QueueSize;
2966 }
2967
2968 /* Adjust the maximum SG size to adapter */
2969 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(
2970 status) << 2)) > MAX_INBOUND_SIZE) {
2971 size = MAX_INBOUND_SIZE;
2972 }
2973 free (status, M_TEMP);
2974 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2975 + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2976 }
2977
2978 /*
2979 * Only do a bus/HBA reset on the first time through. On this
2980 * first time through, we do not send a flush to the devices.
2981 */
2982 if (ASR_init(sc) == 0) {
2983 struct BufferInfo {
2984 I2O_PARAM_RESULTS_LIST_HEADER Header;
2985 I2O_PARAM_READ_OPERATION_RESULT Read;
2986 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2987 };
2988 defAlignLong (struct BufferInfo, Buffer);
2989 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2990# define FW_DEBUG_BLED_OFFSET 8
2991
2992 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2993 ASR_getParams(sc, 0,
2994 I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2995 Buffer, sizeof(struct BufferInfo)))
2996 != (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)NULL) {
2997 sc->ha_blinkLED = sc->ha_Fvirt
2998 + I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info)
2999 + FW_DEBUG_BLED_OFFSET;
3000 }
3001 if (ASR_acquireLct(sc) == 0) {
3002 (void)ASR_acquireHrt(sc);
3003 }
3004 } else {
3005 printf ("asr%d: failed to initialize\n", unit);
3006 ATTACH_RETURN(ENXIO);
3007 }
3008 /*
3009 * Add in additional probe responses for more channels. We
3010 * are reusing the variable `target' for a channel loop counter.
3011 * Done here because of we need both the acquireLct and
3012 * acquireHrt data.
3013 */
3014 { PI2O_LCT_ENTRY Device;
3015
3016 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
3017 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
3018 ++Device) {
3019 if (Device->le_type == I2O_UNKNOWN) {
3020 continue;
3021 }
3022 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
3023 if (Device->le_target > sc->ha_MaxId) {
3024 sc->ha_MaxId = Device->le_target;
3025 }
3026 if (Device->le_lun > sc->ha_MaxLun) {
3027 sc->ha_MaxLun = Device->le_lun;
3028 }
3029 }
3030 if (((Device->le_type & I2O_PORT) != 0)
3031 && (Device->le_bus <= MAX_CHANNEL)) {
3032 /* Do not increase MaxId for efficiency */
3033 sc->ha_adapter_target[Device->le_bus]
3034 = Device->le_target;
3035 }
3036 }
3037 }
3038
3039
3040 /*
3041 * Print the HBA model number as inquired from the card.
3042 */
3043
3044 printf ("asr%d:", unit);
3045
3046 if ((iq = (struct scsi_inquiry_data *)malloc (
3047 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK))
3048 != (struct scsi_inquiry_data *)NULL) {
3049 defAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE,Message);
3050 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
3051 int posted = 0;
3052
3053 bzero (iq, sizeof(struct scsi_inquiry_data));
3054 bzero (Message_Ptr
3055 = getAlignLong(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE, Message),
3056 sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
3057 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
3058
3059 I2O_MESSAGE_FRAME_setVersionOffset(
3060 (PI2O_MESSAGE_FRAME)Message_Ptr,
3061 I2O_VERSION_11
3062 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
3063 - sizeof(I2O_SG_ELEMENT))
3064 / sizeof(U32)) << 4));
3065 I2O_MESSAGE_FRAME_setMessageSize(
3066 (PI2O_MESSAGE_FRAME)Message_Ptr,
3067 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
3068 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT))
3069 / sizeof(U32));
3070 I2O_MESSAGE_FRAME_setInitiatorAddress (
3071 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
3072 I2O_MESSAGE_FRAME_setFunction(
3073 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
3074 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
3075 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
3076 I2O_SCSI_SCB_EXEC);
3077 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
3078 I2O_SCB_FLAG_ENABLE_DISCONNECT
3079 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
3080 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
3081 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
3082 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
3083 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
3084 DPT_ORGANIZATION_ID);
3085 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
3086 Message_Ptr->CDB[0] = INQUIRY;
3087 Message_Ptr->CDB[4] = (unsigned char)sizeof(struct scsi_inquiry_data);
3088 if (Message_Ptr->CDB[4] == 0) {
3089 Message_Ptr->CDB[4] = 255;
3090 }
3091
3092 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
3093 (I2O_SCB_FLAG_XFER_FROM_DEVICE
3094 | I2O_SCB_FLAG_ENABLE_DISCONNECT
3095 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
3096 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
3097
3098 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
3099 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
3100 sizeof(struct scsi_inquiry_data));
3101 SG(&(Message_Ptr->SGL), 0,
3102 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
3103 iq, sizeof(struct scsi_inquiry_data));
3104 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
3105
3106 if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
3107 printf (" ");
3108 ASR_prstring (iq->vendor, 8);
3109 ++posted;
3110 }
3111 if (iq->product[0] && (iq->product[0] != ' ')) {
3112 printf (" ");
3113 ASR_prstring (iq->product, 16);
3114 ++posted;
3115 }
3116 if (iq->revision[0] && (iq->revision[0] != ' ')) {
3117 printf (" FW Rev. ");
3118 ASR_prstring (iq->revision, 4);
3119 ++posted;
3120 }
3121 free ((caddr_t)iq, M_TEMP);
3122 if (posted) {
3123 printf (",");
3124 }
3125 }
3126 printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
3127 (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
3128
3129 /*
3130 * fill in the prototype cam_path.
3131 */
3132 {
3133 int bus;
3134 union asr_ccb * ccb;
3135
3136 if ((ccb = asr_alloc_ccb (sc)) == (union asr_ccb *)NULL) {
3137 printf ("asr%d: CAM could not be notified of asynchronous callback parameters\n", unit);
3138 ATTACH_RETURN(ENOMEM);
3139 }
3140 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
3141 struct cam_devq * devq;
3142 int QueueSize = sc->ha_QueueSize;
3143
3144 if (QueueSize > MAX_INBOUND) {
3145 QueueSize = MAX_INBOUND;
3146 }
3147
3148 /*
3149 * Create the device queue for our SIM(s).
3150 */
3151 if ((devq = cam_simq_alloc(QueueSize)) == NULL) {
3152 continue;
3153 }
3154
3155 /*
3156 * Construct our first channel SIM entry
3157 */
3158 sc->ha_sim[bus] = cam_sim_alloc(
3159 asr_action, asr_poll, "asr", sc,
3160 unit, 1, QueueSize, devq);
3161 if (sc->ha_sim[bus] == NULL) {
3162 continue;
3163 }
3164
3165 if (xpt_bus_register(sc->ha_sim[bus], bus)
3166 != CAM_SUCCESS) {
3167 cam_sim_free(sc->ha_sim[bus],
3168 /*free_devq*/TRUE);
3169 sc->ha_sim[bus] = NULL;
3170 continue;
3171 }
3172
3173 if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
3174 cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
3175 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
3176 xpt_bus_deregister(
3177 cam_sim_path(sc->ha_sim[bus]));
3178 cam_sim_free(sc->ha_sim[bus],
3179 /*free_devq*/TRUE);
3180 sc->ha_sim[bus] = NULL;
3181 continue;
3182 }
3183 }
3184 asr_free_ccb (ccb);
3185 }
3186 /*
3187 * Generate the device node information
3188 */
3189 (void)make_dev(&asr_cdevsw, unit, 0, 0, S_IRWXU, "rasr%d", unit);
3190 destroy_dev(makedev(asr_cdevsw.d_maj,unit+1));
3191 ATTACH_RETURN(0);
3192} /* asr_attach */
3193
3194STATIC void
3195asr_poll(
3196 IN struct cam_sim *sim)
3197{
3198 asr_intr(cam_sim_softc(sim));
3199} /* asr_poll */
3200
/*
 * CAM transport entry point: dispatch the CCB handed down by the XPT
 * layer.  SCSI I/O is translated into an I2O private message and queued
 * to the controller (completion arrives later via asr_intr); all other
 * function codes are answered immediately with xpt_done().
 */
STATIC void
asr_action(
	IN struct cam_sim * sim,
	IN union ccb * ccb)
{
	struct Asr_softc * sc;

	debug_asr_printf ("asr_action(%lx,%lx{%x})\n",
	  (u_long)sim, (u_long)ccb, ccb->ccb_h.func_code);

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));

	/* Stash the softc in the CCB so the completion side can find it. */
	ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {

	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct Message {
			char M[MAX_INBOUND_SIZE];
		};
		defAlignLong(struct Message,Message);
		PI2O_MESSAGE_FRAME Message_Ptr;

		/* Reject incoming commands while we are resetting the card */
		if (sc->ha_in_reset != HA_OPERATIONAL) {
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			if (sc->ha_in_reset >= HA_OFF_LINE) {
				/* HBA is now off-line */
				ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
			} else {
				/* HBA currently resetting, try again later. */
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			}
			debug_asr_cmd_printf (" e\n");
			xpt_done(ccb);
			debug_asr_cmd_printf (" q\n");
			break;
		}
		/* A CCB arriving here should still be marked in-progress. */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
			printf(
			  "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
			  cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
			  ccb->csio.cdb_io.cdb_bytes[0],
			  cam_sim_bus(sim),
			  ccb->ccb_h.target_id,
			  ccb->ccb_h.target_lun);
		}
		debug_asr_cmd_printf ("(%d,%d,%d,%d)",
		  cam_sim_unit(sim),
		  cam_sim_bus(sim),
		  ccb->ccb_h.target_id,
		  ccb->ccb_h.target_lun);
		debug_asr_cmd_dump_ccb(ccb);

		/*
		 * Build the I2O SCSI SCB execute message; a NULL return
		 * means no TID exists for the addressed device.
		 */
		if ((Message_Ptr = ASR_init_message ((union asr_ccb *)ccb,
		  (PI2O_MESSAGE_FRAME)Message)) != (PI2O_MESSAGE_FRAME)NULL) {
			debug_asr_cmd2_printf ("TID=%x:\n",
			  PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
			    (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
			debug_asr_cmd2_dump_message(Message_Ptr);
			debug_asr_cmd1_printf (" q");

			/* Inbound FIFO full: ask CAM to requeue the request. */
			if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
#ifdef ASR_MEASURE_PERFORMANCE
				++sc->ha_performance.command_too_busy;
#endif
				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
				ccb->ccb_h.status |= CAM_REQUEUE_REQ;
				debug_asr_cmd_printf (" E\n");
				xpt_done(ccb);
			}
			debug_asr_cmd_printf (" Q\n");
			break;
		}
		/*
		 * We will get here if there is no valid TID for the device
		 * referenced in the scsi command packet.
		 */
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
		debug_asr_cmd_printf (" B\n");
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		/* Reset HBA device ... */
		asr_hbareset (sc);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	/*
	 * NOTE(review): REPORT_LUNS is a SCSI opcode, not an XPT function
	 * code, so this case can never match ccb_h.func_code — presumably
	 * dead code; confirm before removing.
	 */
#	if (defined(REPORT_LUNS))
	case REPORT_LUNS:
#	endif
	case XPT_ABORT:		/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		/* XXX Implement */
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct	ccb_trans_settings *cts;
		u_int	target_mask;

		cts = &(ccb->cts);
		target_mask = 0x01 << ccb->ccb_h.target_id;
		/* Only user settings are reported; fixed defaults below. */
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
			cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			cts->sync_period = 6; /* 40MHz */
			cts->sync_offset = 15;

			cts->valid = CCB_TRANS_SYNC_RATE_VALID
				   | CCB_TRANS_SYNC_OFFSET_VALID
				   | CCB_TRANS_BUS_WIDTH_VALID
				   | CCB_TRANS_DISC_VALID
				   | CCB_TRANS_TQ_VALID;
			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		}
		xpt_done(ccb);
		break;
	}

	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;

		ccg = &(ccb->ccg);
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);

		/* Synthesize a CHS geometry scaled to the volume size. */
		if (size_mb > 4096) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else if (size_mb > 2048) {
			ccg->heads = 128;
			ccg->secs_per_track = 63;
		} else if (size_mb > 1024) {
			ccg->heads = 65;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	case XPT_RESET_BUS:	/* Reset the specified SCSI bus */
		ASR_resetBus (sc, cam_sim_bus(sim));
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_TERM_IO:	/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;

	case XPT_PATH_INQ:	/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &(ccb->cpi);

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
		/* Not necessary to reset bus, done by HDM initialization */
		cpi->hba_misc = PIM_NOBUSRESET;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sc->ha_MaxId;
		cpi->max_lun = sc->ha_MaxLun;
		cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
} /* asr_action */
3408
3409#ifdef ASR_MEASURE_PERFORMANCE
3410#define WRITE_OP 1
3411#define READ_OP 2
3412#define min_submitR sc->ha_performance.read_by_size_min_time[index]
3413#define max_submitR sc->ha_performance.read_by_size_max_time[index]
3414#define min_submitW sc->ha_performance.write_by_size_min_time[index]
3415#define max_submitW sc->ha_performance.write_by_size_max_time[index]
3416
3417STATIC INLINE void
3418asr_IObySize(
3419 IN Asr_softc_t * sc,
3420 IN u_int32_t submitted_time,
3421 IN int op,
3422 IN int index)
3423{
3424 struct timeval submitted_timeval;
3425
3426 submitted_timeval.tv_sec = 0;
3427 submitted_timeval.tv_usec = submitted_time;
3428
3429 if ( op == READ_OP ) {
3430 ++sc->ha_performance.read_by_size_count[index];
3431
3432 if ( submitted_time != 0xffffffff ) {
3433 timevaladd(
3434 &(sc->ha_performance.read_by_size_total_time[index]),
3435 &submitted_timeval);
3436 if ( (min_submitR == 0)
3437 || (submitted_time < min_submitR) ) {
3438 min_submitR = submitted_time;
3439 }
3440
3441 if ( submitted_time > max_submitR ) {
3442 max_submitR = submitted_time;
3443 }
3444 }
3445 } else {
3446 ++sc->ha_performance.write_by_size_count[index];
3447 if ( submitted_time != 0xffffffff ) {
3448 timevaladd(
3449 &(sc->ha_performance.write_by_size_total_time[index]),
3450 &submitted_timeval);
3451 if ( (submitted_time < min_submitW)
3452 || (min_submitW == 0) ) {
3453 min_submitW = submitted_time;
3454 }
3455
3456 if ( submitted_time > max_submitW ) {
3457 max_submitW = submitted_time;
3458 }
3459 }
3460 }
3461} /* asr_IObySize */
3462#endif
3463
3464/*
3465 * Handle processing of current CCB as pointed to by the Status.
3466 */
/*
 * Handle processing of current CCB as pointed to by the Status.
 *
 * Drains the adapter's outbound (reply) FIFO: each reply frame is
 * mapped back to its virtual address, matched to the originating CCB
 * via the 64-bit initiator context, translated from an I2O detailed
 * status code to a CAM status, and completed with xpt_done() (or a
 * wakeup() for internally-issued, pathless CCBs).  Returns non-zero
 * if at least one reply was processed.
 */
STATIC int
asr_intr (
	IN Asr_softc_t * sc)
{
	OUT int processed;

#ifdef ASR_MEASURE_PERFORMANCE
	struct timeval junk;

	microtime(&junk);
	sc->ha_performance.intr_started = junk;
#endif

	for (processed = 0;
	  sc->ha_Virt->Status & Mask_InterruptsDisabled;
	  processed = 1) {
		union asr_ccb * ccb;
		U32 ReplyOffset;
		PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;

		/*
		 * NOTE(review): FromFIFO is read twice before giving up —
		 * presumably to give the hardware a second chance to post
		 * a reply; confirm against the adapter documentation.
		 */
		if (((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)
		 && ((ReplyOffset = sc->ha_Virt->FromFIFO) == EMPTY_QUEUE)) {
			break;
		}
		/* Convert the bus (physical) reply offset to a virtual address. */
		Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
		  - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
		/*
		 * We do not need any (optional byteswapping) method access to
		 * the Initiator context field.
		 */
		ccb = (union asr_ccb *)(long)
		  I2O_MESSAGE_FRAME_getInitiatorContext64(
		    &(Reply->StdReplyFrame.StdMessageFrame));
		if (I2O_MESSAGE_FRAME_getMsgFlags(
		  &(Reply->StdReplyFrame.StdMessageFrame))
		  & I2O_MESSAGE_FLAGS_FAIL) {
			defAlignLong(I2O_UTIL_NOP_MESSAGE,Message);
			PI2O_UTIL_NOP_MESSAGE Message_Ptr;
			U32 MessageOffset;

			/* The failure frame preserves the original MFA. */
			MessageOffset = (u_long)
			  I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
			    (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
			/*
			 * Get the Original Message Frame's address, and get
			 * it's Transaction Context into our space. (Currently
			 * unused at original authorship, but better to be
			 * safe than sorry). Straight copy means that we
			 * need not concern ourselves with the (optional
			 * byteswapping) method access.
			 */
			Reply->StdReplyFrame.TransactionContext
			  = ((PI2O_SINGLE_REPLY_MESSAGE_FRAME)
			    (sc->ha_Fvirt + MessageOffset))->TransactionContext;
			/*
			 * For 64 bit machines, we need to reconstruct the
			 * 64 bit context.
			 */
			ccb = (union asr_ccb *)(long)
			  I2O_MESSAGE_FRAME_getInitiatorContext64(
			    &(Reply->StdReplyFrame.StdMessageFrame));
			/*
			 * Unique error code for command failure.
			 */
			I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
			  &(Reply->StdReplyFrame), (u_int16_t)-2);
			/*
			 * Modify the message frame to contain a NOP and
			 * re-issue it to the controller.
			 */
			Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
			  Message, sizeof(I2O_UTIL_NOP_MESSAGE));
#	if (I2O_UTIL_NOP != 0)
			I2O_MESSAGE_FRAME_setFunction (
			  &(Message_Ptr->StdMessageFrame),
			  I2O_UTIL_NOP);
#	endif
			/*
			 * Copy the packet out to the Original Message
			 */
			bcopy ((caddr_t)Message_Ptr,
			  sc->ha_Fvirt + MessageOffset,
			  sizeof(I2O_UTIL_NOP_MESSAGE));
			/*
			 * Issue the NOP
			 */
			sc->ha_Virt->ToFIFO = MessageOffset;
		}

		/*
		 * Asynchronous command with no return requirements,
		 * and a generic handler for immunity against odd error
		 * returns from the adapter.
		 */
		if (ccb == (union asr_ccb *)NULL) {
			/*
			 * Return Reply so that it can be used for the
			 * next command
			 */
			sc->ha_Virt->FromFIFO = ReplyOffset;
			continue;
		}

		/* Welease Wadjah! (and stop timeouts) */
		ASR_ccbRemove (sc, ccb);

		/* Map the I2O detailed status code onto a CAM status. */
		switch (
		  I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
		    &(Reply->StdReplyFrame))) {

		case I2O_SCSI_DSC_SUCCESS:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQ_CMP;
			break;

		case I2O_SCSI_DSC_CHECK_CONDITION:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQ_CMP|CAM_AUTOSNS_VALID;
			break;

		case I2O_SCSI_DSC_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_BUS_BUSY:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_SCSI_BUSY;
			break;

		case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_LUN_INVALID:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
			break;

		case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
			/* FALLTHRU */
		case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
			break;

		default:
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
			break;
		}
		/* Compute the residual from the reply's transfer count. */
		if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
			ccb->csio.resid -=
			  I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
			    Reply);
		}

#ifdef ASR_MEASURE_PERFORMANCE
		{
			struct timeval endTime;
			u_int32_t submitted_time;
			u_int32_t size;
			int op_type;
			int startTimeIndex;

			--sc->ha_submitted_ccbs_count;
			startTimeIndex
			  = (int)Reply->StdReplyFrame.TransactionContext;
			if (-1 != startTimeIndex) {
				/* Compute the time spent in device/adapter */
				microtime(&endTime);
				submitted_time = asr_time_delta(sc->ha_timeQ[
				  startTimeIndex], endTime);
				/* put the startTimeIndex back on free list */
				ENQ_TIMEQ_FREE_LIST(startTimeIndex,
				  sc->ha_timeQFreeList,
				  sc->ha_timeQFreeHead,
				  sc->ha_timeQFreeTail);
			} else {
				submitted_time = 0xffffffff;
			}

#define maxctime sc->ha_performance.max_command_time[ccb->csio.cdb_io.cdb_bytes[0]]
#define minctime sc->ha_performance.min_command_time[ccb->csio.cdb_io.cdb_bytes[0]]
			if (submitted_time != 0xffffffff) {
				/* Track per-opcode and overall min/max latency. */
				if ( maxctime < submitted_time ) {
					maxctime = submitted_time;
				}
				if ( (minctime == 0)
				 || (minctime > submitted_time) ) {
					minctime = submitted_time;
				}

				if ( sc->ha_performance.max_submit_time
				  < submitted_time ) {
					sc->ha_performance.max_submit_time
					  = submitted_time;
				}
				if ( sc->ha_performance.min_submit_time == 0
				 || sc->ha_performance.min_submit_time
				  > submitted_time) {
					sc->ha_performance.min_submit_time
					  = submitted_time;
				}

				/* Classify the SCSI opcode as a read or write. */
				switch ( ccb->csio.cdb_io.cdb_bytes[0] ) {

				case 0xa8:	/* 12-byte READ */
					/* FALLTHRU */
				case 0x08:	/* 6-byte READ */
					/* FALLTHRU */
				case 0x28:	/* 10-byte READ */
					op_type = READ_OP;
					break;

				case 0x0a:	/* 6-byte WRITE */
					/* FALLTHRU */
				case 0xaa:	/* 12-byte WRITE */
					/* FALLTHRU */
				case 0x2a:	/* 10-byte WRITE */
					op_type = WRITE_OP;
					break;

				default:
					op_type = 0;
					break;
				}

				if ( op_type != 0 ) {
					struct scsi_rw_big * cmd;

					/*
					 * NOTE(review): the 10-byte CDB layout
					 * is used to extract the block count
					 * even for 6- and 12-byte opcodes —
					 * confirm this is intentional.
					 */
					cmd = (struct scsi_rw_big *)
					  &(ccb->csio.cdb_io);

					/* Block count to bytes (512 B blocks). */
					size = (((u_int32_t) cmd->length2 << 8)
					  | ((u_int32_t) cmd->length1)) << 9;

					switch ( size ) {

					case 512:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_512);
						break;

					case 1024:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_1K);
						break;

					case 2048:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_2K);
						break;

					case 4096:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_4K);
						break;

					case 8192:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_8K);
						break;

					case 16384:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_16K);
						break;

					case 32768:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_32K);
						break;

					case 65536:
						asr_IObySize(sc,
						  submitted_time, op_type,
						  SIZE_64K);
						break;

					default:
						if ( size > (1 << 16) ) {
							asr_IObySize(sc,
							  submitted_time,
							  op_type,
							  SIZE_BIGGER);
						} else {
							asr_IObySize(sc,
							  submitted_time,
							  op_type,
							  SIZE_OTHER);
						}
						break;
					}
				}
			}
		}
#endif
		/* Sense data in reply packet */
		if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
			u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);

			if (size) {
				/* Clamp to the smallest of the three limits. */
				if (size > sizeof(ccb->csio.sense_data)) {
					size = sizeof(ccb->csio.sense_data);
				}
				if (size > I2O_SCSI_SENSE_DATA_SZ) {
					size = I2O_SCSI_SENSE_DATA_SZ;
				}
				if ((ccb->csio.sense_len)
				 && (size > ccb->csio.sense_len)) {
					size = ccb->csio.sense_len;
				}
				bcopy ((caddr_t)Reply->SenseData,
				  (caddr_t)&(ccb->csio.sense_data), size);
			}
		}

		/*
		 * Return Reply so that it can be used for the next command
		 * since we have no more need for it now
		 */
		sc->ha_Virt->FromFIFO = ReplyOffset;

		/* Pathless CCBs were issued internally; wake the waiter. */
		if (ccb->ccb_h.path) {
			xpt_done ((union ccb *)ccb);
		} else {
			wakeup ((caddr_t)ccb);
		}
	}
#ifdef ASR_MEASURE_PERFORMANCE
	{
		u_int32_t result;

		microtime(&junk);
		result = asr_time_delta(sc->ha_performance.intr_started, junk);

		if (result != 0xffffffff) {
			if ( sc->ha_performance.max_intr_time < result ) {
				sc->ha_performance.max_intr_time = result;
			}

			if ( (sc->ha_performance.min_intr_time == 0)
			 || (sc->ha_performance.min_intr_time > result) ) {
				sc->ha_performance.min_intr_time = result;
			}
		}
	}
#endif
	return (processed);
} /* asr_intr */
3833
3834#undef QueueSize /* Grrrr */
3835#undef SG_Size /* Grrrr */
3836
3837/*
3838 * Meant to be included at the bottom of asr.c !!!
3839 */
3840
3841/*
3842 * Included here as hard coded. Done because other necessary include
3843 * files utilize C++ comment structures which make them a nuisance to
3844 * included here just to pick up these three typedefs.
3845 */
3846typedef U32 DPT_TAG_T;
3847typedef U32 DPT_MSG_T;
3848typedef U32 DPT_RTN_T;
3849
3850#undef SCSI_RESET /* Conflicts with "scsi/scsiconf.h" defintion */
3851#include "dev/asr/osd_unix.h"
3852
3853#define asr_unit(dev) minor(dev)
3854
3855STATIC INLINE Asr_softc_t *
3856ASR_get_sc (
3857 IN dev_t dev)
3858{
3859 int unit = asr_unit(dev);
3860 OUT Asr_softc_t * sc = Asr_softc;
3861
3862 while (sc && sc->ha_sim[0] && (cam_sim_unit(sc->ha_sim[0]) != unit)) {
3863 sc = sc->ha_next;
3864 }
3865 return (sc);
3866} /* ASR_get_sc */
3867
3868STATIC u_int8_t ASR_ctlr_held;
3869#if (!defined(UNREFERENCED_PARAMETER))
3870# define UNREFERENCED_PARAMETER(x) (void)(x)
3871#endif
3872
3873STATIC int
3874asr_open(
3875 IN dev_t dev,
3876 int32_t flags,
3877 int32_t ifmt,
41c20dac 3878 IN d_thread_t *td)
984263bc
MD
3879{
3880 int s;
3881 OUT int error;
3882 UNREFERENCED_PARAMETER(flags);
3883 UNREFERENCED_PARAMETER(ifmt);
3884
3885 if (ASR_get_sc (dev) == (Asr_softc_t *)NULL) {
3886 return (ENODEV);
3887 }
dadab5e9 3888 KKASSERT(td->td_proc);
984263bc
MD
3889 s = splcam ();
3890 if (ASR_ctlr_held) {
3891 error = EBUSY;
d