[dragonfly.git] / sys / dev / raid / asr / asr.c
7b0cd042 1/*-
2 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
3 * Copyright (c) 2000-2001 Adaptec Corporation
4 * All rights reserved.
5 *
6 * TERMS AND CONDITIONS OF USE
7 *
8 * Redistribution and use in source form, with or without modification, are
9 * permitted provided that redistributions of source code must retain the
10 * above copyright notice, this list of conditions and the following disclaimer.
11 *
12 * This software is provided `as is' by Adaptec and any express or implied
13 * warranties, including, but not limited to, the implied warranties of
14 * merchantability and fitness for a particular purpose, are disclaimed. In no
15 * event shall Adaptec be liable for any direct, indirect, incidental, special,
16 * exemplary or consequential damages (including, but not limited to,
17 * procurement of substitute goods or services; loss of use, data, or profits;
18 * or business interruptions) however caused and on any theory of liability,
19 * whether in contract, strict liability, or tort (including negligence or
20 * otherwise) arising in any way out of the use of this driver software, even
21 * if advised of the possibility of such damage.
22 *
23 * SCSI I2O host adapter driver
24 *
25 * V1.10 2004/05/05 scottl@freebsd.org
26 * - Massive cleanup of the driver to remove dead code and
27 * non-conformant style.
28 * - Removed most i386-specific code to make it more portable.
29 * - Converted to the bus_space API.
30 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
31 * - The 2000S and 2005S do not initialize on some machines,
32 * increased timeout to 255ms from 50ms for the StatusGet
33 * command.
34 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
35 * - I knew this one was too good to be true. The error return
36 * on ioctl commands needs to be compared to CAM_REQ_CMP, not
37 * to the bit masked status.
38 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
39 * - The 2005S that was supported is affectionately called the
40 * Conjoined BAR Firmware. In order to support RAID-5 in a
41 * 16MB low-cost configuration, Firmware was forced to go
42 * to a Split BAR Firmware. This requires a separate IOP and
43 * Messaging base address.
44 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
45 * - Handle support for 2005S Zero Channel RAID solution.
46 * - System locked up if the Adapter locked up. Do not try
47 * to send other commands if the resetIOP command fails. The
48 * fail outstanding command discovery loop was flawed as the
49 * removal of the command from the list prevented discovering
50 * all the commands.
51 * - Comment changes to clarify driver.
52 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
53 * - We do not use the AC_FOUND_DEV event because of I2O.
54 * Removed asr_async.
55 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
56 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
57 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0
58 * mode as this is confused with competitor adapters in run
59 * mode.
60 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove
61 * to prevent operating system panic.
62 * - moved default major number to 154 from 97.
63 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
64 * - The controller is not actually an ASR (Adaptec SCSI RAID)
65 * series that is visible, it's more of an internal code name.
66 * remove any visible references within reason for now.
67 * - bus_ptr->LUN was not correctly zeroed when initially
68 * allocated causing a possible panic of the operating system
69 * during boot.
70 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
71 * - Code always fails for ASR_getTid affecting performance.
72 * - initiated a set of changes that resulted from a formal
73 * code inspection by Mark_Salyzyn@adaptec.com,
74 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
75 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
76 * Their findings were focussed on the LCT & TID handler, and
77 * all resulting changes were to improve code readability,
78 * consistency or have a positive effect on performance.
79 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
80 * - Passthrough returned an incorrect error.
81 * - Passthrough did not migrate the intrinsic scsi layer wakeup
82 * on command completion.
83 * - generate control device nodes using make_dev and delete_dev.
84 * - Performance affected by TID caching reallocing.
85 * - Made suggested changes by Justin_Gibbs@adaptec.com
86 * - use splcam instead of splbio.
87 * - use cam_imask instead of bio_imask.
88 * - use u_int8_t instead of u_char.
89 * - use u_int16_t instead of u_short.
90 * - use u_int32_t instead of u_long where appropriate.
91 * - use 64 bit context handler instead of 32 bit.
92 * - create_ccb should only allocate the worst case
93 * requirements for the driver since CAM may evolve
94 * making union ccb much larger than needed here.
95 * renamed create_ccb to asr_alloc_ccb.
96 * - go nutz justifying all debug prints as macros
97 * defined at the top and remove unsightly ifdefs.
98 * - INLINE STATIC viewed as confusing. Historically
99 * utilized to affect code performance and debug
100 * issues in OS, Compiler or OEM specific situations.
101 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
102 * - Ported from FreeBSD 2.2.X DPT I2O driver.
103 * changed struct scsi_xfer to union ccb/struct ccb_hdr
104 * changed variable name xs to ccb
105 * changed struct scsi_link to struct cam_path
106 * changed struct scsibus_data to struct cam_sim
107 * stopped using fordriver for holding on to the TID
108 * use proprietary packet creation instead of scsi_inquire
109 * CAM layer sends synchronize commands.
110 *
111 * $FreeBSD: src/sys/dev/asr/asr.c,v 1.90 2011/10/13 20:06:19 marius Exp $
984263bc 112 */
984263bc 113
7b0cd042 114#include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */
984263bc 115#include <sys/kernel.h>
7b0cd042 116#include <sys/module.h>
117#include <sys/systm.h>
118#include <sys/malloc.h>
984263bc 119#include <sys/conf.h>
2b3f93ea 120#include <sys/caps.h>
7b0cd042 121#include <sys/proc.h>
984263bc 122#include <sys/bus.h>
123#include <sys/rman.h>
124#include <sys/stat.h>
f15db79e 125#include <sys/device.h>
7f2216bc 126#include <sys/thread2.h>
7b0cd042 127#include <sys/bus_dma.h>
984263bc 128
129#include <bus/cam/cam.h>
130#include <bus/cam/cam_ccb.h>
131#include <bus/cam/cam_sim.h>
132#include <bus/cam/cam_xpt_sim.h>
cec957e9 133#include <bus/cam/cam_xpt_periph.h>
984263bc 134
135#include <bus/cam/scsi/scsi_all.h>
136#include <bus/cam/scsi/scsi_message.h>
137
138#include <vm/vm.h>
139#include <vm/pmap.h>
7b0cd042 140
f8334305 141#include <machine/vmparam.h>
984263bc 142
143#include <bus/pci/pcivar.h>
144#include <bus/pci/pcireg.h>
984263bc 145
146#define osdSwap4(x) ((u_long)ntohl((u_long)(x)))
147#define KVTOPHYS(x) vtophys(x)
148#include <dev/raid/asr/dptalign.h>
149#include <dev/raid/asr/i2oexec.h>
150#include <dev/raid/asr/i2obscsi.h>
151#include <dev/raid/asr/i2odpt.h>
152#include <dev/raid/asr/i2oadptr.h>
153
154#include <dev/raid/asr/sys_info.h>
155
156#define ASR_VERSION 1
157#define ASR_REVISION '1'
158#define ASR_SUBREVISION '0'
159#define ASR_MONTH 5
160#define ASR_DAY 5
161#define ASR_YEAR (2004 - 1980)
162
163/*
164 * Debug macros to reduce the unsightly ifdefs
165 */
166#if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
167static __inline void
168debug_asr_message(PI2O_MESSAGE_FRAME message)
169{
170 u_int32_t * pointer = (u_int32_t *)message;
171 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);
172 u_int32_t counter = 0;
173
174 while (length--) {
175 kprintf("%08lx%c", (u_long)*(pointer++),
176 (((++counter & 7) == 0) || (length == 0)) ? '\n' : ' ');
177 }
178}
179#endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
180
181#ifdef DEBUG_ASR
 182 /* Breaks on non-STDC based compilers :-( */
183#define debug_asr_printf(fmt,args...) kprintf(fmt, ##args)
184#define debug_asr_dump_message(message) debug_asr_message(message)
185#define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path);
186#else /* DEBUG_ASR */
187#define debug_asr_printf(fmt,args...)
188#define debug_asr_dump_message(message)
189#define debug_asr_print_path(ccb)
190#endif /* DEBUG_ASR */
984263bc 191
192/*
193 * If DEBUG_ASR_CMD is defined:
194 * 0 - Display incoming SCSI commands
195 * 1 - add in a quick character before queueing.
196 * 2 - add in outgoing message frames.
197 */
198#if (defined(DEBUG_ASR_CMD))
199#define debug_asr_cmd_printf(fmt,args...) kprintf(fmt,##args)
200static __inline void
201debug_asr_dump_ccb(union ccb *ccb)
202{
203 u_int8_t *cp = (unsigned char *)&(ccb->csio.cdb_io);
204 int len = ccb->csio.cdb_len;
205
206 while (len) {
207 debug_asr_cmd_printf (" %02x", *(cp++));
208 --len;
209 }
210}
211#if (DEBUG_ASR_CMD > 0)
212#define debug_asr_cmd1_printf debug_asr_cmd_printf
213#else
214#define debug_asr_cmd1_printf(fmt,args...)
215#endif
216#if (DEBUG_ASR_CMD > 1)
217#define debug_asr_cmd2_printf debug_asr_cmd_printf
218#define debug_asr_cmd2_dump_message(message) debug_asr_message(message)
219#else
220#define debug_asr_cmd2_printf(fmt,args...)
221#define debug_asr_cmd2_dump_message(message)
984263bc 222#endif
223#else /* DEBUG_ASR_CMD */
224#define debug_asr_cmd_printf(fmt,args...)
225#define debug_asr_dump_ccb(ccb)
226#define debug_asr_cmd1_printf(fmt,args...)
227#define debug_asr_cmd2_printf(fmt,args...)
228#define debug_asr_cmd2_dump_message(message)
229#endif /* DEBUG_ASR_CMD */
984263bc 230
231#if (defined(DEBUG_ASR_USR_CMD))
232#define debug_usr_cmd_printf(fmt,args...) kprintf(fmt,##args)
233#define debug_usr_cmd_dump_message(message) debug_usr_message(message)
234#else /* DEBUG_ASR_USR_CMD */
235#define debug_usr_cmd_printf(fmt,args...)
236#define debug_usr_cmd_dump_message(message)
237#endif /* DEBUG_ASR_USR_CMD */
984263bc 238
239#ifdef ASR_IOCTL_COMPAT
240#define dsDescription_size 46 /* Snug as a bug in a rug */
241#endif /* ASR_IOCTL_COMPAT */
984263bc 242
243#include "dev/raid/asr/dptsig.h"
244
245static dpt_sig_S ASR_sig = {
246 { 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
247 PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
248 OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL, ADF_ALL_SC5,
249 0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
250 ASR_MONTH, ASR_DAY, ASR_YEAR,
251/* 01234567890123456789012345678901234567890123456789 < 50 chars */
252 "Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
253 /* ^^^^^ asr_attach alters these to match OS */
254};
255
256/* Configuration Definitions */
257
258#define SG_SIZE 58 /* Scatter Gather list Size */
259#define MAX_TARGET_ID 126 /* Maximum Target ID supported */
260#define MAX_LUN 255 /* Maximum LUN Supported */
261#define MAX_CHANNEL 7 /* Maximum Channel # Supported by driver */
262#define MAX_INBOUND 2000 /* Max CCBs, Also Max Queue Size */
263#define MAX_OUTBOUND 256 /* Maximum outbound frames/adapter */
264#define MAX_INBOUND_SIZE 512 /* Maximum inbound frame size */
265#define MAX_MAP 4194304L /* Maximum mapping size of IOP */
266 /* Also serves as the minimum map for */
267 /* the 2005S zero channel RAID product */
268
269/* I2O register set */
270#define I2O_REG_STATUS 0x30
271#define I2O_REG_MASK 0x34
272#define I2O_REG_TOFIFO 0x40
273#define I2O_REG_FROMFIFO 0x44
274
275#define Mask_InterruptsDisabled 0x08
276
277/*
278 * A MIX of performance and space considerations for TID lookups
279 */
280typedef u_int16_t tid_t;
281
282typedef struct {
283 u_int32_t size; /* up to MAX_LUN */
284 tid_t TID[1];
285} lun2tid_t;
286
287typedef struct {
288 u_int32_t size; /* up to MAX_TARGET */
289 lun2tid_t * LUN[1];
290} target2lun_t;
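
/*
 * Illustrative sketch only (not driver code): how the two-level tables
 * above are indexed once they have been populated.  The real, bounds
 * checked and reallocating version is ASR_getTidAddress()/ASR_getTid()
 * further below; the per-bus `target2lun_t' pointers live in the
 * ha_targets[] array of the adapter softc defined later in this file.
 * A stored TID of 0 means the slot was never assigned.
 *
 *	tid_t
 *	example_getTid(target2lun_t *bus_ptr, int target, int lun)
 *	{
 *		lun2tid_t *target_ptr;
 *
 *		if (bus_ptr == NULL || target >= bus_ptr->size)
 *			return ((tid_t)-1);
 *		target_ptr = bus_ptr->LUN[target];
 *		if (target_ptr == NULL || lun >= target_ptr->size)
 *			return ((tid_t)-1);
 *		return (target_ptr->TID[lun]);
 *	}
 */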
291
292/*
fdd00e0a 293 * Don't play games with the ccb any more, use the CAM ccb
984263bc 294 */
fdd00e0a 295#define asr_ccb ccb
984263bc 296
297struct Asr_status_mem {
298 I2O_EXEC_STATUS_GET_REPLY status;
299 U32 rstatus;
300};
301
302/**************************************************************************
303** ASR Host Adapter structure - One Structure For Each Host Adapter That **
304** Is Configured Into The System. The Structure Supplies Configuration **
305** Information, Status Info, Queue Info And An Active CCB List Pointer. **
306***************************************************************************/
307
984263bc 308typedef struct Asr_softc {
309 device_t ha_dev;
310 u_int16_t ha_irq;
311 u_long ha_Base; /* base port for each board */
312 bus_size_t ha_blinkLED;
313 bus_space_handle_t ha_i2o_bhandle;
314 bus_space_tag_t ha_i2o_btag;
315 bus_space_handle_t ha_frame_bhandle;
316 bus_space_tag_t ha_frame_btag;
317 I2O_IOP_ENTRY ha_SystemTable;
318 LIST_HEAD(,ccb_hdr) ha_ccb; /* ccbs in use */
319
320 bus_dma_tag_t ha_parent_dmat;
321 bus_dma_tag_t ha_statusmem_dmat;
322 bus_dmamap_t ha_statusmem_dmamap;
323 struct Asr_status_mem * ha_statusmem;
324 u_int32_t ha_rstatus_phys;
325 u_int32_t ha_status_phys;
326 struct cam_path * ha_path[MAX_CHANNEL+1];
327 struct cam_sim * ha_sim[MAX_CHANNEL+1];
328 struct resource * ha_mem_res;
329 struct resource * ha_mes_res;
330 struct resource * ha_irq_res;
331 void * ha_intr;
332 PI2O_LCT ha_LCT; /* Complete list of devices */
333#define le_type IdentityTag[0]
334#define I2O_BSA 0x20
335#define I2O_FCA 0x40
336#define I2O_SCSI 0x00
337#define I2O_PORT 0x80
338#define I2O_UNKNOWN 0x7F
339#define le_bus IdentityTag[1]
340#define le_target IdentityTag[2]
341#define le_lun IdentityTag[3]
342 target2lun_t * ha_targets[MAX_CHANNEL+1];
343 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
344 u_long ha_Msgs_Phys;
345
346 u_int8_t ha_in_reset;
347#define HA_OPERATIONAL 0
348#define HA_IN_RESET 1
349#define HA_OFF_LINE 2
350#define HA_OFF_LINE_RECOVERY 3
351 /* Configuration information */
352 /* The target id maximums we take */
353 u_int8_t ha_MaxBus; /* Maximum bus */
354 u_int8_t ha_MaxId; /* Maximum target ID */
355 u_int8_t ha_MaxLun; /* Maximum target LUN */
356 u_int8_t ha_SgSize; /* Max SG elements */
357 u_int8_t ha_pciBusNum;
358 u_int8_t ha_pciDeviceNum;
359 u_int8_t ha_adapter_target[MAX_CHANNEL+1];
360 u_int16_t ha_QueueSize; /* Max outstanding commands */
361 u_int16_t ha_Msgs_Count;
362
363 /* Links into other parents and HBAs */
364 struct Asr_softc * ha_next; /* HBA list */
365 struct cdev *ha_devt;
366} Asr_softc_t;
367
7b0cd042 368static Asr_softc_t *Asr_softc_list;
369
370/*
7b0cd042 371 * Prototypes of the routines we have in this object.
372 */
373
984263bc 374/* I2O HDM interface */
375static int asr_probe(device_t dev);
376static int asr_attach(device_t dev);
377
378static d_ioctl_t asr_ioctl;
379static d_open_t asr_open;
380static d_close_t asr_close;
381static int asr_intr(Asr_softc_t *sc);
382static void asr_timeout(void *arg);
383static int ASR_init(Asr_softc_t *sc);
384static int ASR_acquireLct(Asr_softc_t *sc);
385static int ASR_acquireHrt(Asr_softc_t *sc);
386static void asr_action(struct cam_sim *sim, union ccb *ccb);
387static void asr_poll(struct cam_sim *sim);
388static int ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message);
389
390/*
391 * Here is the auto-probe structure used to nest our tests appropriately
392 * during the startup phase of the operating system.
984263bc 393 */
394static device_method_t asr_methods[] = {
395 DEVMETHOD(device_probe, asr_probe),
396 DEVMETHOD(device_attach, asr_attach),
d3c9c58e 397 DEVMETHOD_END
398};
399
400static driver_t asr_driver = {
401 "asr",
402 asr_methods,
403 sizeof(Asr_softc_t)
404};
405
7b0cd042 406static devclass_t asr_devclass;
dfc199f7 407DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, NULL, NULL);
408MODULE_VERSION(asr, 1);
409MODULE_DEPEND(asr, pci, 1, 1, 1);
410MODULE_DEPEND(asr, cam, 1, 1, 1);
984263bc 411
412/*
413 * devsw for asr hba driver
414 *
415 * only ioctl is used. the sd driver provides all other access.
416 */
417static struct dev_ops asr_ops = {
418 { "asr", 0, 0 },
419 .d_open = asr_open,
420 .d_close = asr_close,
421 .d_ioctl = asr_ioctl,
422};
423
7b0cd042 424/* I2O support routines */
984263bc 425
426static __inline u_int32_t
427asr_get_FromFIFO(Asr_softc_t *sc)
428{
429 return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
430 I2O_REG_FROMFIFO));
431}
984263bc 432
433static __inline u_int32_t
434asr_get_ToFIFO(Asr_softc_t *sc)
435{
436 return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
437 I2O_REG_TOFIFO));
438}
984263bc 439
440static __inline u_int32_t
441asr_get_intr(Asr_softc_t *sc)
442{
443 return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
444 I2O_REG_MASK));
445}
984263bc 446
447static __inline u_int32_t
448asr_get_status(Asr_softc_t *sc)
449{
450 return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
451 I2O_REG_STATUS));
452}
984263bc 453
454static __inline void
455asr_set_FromFIFO(Asr_softc_t *sc, u_int32_t val)
456{
457 bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_FROMFIFO,
458 val);
459}
984263bc 460
461static __inline void
462asr_set_ToFIFO(Asr_softc_t *sc, u_int32_t val)
463{
464 bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_TOFIFO,
465 val);
466}
984263bc 467
468static __inline void
469asr_set_intr(Asr_softc_t *sc, u_int32_t val)
470{
471 bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_MASK,
472 val);
473}
984263bc 474
475static __inline void
476asr_set_frame(Asr_softc_t *sc, void *frame, u_int32_t offset, int len)
477{
478 bus_space_write_region_4(sc->ha_frame_btag, sc->ha_frame_bhandle,
479 offset, (u_int32_t *)frame, len);
480}
481
482/*
7b0cd042 483 * Fill message with default.
984263bc 484 */
485static PI2O_MESSAGE_FRAME
486ASR_fillMessage(void *Message, u_int16_t size)
984263bc 487{
488 PI2O_MESSAGE_FRAME Message_Ptr;
489
490 Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
491 bzero(Message_Ptr, size);
492 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
493 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
494 (size + sizeof(U32) - 1) >> 2);
495 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
496 KASSERT(Message_Ptr != NULL, ("Message_Ptr == NULL"));
497 return (Message_Ptr);
498} /* ASR_fillMessage */
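
/*
 * Usage sketch (not additional driver code): the synchronous command
 * builders below (ASR_resetIOP(), ASR_getStatus(), ASR_resetBus(), ...)
 * all follow the same pattern -- fill a caller supplied frame with
 * ASR_fillMessage(), set the function code plus any command specific
 * fields, and hand the frame to one of the queueing routines:
 *
 *	I2O_EXEC_IOP_RESET_MESSAGE Message;
 *	PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
 *
 *	Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(&Message,
 *	    sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
 *	I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr,
 *	    I2O_EXEC_IOP_RESET);
 *	(void)ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
 *
 * ASR_fillMessage() only zeros the frame and sets the version/offset,
 * size and initiator address fields; everything else is up to the caller.
 */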
499
7b0cd042 500#define EMPTY_QUEUE (0xffffffff)
984263bc 501
502static __inline U32
503ASR_getMessage(Asr_softc_t *sc)
984263bc 504{
505 U32 MessageOffset;
506
507 MessageOffset = asr_get_ToFIFO(sc);
508 if (MessageOffset == EMPTY_QUEUE)
509 MessageOffset = asr_get_ToFIFO(sc);
984263bc 510
7b0cd042 511 return (MessageOffset);
512} /* ASR_getMessage */
513
514/* Issue a polled command */
515static U32
516ASR_initiateCp(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
984263bc 517{
518 U32 Mask = 0xffffffff;
519 U32 MessageOffset;
520 u_int Delay = 1500;
521
522 /*
523 * ASR_initiateCp is only used for synchronous commands and will
 524 * be made more resilient to adapter delays since commands like
525 * resetIOP can cause the adapter to be deaf for a little time.
526 */
527 while (((MessageOffset = ASR_getMessage(sc)) == EMPTY_QUEUE)
528 && (--Delay != 0)) {
529 DELAY (10000);
530 }
531 if (MessageOffset != EMPTY_QUEUE) {
532 asr_set_frame(sc, Message, MessageOffset,
533 I2O_MESSAGE_FRAME_getMessageSize(Message));
534 /*
535 * Disable the Interrupts
536 */
537 Mask = asr_get_intr(sc);
538 asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
539 asr_set_ToFIFO(sc, MessageOffset);
540 }
541 return (Mask);
542} /* ASR_initiateCp */
543
544/*
7b0cd042 545 * Reset the adapter.
984263bc 546 */
547static U32
548ASR_resetIOP(Asr_softc_t *sc)
984263bc 549{
550 I2O_EXEC_IOP_RESET_MESSAGE Message;
551 PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
552 U32 * Reply_Ptr;
553 U32 Old;
554
555 /*
556 * Build up our copy of the Message.
557 */
558 Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(&Message,
559 sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
560 I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
561 /*
562 * Reset the Reply Status
563 */
564 Reply_Ptr = &sc->ha_statusmem->rstatus;
565 *Reply_Ptr = 0;
566 I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
567 sc->ha_rstatus_phys);
568 /*
569 * Send the Message out
570 */
571 if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
572 0xffffffff) {
573 /*
574 * Wait for a response (Poll), timeouts are dangerous if
575 * the card is truly responsive. We assume response in 2s.
576 */
577 u_int8_t Delay = 200;
578
579 while ((*Reply_Ptr == 0) && (--Delay != 0)) {
580 DELAY (10000);
581 }
582 /*
583 * Re-enable the interrupts.
584 */
585 asr_set_intr(sc, Old);
586 KASSERT(*Reply_Ptr != 0, ("*Reply_Ptr == 0"));
587 return(*Reply_Ptr);
588 }
589 KASSERT(Old != 0xffffffff, ("Old == -1"));
590 return (0);
591} /* ASR_resetIOP */
592
593/*
7b0cd042 594 * Get the curent state of the adapter
984263bc 595 */
596static PI2O_EXEC_STATUS_GET_REPLY
597ASR_getStatus(Asr_softc_t *sc)
984263bc 598{
599 I2O_EXEC_STATUS_GET_MESSAGE Message;
600 PI2O_EXEC_STATUS_GET_MESSAGE Message_Ptr;
601 PI2O_EXEC_STATUS_GET_REPLY buffer;
602 U32 Old;
603
604 /*
605 * Build up our copy of the Message.
606 */
607 Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(&Message,
608 sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
609 I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
610 I2O_EXEC_STATUS_GET);
611 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
612 sc->ha_status_phys);
613 /* This one is a Byte Count */
614 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
615 sizeof(I2O_EXEC_STATUS_GET_REPLY));
616 /*
617 * Reset the Reply Status
618 */
619 buffer = &sc->ha_statusmem->status;
620 bzero(buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
621 /*
622 * Send the Message out
623 */
624 if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
625 0xffffffff) {
626 /*
627 * Wait for a response (Poll), timeouts are dangerous if
 628 * the card is truly responsive. We assume response in 255ms.
629 */
630 u_int8_t Delay = 255;
631
632 while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
633 if (--Delay == 0) {
634 buffer = NULL;
635 break;
636 }
637 DELAY (1000);
638 }
639 /*
640 * Re-enable the interrupts.
641 */
642 asr_set_intr(sc, Old);
643 return (buffer);
644 }
645 return (NULL);
646} /* ASR_getStatus */
647
648/*
7b0cd042 649 * Check if the device is a SCSI I2O HBA, and add it to the list.
650 */
651
652/*
653 * Probe for ASR controller. If we find it, we will use it.
654 * virtual adapters.
655 */
656static int
657asr_probe(device_t dev)
984263bc 658{
659 u_int32_t id;
660
661 id = (pci_get_device(dev) << 16) | pci_get_vendor(dev);
662 if ((id == 0xA5011044) || (id == 0xA5111044)) {
663 device_set_desc(dev, "Adaptec Caching SCSI RAID");
664 return (BUS_PROBE_DEFAULT);
665 }
666 return (ENXIO);
667} /* asr_probe */
668
669static __inline union asr_ccb *
670asr_alloc_ccb(Asr_softc_t *sc)
984263bc 671{
672 union asr_ccb *new_ccb;
673
674 new_ccb = xpt_alloc_ccb();
675 new_ccb->ccb_h.pinfo.priority = 1;
676 new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
677 new_ccb->ccb_h.spriv_ptr0 = sc;
678
7b0cd042 679 return (new_ccb);
680} /* asr_alloc_ccb */
681
682static __inline void
683asr_free_ccb(union asr_ccb *free_ccb)
984263bc 684{
fdd00e0a 685 xpt_free_ccb(&free_ccb->ccb_h);
686} /* asr_free_ccb */
687
688/*
7b0cd042 689 * Print inquiry data `carefully'
984263bc 690 */
691static void
692ASR_prstring(u_int8_t *s, int len)
984263bc 693{
694 while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
695 kprintf ("%c", *(s++));
696 }
697} /* ASR_prstring */
698
699/*
7b0cd042 700 * Send a message synchronously and without Interrupt to a ccb.
984263bc 701 */
702static int
703ASR_queue_s(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message)
984263bc 704{
705 U32 Mask;
706 Asr_softc_t *sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
984263bc 707
708 /*
709 * We do not need any (optional byteswapping) method access to
710 * the Initiator context field.
711 */
712 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
984263bc 713
7b0cd042 714 /* Prevent interrupt service */
7f2216bc 715 crit_enter();
716 Mask = asr_get_intr(sc);
717 asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
718
719 if (ASR_queue(sc, Message) == EMPTY_QUEUE) {
720 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
721 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
722 }
723
724 /*
725 * Wait for this board to report a finished instruction.
726 */
727 while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
728 (void)asr_intr (sc);
729 }
730
731 /* Re-enable Interrupts */
732 asr_set_intr(sc, Mask);
7f2216bc 733 crit_exit();
984263bc 734
7b0cd042 735 return (ccb->ccb_h.status);
736} /* ASR_queue_s */
737
738/*
7b0cd042 739 * Send a message synchronously to an Asr_softc_t.
984263bc 740 */
741static int
742ASR_queue_c(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
984263bc 743{
744 union asr_ccb *ccb;
745 int status;
984263bc 746
747 if ((ccb = asr_alloc_ccb (sc)) == NULL) {
748 return (CAM_REQUEUE_REQ);
749 }
984263bc 750
7b0cd042 751 status = ASR_queue_s (ccb, Message);
984263bc 752
7b0cd042 753 asr_free_ccb(ccb);
984263bc 754
7b0cd042 755 return (status);
756} /* ASR_queue_c */
757
758/*
7b0cd042 759 * Add the specified ccb to the active queue
984263bc 760 */
761static __inline void
762ASR_ccbAdd(Asr_softc_t *sc, union asr_ccb *ccb)
984263bc 763{
7f2216bc 764 crit_enter();
765 LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
766 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
767 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
768 /*
769 * RAID systems can take considerable time to
770 * complete some commands given the large cache
 771 * flushes switching from write back to write thru.
772 */
773 ccb->ccb_h.timeout = 6 * 60 * 1000;
774 }
775 callout_reset(ccb->ccb_h.timeout_ch,
776 (ccb->ccb_h.timeout * hz) / 1000,
777 asr_timeout, ccb);
7b0cd042 778 }
7f2216bc 779 crit_exit();
780} /* ASR_ccbAdd */
781
782/*
7b0cd042 783 * Remove the specified ccb from the active queue.
984263bc 784 */
785static __inline void
786ASR_ccbRemove(Asr_softc_t *sc, union asr_ccb *ccb)
984263bc 787{
7f2216bc 788 crit_enter();
cec957e9 789 callout_stop(ccb->ccb_h.timeout_ch);
7b0cd042 790 LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
7f2216bc 791 crit_exit();
792} /* ASR_ccbRemove */
793
794/*
795 * Fail all the active commands, so they get re-issued by the operating
796 * system.
984263bc 797 */
798static void
799ASR_failActiveCommands(Asr_softc_t *sc)
984263bc 800{
7b0cd042 801 struct ccb_hdr *ccb;
984263bc 802
7f2216bc 803 crit_enter();
804 /*
805 * We do not need to inform the CAM layer that we had a bus
806 * reset since we manage it on our own, this also prevents the
807 * SCSI_DELAY settling that would be required on other systems.
808 * The `SCSI_DELAY' has already been handled by the card via the
809 * acquisition of the LCT table while we are at CAM priority level.
810 * for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
811 * xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
812 * }
813 */
814 while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != NULL) {
815 ASR_ccbRemove (sc, (union asr_ccb *)ccb);
816
817 ccb->status &= ~CAM_STATUS_MASK;
818 ccb->status |= CAM_REQUEUE_REQ;
 819 /* Nothing Transferred */
820 ((struct ccb_scsiio *)ccb)->resid
821 = ((struct ccb_scsiio *)ccb)->dxfer_len;
822
823 if (ccb->path) {
824 xpt_done ((union ccb *)ccb);
825 } else {
826 wakeup (ccb);
827 }
828 }
7f2216bc 829 crit_exit();
830} /* ASR_failActiveCommands */
831
832/*
7b0cd042 833 * The following command causes the HBA to reset the specific bus
984263bc 834 */
835static void
836ASR_resetBus(Asr_softc_t *sc, int bus)
984263bc 837{
838 I2O_HBA_BUS_RESET_MESSAGE Message;
839 I2O_HBA_BUS_RESET_MESSAGE *Message_Ptr;
840 PI2O_LCT_ENTRY Device;
841
842 Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(&Message,
843 sizeof(I2O_HBA_BUS_RESET_MESSAGE));
844 I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
845 I2O_HBA_BUS_RESET);
846 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
847 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
848 ++Device) {
849 if (((Device->le_type & I2O_PORT) != 0)
850 && (Device->le_bus == bus)) {
851 I2O_MESSAGE_FRAME_setTargetAddress(
852 &Message_Ptr->StdMessageFrame,
853 I2O_LCT_ENTRY_getLocalTID(Device));
854 /* Asynchronous command, with no expectations */
855 (void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
856 break;
857 }
858 }
859} /* ASR_resetBus */
860
861static __inline int
862ASR_getBlinkLedCode(Asr_softc_t *sc)
984263bc 863{
864 U8 blink;
865
866 if (sc == NULL)
867 return (0);
868
869 blink = bus_space_read_1(sc->ha_frame_btag,
870 sc->ha_frame_bhandle, sc->ha_blinkLED + 1);
871 if (blink != 0xBC)
872 return (0);
873
874 blink = bus_space_read_1(sc->ha_frame_btag,
875 sc->ha_frame_bhandle, sc->ha_blinkLED);
876 return (blink);
 877} /* ASR_getBlinkLedCode */
878
879/*
 880 * Determine the address of a TID lookup. Must be done at high priority
881 * since the address can be changed by other threads of execution.
984263bc 882 *
 883 * Returns NULL pointer if not indexable (but will attempt to generate
884 * an index if `new_entry' flag is set to TRUE).
984263bc 885 *
7b0cd042 886 * All addressible entries are to be guaranteed zero if never initialized.
984263bc 887 */
888static tid_t *
889ASR_getTidAddress(Asr_softc_t *sc, int bus, int target, int lun, int new_entry)
984263bc 890{
891 target2lun_t *bus_ptr;
892 lun2tid_t *target_ptr;
893 unsigned new_size;
894
895 /*
896 * Validity checking of incoming parameters. More of a bound
897 * expansion limit than an issue with the code dealing with the
898 * values.
899 *
900 * sc must be valid before it gets here, so that check could be
 902 * dropped if speed is a critical issue.
902 */
903 if ((sc == NULL)
904 || (bus > MAX_CHANNEL)
905 || (target > sc->ha_MaxId)
906 || (lun > sc->ha_MaxLun)) {
907 debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
908 (u_long)sc, bus, target, lun);
909 return (NULL);
910 }
911 /*
912 * See if there is an associated bus list.
913 *
914 * for performance, allocate in size of BUS_CHUNK chunks.
915 * BUS_CHUNK must be a power of two. This is to reduce
916 * fragmentation effects on the allocations.
917 */
918#define BUS_CHUNK 8
965b839f 919 new_size = roundup2(target, BUS_CHUNK);
920 if ((bus_ptr = sc->ha_targets[bus]) == NULL) {
921 /*
922 * Allocate a new structure?
923 * Since one element in structure, the +1
924 * needed for size has been abstracted.
925 */
926 if ((new_entry == FALSE)
927 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)kmalloc (
928 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
929 M_TEMP, M_WAITOK | M_ZERO))
930 == NULL)) {
931 debug_asr_printf("failed to allocate bus list\n");
932 return (NULL);
933 }
934 bus_ptr->size = new_size + 1;
935 } else if (bus_ptr->size <= new_size) {
936 target2lun_t * new_bus_ptr;
937
938 /*
939 * Reallocate a new structure?
940 * Since one element in structure, the +1
941 * needed for size has been abstracted.
942 */
943 if ((new_entry == FALSE)
944 || ((new_bus_ptr = (target2lun_t *)kmalloc (
945 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
946 M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
947 debug_asr_printf("failed to reallocate bus list\n");
948 return (NULL);
949 }
950 /*
951 * Copy the whole thing, safer, simpler coding
952 * and not really performance critical at this point.
953 */
954 bcopy(bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
955 + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
956 sc->ha_targets[bus] = new_bus_ptr;
957 kfree(bus_ptr, M_TEMP);
958 bus_ptr = new_bus_ptr;
959 bus_ptr->size = new_size + 1;
960 }
961 /*
962 * We now have the bus list, lets get to the target list.
963 * Since most systems have only *one* lun, we do not allocate
964 * in chunks as above, here we allow one, then in chunk sizes.
965 * TARGET_CHUNK must be a power of two. This is to reduce
966 * fragmentation effects on the allocations.
967 */
968#define TARGET_CHUNK 8
969 if ((new_size = lun) != 0) {
965b839f 970 new_size = roundup2(lun, TARGET_CHUNK);
971 }
972 if ((target_ptr = bus_ptr->LUN[target]) == NULL) {
973 /*
974 * Allocate a new structure?
975 * Since one element in structure, the +1
976 * needed for size has been abstracted.
977 */
978 if ((new_entry == FALSE)
979 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)kmalloc (
980 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
981 M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
982 debug_asr_printf("failed to allocate target list\n");
983 return (NULL);
984 }
985 target_ptr->size = new_size + 1;
986 } else if (target_ptr->size <= new_size) {
987 lun2tid_t * new_target_ptr;
988
989 /*
990 * Reallocate a new structure?
991 * Since one element in structure, the +1
992 * needed for size has been abstracted.
993 */
994 if ((new_entry == FALSE)
995 || ((new_target_ptr = (lun2tid_t *)kmalloc (
996 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
997 M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
998 debug_asr_printf("failed to reallocate target list\n");
999 return (NULL);
1000 }
1001 /*
1002 * Copy the whole thing, safer, simpler coding
1003 * and not really performance critical at this point.
1004 */
1005 bcopy(target_ptr, new_target_ptr, sizeof(*target_ptr)
1006 + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
1007 bus_ptr->LUN[target] = new_target_ptr;
1008 kfree(target_ptr, M_TEMP);
1009 target_ptr = new_target_ptr;
1010 target_ptr->size = new_size + 1;
1011 }
1012 /*
1013 * Now, acquire the TID address from the LUN indexed list.
1014 */
1015 return (&(target_ptr->TID[lun]));
984263bc
MD
1016} /* ASR_getTidAddress */
1017
1018/*
7b0cd042 1019 * Get a pre-existing TID relationship.
984263bc 1020 *
7b0cd042 1021 * If the TID was never set, return (tid_t)-1.
984263bc 1022 *
7b0cd042 1023 * should use mutex rather than spl.
984263bc 1024 */
1025static __inline tid_t
1026ASR_getTid(Asr_softc_t *sc, int bus, int target, int lun)
984263bc 1027{
1028 tid_t *tid_ptr;
1029 tid_t retval;
984263bc 1030
7f2216bc 1031 crit_enter();
1032 if (((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, FALSE)) == NULL)
1033 /* (tid_t)0 or (tid_t)-1 indicate no TID */
1034 || (*tid_ptr == (tid_t)0)) {
7f2216bc 1035 crit_exit();
1036 return ((tid_t)-1);
1037 }
1038 retval = *tid_ptr;
7f2216bc 1039 crit_exit();
7b0cd042 1040 return (retval);
1041} /* ASR_getTid */
1042
1043/*
7b0cd042 1044 * Set a TID relationship.
984263bc 1045 *
7b0cd042 1046 * If the TID was not set, return (tid_t)-1.
984263bc 1047 *
7b0cd042 1048 * should use mutex rather than spl.
984263bc 1049 */
1050static __inline tid_t
1051ASR_setTid(Asr_softc_t *sc, int bus, int target, int lun, tid_t TID)
984263bc 1052{
7b0cd042 1053 tid_t *tid_ptr;
984263bc 1054
1055 if (TID != (tid_t)-1) {
1056 if (TID == 0) {
1057 return ((tid_t)-1);
1058 }
7f2216bc 1059 crit_enter();
1060 if ((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, TRUE))
1061 == NULL) {
7f2216bc 1062 crit_exit();
1063 return ((tid_t)-1);
1064 }
1065 *tid_ptr = TID;
7f2216bc 1066 crit_exit();
1067 }
1068 return (TID);
1069} /* ASR_setTid */
1070
1071/*-------------------------------------------------------------------------*/
7b0cd042 1072/* Function ASR_rescan */
984263bc 1073/*-------------------------------------------------------------------------*/
1074/* The Parameters Passed To This Function Are : */
1075/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1076/* */
1077/* This Function Will rescan the adapter and resynchronize any data */
1078/* */
1079/* Return : 0 For OK, Error Code Otherwise */
1080/*-------------------------------------------------------------------------*/
1081
1082static int
1083ASR_rescan(Asr_softc_t *sc)
984263bc 1084{
1085 int bus;
1086 int error;
1087
1088 /*
1089 * Re-acquire the LCT table and synchronize us to the adapter.
1090 */
1091 if ((error = ASR_acquireLct(sc)) == 0) {
1092 error = ASR_acquireHrt(sc);
1093 }
1094
1095 if (error != 0) {
1096 return error;
1097 }
1098
1099 bus = sc->ha_MaxBus;
1100 /* Reset all existing cached TID lookups */
1101 do {
1102 int target, event = 0;
1103
1104 /*
1105 * Scan for all targets on this bus to see if they
1106 * got affected by the rescan.
1107 */
1108 for (target = 0; target <= sc->ha_MaxId; ++target) {
1109 int lun;
1110
1111 /* Stay away from the controller ID */
1112 if (target == sc->ha_adapter_target[bus]) {
1113 continue;
1114 }
1115 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
1116 PI2O_LCT_ENTRY Device;
1117 tid_t TID = (tid_t)-1;
1118 tid_t LastTID;
1119
1120 /*
1121 * See if the cached TID changed. Search for
1122 * the device in our new LCT.
1123 */
1124 for (Device = sc->ha_LCT->LCTEntry;
1125 Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
1126 + I2O_LCT_getTableSize(sc->ha_LCT));
1127 ++Device) {
1128 if ((Device->le_type != I2O_UNKNOWN)
1129 && (Device->le_bus == bus)
1130 && (Device->le_target == target)
1131 && (Device->le_lun == lun)
1132 && (I2O_LCT_ENTRY_getUserTID(Device)
1133 == 0xFFF)) {
1134 TID = I2O_LCT_ENTRY_getLocalTID(
1135 Device);
1136 break;
1137 }
1138 }
1139 /*
1140 * Indicate to the OS that the label needs
1141 * to be recalculated, or that the specific
1142 * open device is no longer valid (Merde)
1143 * because the cached TID changed.
1144 */
1145 LastTID = ASR_getTid (sc, bus, target, lun);
1146 if (LastTID != TID) {
1147 struct cam_path * path;
1148
1149 if (xpt_create_path(&path,
1150 /*periph*/NULL,
1151 cam_sim_path(sc->ha_sim[bus]),
1152 target, lun) != CAM_REQ_CMP) {
1153 if (TID == (tid_t)-1) {
1154 event |= AC_LOST_DEVICE;
1155 } else {
1156 event |= AC_INQ_CHANGED
1157 | AC_GETDEV_CHANGED;
1158 }
1159 } else {
1160 if (TID == (tid_t)-1) {
1161 xpt_async(
1162 AC_LOST_DEVICE,
1163 path, NULL);
1164 } else if (LastTID == (tid_t)-1) {
1165 struct ccb_getdev *ccb;
1166
1167 ccb = &xpt_alloc_ccb()->cgd;
1168
1169 xpt_setup_ccb(
cec957e9 1170 &ccb->ccb_h,
1171 path, /*priority*/5);
1172 xpt_async(
1173 AC_FOUND_DEVICE,
1174 path,
1175 ccb);
1176 xpt_free_ccb(&ccb->ccb_h);
1177 } else {
1178 xpt_async(
1179 AC_INQ_CHANGED,
1180 path, NULL);
1181 xpt_async(
1182 AC_GETDEV_CHANGED,
1183 path, NULL);
1184 }
1185 }
1186 }
1187 /*
1188 * We have the option of clearing the
1189 * cached TID for it to be rescanned, or to
1190 * set it now even if the device never got
 1191 * accessed. We chose the latter since we
1192 * currently do not use the condition that
1193 * the TID ever got cached.
1194 */
1195 ASR_setTid (sc, bus, target, lun, TID);
1196 }
1197 }
1198 /*
1199 * The xpt layer can not handle multiple events at the
1200 * same call.
1201 */
1202 if (event & AC_LOST_DEVICE) {
1203 xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
1204 }
1205 if (event & AC_INQ_CHANGED) {
1206 xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
1207 }
1208 if (event & AC_GETDEV_CHANGED) {
1209 xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
1210 }
1211 } while (--bus >= 0);
1212 return (error);
1213} /* ASR_rescan */
1214
1215/*-------------------------------------------------------------------------*/
7b0cd042 1216/* Function ASR_reset */
984263bc 1217/*-------------------------------------------------------------------------*/
1218/* The Parameters Passed To This Function Are : */
1219/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1220/* */
1221/* This Function Will reset the adapter and resynchronize any data */
1222/* */
1223/* Return : None */
1224/*-------------------------------------------------------------------------*/
1225
1226static int
1227ASR_reset(Asr_softc_t *sc)
984263bc 1228{
7b0cd042 1229 int retVal;
984263bc 1230
7f2216bc 1231 crit_enter();
1232 if ((sc->ha_in_reset == HA_IN_RESET)
1233 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
7f2216bc 1234 crit_exit();
1235 return (EBUSY);
1236 }
1237 /*
1238 * Promotes HA_OPERATIONAL to HA_IN_RESET,
1239 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1240 */
1241 ++(sc->ha_in_reset);
1242 if (ASR_resetIOP(sc) == 0) {
1243 debug_asr_printf ("ASR_resetIOP failed\n");
1244 /*
 1245 * We really need to take this card off-line; that is easier said
 1246 * than done. Better to keep retrying for now since, if a
 1247 * UART cable is connected, the blink LEDs show the adapter is now in
 1248 * a hard state requiring action from the monitor commands to
 1249 * the HBA to continue. For debugging, waiting forever is a
1250 * good thing. In a production system, however, one may wish
1251 * to instead take the card off-line ...
1252 */
1253 /* Wait Forever */
1254 while (ASR_resetIOP(sc) == 0);
1255 }
1256 retVal = ASR_init (sc);
7f2216bc 1257 crit_exit();
1258 if (retVal != 0) {
1259 debug_asr_printf ("ASR_init failed\n");
1260 sc->ha_in_reset = HA_OFF_LINE;
1261 return (ENXIO);
1262 }
1263 if (ASR_rescan (sc) != 0) {
1264 debug_asr_printf ("ASR_rescan failed\n");
1265 }
1266 ASR_failActiveCommands (sc);
1267 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
 1268 kprintf ("asr%d: Bringing adapter back on-line\n",
1269 sc->ha_path[0]
1270 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1271 : 0);
1272 }
1273 sc->ha_in_reset = HA_OPERATIONAL;
1274 return (0);
1275} /* ASR_reset */
1276
1277/*
7b0cd042 1278 * Device timeout handler.
984263bc 1279 */
1280static void
1281asr_timeout(void *arg)
984263bc 1282{
1283 union asr_ccb *ccb = (union asr_ccb *)arg;
1284 Asr_softc_t *sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1285 int s;
1286
1287 debug_asr_print_path(ccb);
1288 debug_asr_printf("timed out");
1289
1290 /*
1291 * Check if the adapter has locked up?
1292 */
1293 if ((s = ASR_getBlinkLedCode(sc)) != 0) {
1294 /* Reset Adapter */
1295 kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
1296 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
1297 if (ASR_reset (sc) == ENXIO) {
1298 /* Try again later */
1299 callout_reset(ccb->ccb_h.timeout_ch,
1300 (ccb->ccb_h.timeout * hz) / 1000,
1301 asr_timeout, ccb);
1302 }
1303 return;
1304 }
1305 /*
1306 * Abort does not function on the ASR card!!! Walking away from
1307 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
1308 * our best bet, followed by a complete adapter reset if that fails.
1309 */
7f2216bc 1310 crit_enter();
1311 /* Check if we already timed out once to raise the issue */
1312 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
1313 debug_asr_printf (" AGAIN\nreinitializing adapter\n");
1314 if (ASR_reset (sc) == ENXIO) {
1315 callout_reset(ccb->ccb_h.timeout_ch,
1316 (ccb->ccb_h.timeout * hz) / 1000,
1317 asr_timeout, ccb);
7b0cd042 1318 }
7f2216bc 1319 crit_exit();
1320 return;
1321 }
1322 debug_asr_printf ("\nresetting bus\n");
1323 /* If the BUS reset does not take, then an adapter reset is next! */
1324 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1325 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
1326 callout_reset(ccb->ccb_h.timeout_ch,
1327 (ccb->ccb_h.timeout * hz) / 1000,
ddcafce9 1328 asr_timeout, ccb);
1329 ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
1330 xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
7f2216bc 1331 crit_exit();
1332} /* asr_timeout */
1333
1334/*
1335 * send a message asynchronously
1336 */
1337static int
1338ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
984263bc 1339{
1340 U32 MessageOffset;
1341 union asr_ccb *ccb;
1342
1343 debug_asr_printf("Host Command Dump:\n");
1344 debug_asr_dump_message(Message);
1345
1346 ccb = (union asr_ccb *)(long)
1347 I2O_MESSAGE_FRAME_getInitiatorContext64(Message);
1348
1349 if ((MessageOffset = ASR_getMessage(sc)) != EMPTY_QUEUE) {
1350 asr_set_frame(sc, Message, MessageOffset,
1351 I2O_MESSAGE_FRAME_getMessageSize(Message));
1352 if (ccb) {
1353 ASR_ccbAdd (sc, ccb);
1354 }
1355 /* Post the command */
1356 asr_set_ToFIFO(sc, MessageOffset);
1357 } else {
1358 if (ASR_getBlinkLedCode(sc)) {
1359 /*
1360 * Unlikely we can do anything if we can't grab a
1361 * message frame :-(, but lets give it a try.
1362 */
1363 (void)ASR_reset(sc);
1364 }
1365 }
1366 return (MessageOffset);
1367} /* ASR_queue */
1368
1369
1370/* Simple Scatter Gather elements */
1371#define SG(SGL,Index,Flags,Buffer,Size) \
1372 I2O_FLAGS_COUNT_setCount( \
1373 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1374 Size); \
1375 I2O_FLAGS_COUNT_setFlags( \
1376 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1377 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags)); \
1378 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress( \
1379 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]), \
1380 (Buffer == NULL) ? 0 : KVTOPHYS(Buffer))
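
/*
 * Example of the SG() helper (a sketch mirroring ASR_getParams() below):
 * a two element scatter/gather list is built, one element describing the
 * operations list sent to the adapter and one describing the buffer the
 * adapter fills with results.  The final element carries the
 * LAST_ELEMENT/END_OF_BUFFER flags:
 *
 *	SG(&(Message_Ptr->SGL), 0,
 *	    I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
 *	    Operations_Ptr, sizeof(struct Operations));
 *	SG(&(Message_Ptr->SGL), 1,
 *	    I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
 *	    Buffer_Ptr, BufferSize);
 */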
1381
1382/*
7b0cd042 1383 * Retrieve Parameter Group.
984263bc 1384 */
1385static void *
1386ASR_getParams(Asr_softc_t *sc, tid_t TID, int Group, void *Buffer,
1387 unsigned BufferSize)
984263bc 1388{
1389 struct paramGetMessage {
1390 I2O_UTIL_PARAMS_GET_MESSAGE M;
1391 char
1392 F[sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
1393 struct Operations {
1394 I2O_PARAM_OPERATIONS_LIST_HEADER Header;
1395 I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
1396 } O;
1397 } Message;
1398 struct Operations *Operations_Ptr;
1399 I2O_UTIL_PARAMS_GET_MESSAGE *Message_Ptr;
1400 struct ParamBuffer {
1401 I2O_PARAM_RESULTS_LIST_HEADER Header;
1402 I2O_PARAM_READ_OPERATION_RESULT Read;
1403 char Info[1];
1404 } *Buffer_Ptr;
1405
1406 Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(&Message,
1407 sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1408 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1409 Operations_Ptr = (struct Operations *)((char *)Message_Ptr
1410 + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1411 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1412 bzero(Operations_Ptr, sizeof(struct Operations));
1413 I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
1414 &(Operations_Ptr->Header), 1);
1415 I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
1416 &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
1417 I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
1418 &(Operations_Ptr->Template[0]), 0xFFFF);
1419 I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
1420 &(Operations_Ptr->Template[0]), Group);
1421 Buffer_Ptr = (struct ParamBuffer *)Buffer;
1422 bzero(Buffer_Ptr, BufferSize);
1423
1424 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1425 I2O_VERSION_11
1426 + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1427 / sizeof(U32)) << 4));
1428 I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
1429 TID);
1430 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
1431 I2O_UTIL_PARAMS_GET);
1432 /*
1433 * Set up the buffers as scatter gather elements.
1434 */
1435 SG(&(Message_Ptr->SGL), 0,
1436 I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
1437 Operations_Ptr, sizeof(struct Operations));
1438 SG(&(Message_Ptr->SGL), 1,
1439 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1440 Buffer_Ptr, BufferSize);
1441
1442 if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
1443 && (Buffer_Ptr->Header.ResultCount)) {
1444 return ((void *)(Buffer_Ptr->Info));
1445 }
1446 return (NULL);
1447} /* ASR_getParams */
1448
1449/*
7b0cd042 1450 * Acquire the LCT information.
984263bc 1451 */
1452static int
1453ASR_acquireLct(Asr_softc_t *sc)
984263bc 1454{
1455 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
1456 PI2O_SGE_SIMPLE_ELEMENT sg;
1457 int MessageSizeInBytes;
1458 caddr_t v;
1459 int len;
a1f2c914 1460 I2O_LCT Table, *TableP = &Table;
1461 PI2O_LCT_ENTRY Entry;
1462
1463 /*
1464 * sc value assumed valid
1465 */
1466 MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
1467 sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
1468 if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)kmalloc(
1469 MessageSizeInBytes, M_TEMP, M_WAITOK)) == NULL) {
1470 return (ENOMEM);
1471 }
1472 (void)ASR_fillMessage((void *)Message_Ptr, MessageSizeInBytes);
1473 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1474 (I2O_VERSION_11 + (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
1475 sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4)));
1476 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1477 I2O_EXEC_LCT_NOTIFY);
1478 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
1479 I2O_CLASS_MATCH_ANYCLASS);
1480 /*
1481 * Call the LCT table to determine the number of device entries
1482 * to reserve space for.
1483 */
1484 SG(&(Message_Ptr->SGL), 0,
a1f2c914 1485 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, TableP,
1486 sizeof(I2O_LCT));
1487 /*
1488 * since this code is reused in several systems, code efficiency
1489 * is greater by using a shift operation rather than a divide by
1490 * sizeof(u_int32_t).
1491 */
1492 I2O_LCT_setTableSize(&Table,
1493 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1494 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1495 /*
1496 * Determine the size of the LCT table.
1497 */
1498 if (sc->ha_LCT) {
1499 kfree(sc->ha_LCT, M_TEMP);
1500 }
1501 /*
1502 * malloc only generates contiguous memory when less than a
1503 * page is expected. We must break the request up into an SG list ...
1504 */
1505 if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
1506 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
1507 || (len > (128 * 1024))) { /* Arbitrary */
1508 kfree(Message_Ptr, M_TEMP);
1509 return (EINVAL);
1510 }
1511 if ((sc->ha_LCT = (PI2O_LCT)kmalloc (len, M_TEMP, M_WAITOK)) == NULL) {
1512 kfree(Message_Ptr, M_TEMP);
1513 return (ENOMEM);
1514 }
1515 /*
1516 * since this code is reused in several systems, code efficiency
1517 * is greater by using a shift operation rather than a divide by
1518 * sizeof(u_int32_t).
1519 */
1520 I2O_LCT_setTableSize(sc->ha_LCT,
1521 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1522 /*
1523 * Convert the access to the LCT table into a SG list.
1524 */
1525 sg = Message_Ptr->SGL.u.Simple;
1526 v = (caddr_t)(sc->ha_LCT);
1527 for (;;) {
1528 int next, base, span;
1529
1530 span = 0;
1531 next = base = KVTOPHYS(v);
1532 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1533
1534 /* How far can we go contiguously */
1535 while ((len > 0) && (base == next)) {
1536 int size;
1537
1538 next = trunc_page(base) + PAGE_SIZE;
1539 size = next - base;
1540 if (size > len) {
1541 size = len;
1542 }
1543 span += size;
1544 v += size;
1545 len -= size;
1546 base = KVTOPHYS(v);
1547 }
1548
1549 /* Construct the Flags */
1550 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1551 {
1552 int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
1553 if (len <= 0) {
1554 rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
1555 | I2O_SGL_FLAGS_LAST_ELEMENT
1556 | I2O_SGL_FLAGS_END_OF_BUFFER);
1557 }
1558 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
1559 }
1560
1561 if (len <= 0) {
1562 break;
1563 }
1564
1565 /*
1566 * Incrementing requires resizing of the packet.
1567 */
1568 ++sg;
1569 MessageSizeInBytes += sizeof(*sg);
1570 I2O_MESSAGE_FRAME_setMessageSize(
1571 &(Message_Ptr->StdMessageFrame),
1572 I2O_MESSAGE_FRAME_getMessageSize(
1573 &(Message_Ptr->StdMessageFrame))
1574 + (sizeof(*sg) / sizeof(U32)));
1575 {
1576 PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;
1577
1578 if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
1579 kmalloc(MessageSizeInBytes, M_TEMP, M_WAITOK))
1580 == NULL) {
1581 kfree(sc->ha_LCT, M_TEMP);
1582 sc->ha_LCT = NULL;
1583 kfree(Message_Ptr, M_TEMP);
1584 return (ENOMEM);
1585 }
1586 span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
1587 bcopy(Message_Ptr, NewMessage_Ptr, span);
1588 kfree(Message_Ptr, M_TEMP);
1589 sg = (PI2O_SGE_SIMPLE_ELEMENT)
1590 (((caddr_t)NewMessage_Ptr) + span);
1591 Message_Ptr = NewMessage_Ptr;
1592 }
1593 }
1594 { int retval;
1595
1596 retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1597 kfree(Message_Ptr, M_TEMP);
1598 if (retval != CAM_REQ_CMP) {
1599 return (ENODEV);
1600 }
1601 }
1602 /* If the LCT table grew, lets truncate accesses */
1603 if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
1604 I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
1605 }
1606 for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
1607 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1608 ++Entry) {
1609 Entry->le_type = I2O_UNKNOWN;
1610 switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {
1611
1612 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
1613 Entry->le_type = I2O_BSA;
1614 break;
1615
1616 case I2O_CLASS_SCSI_PERIPHERAL:
1617 Entry->le_type = I2O_SCSI;
1618 break;
1619
1620 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
1621 Entry->le_type = I2O_FCA;
1622 break;
1623
1624 case I2O_CLASS_BUS_ADAPTER_PORT:
1625 Entry->le_type = I2O_PORT | I2O_SCSI;
1626 /* FALLTHRU */
1627 case I2O_CLASS_FIBRE_CHANNEL_PORT:
1628 if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
1629 I2O_CLASS_FIBRE_CHANNEL_PORT) {
1630 Entry->le_type = I2O_PORT | I2O_FCA;
1631 }
1632 { struct ControllerInfo {
1633 I2O_PARAM_RESULTS_LIST_HEADER Header;
1634 I2O_PARAM_READ_OPERATION_RESULT Read;
1635 I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1636 } Buffer;
1637 PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1638
1639 Entry->le_bus = 0xff;
1640 Entry->le_target = 0xff;
1641 Entry->le_lun = 0xff;
1642
1643 if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
1644 ASR_getParams(sc,
1645 I2O_LCT_ENTRY_getLocalTID(Entry),
1646 I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
1647 &Buffer, sizeof(struct ControllerInfo))) == NULL) {
1648 continue;
1649 }
1650 Entry->le_target
1651 = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
1652 Info);
1653 Entry->le_lun = 0;
1654 } /* FALLTHRU */
1655 default:
1656 continue;
1657 }
1658 { struct DeviceInfo {
1659 I2O_PARAM_RESULTS_LIST_HEADER Header;
1660 I2O_PARAM_READ_OPERATION_RESULT Read;
1661 I2O_DPT_DEVICE_INFO_SCALAR Info;
1662 } Buffer;
1663 PI2O_DPT_DEVICE_INFO_SCALAR Info;
1664
1665 Entry->le_bus = 0xff;
1666 Entry->le_target = 0xff;
1667 Entry->le_lun = 0xff;
1668
1669 if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
1670 ASR_getParams(sc,
1671 I2O_LCT_ENTRY_getLocalTID(Entry),
1672 I2O_DPT_DEVICE_INFO_GROUP_NO,
1673 &Buffer, sizeof(struct DeviceInfo))) == NULL) {
1674 continue;
1675 }
1676 Entry->le_type
1677 |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
1678 Entry->le_bus
1679 = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
1680 if ((Entry->le_bus > sc->ha_MaxBus)
1681 && (Entry->le_bus <= MAX_CHANNEL)) {
1682 sc->ha_MaxBus = Entry->le_bus;
1683 }
1684 Entry->le_target
1685 = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
1686 Entry->le_lun
1687 = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
1688 }
1689 }
1690 /*
1691 * A zero return value indicates success.
1692 */
1693 return (0);
1694} /* ASR_acquireLct */
1695
1696/*
1697 * Initialize a message frame.
1698 * We assume that the CDB has already been set up, so all we do here is
1699 * generate the Scatter Gather list.
1700 */
1701static PI2O_MESSAGE_FRAME
1702ASR_init_message(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message)
984263bc 1703{
1704 PI2O_MESSAGE_FRAME Message_Ptr;
1705 PI2O_SGE_SIMPLE_ELEMENT sg;
1706 Asr_softc_t *sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1707 vm_size_t size, len;
1708 caddr_t v;
1709 U32 MessageSize;
1710 int next, span, base, rw;
1711 int target = ccb->ccb_h.target_id;
1712 int lun = ccb->ccb_h.target_lun;
 1713 int bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1714 tid_t TID;
1715
1716 /* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
1717 Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
1718 bzero(Message_Ptr, (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
1719 sizeof(I2O_SG_ELEMENT)));
1720
1721 if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
1722 PI2O_LCT_ENTRY Device;
1723
1724 TID = 0;
1725 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
1726 (((U32 *)sc->ha_LCT) + I2O_LCT_getTableSize(sc->ha_LCT));
1727 ++Device) {
1728 if ((Device->le_type != I2O_UNKNOWN)
1729 && (Device->le_bus == bus)
1730 && (Device->le_target == target)
1731 && (Device->le_lun == lun)
1732 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
1733 TID = I2O_LCT_ENTRY_getLocalTID(Device);
1734 ASR_setTid(sc, Device->le_bus,
1735 Device->le_target, Device->le_lun,
1736 TID);
1737 break;
1738 }
1739 }
1740 }
1741 if (TID == (tid_t)0) {
1742 return (NULL);
1743 }
1744 I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
1745 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
1746 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
1747 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
1748 (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1749 / sizeof(U32)) << 4));
1750 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
1751 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1752 - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
1753 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
1754 I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
1755 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
1756 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
1757 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1758 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1759 I2O_SCB_FLAG_ENABLE_DISCONNECT
1760 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1761 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
1762 /*
1763 * We do not need any (optional byteswapping) method access to
1764 * the Initiator & Transaction context field.
1765 */
1766 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
1767
1768 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
1769 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
1770 /*
1771 * copy the cdb over
1772 */
1773 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
1774 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
1775 bcopy(&(ccb->csio.cdb_io),
1776 ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB,
1777 ccb->csio.cdb_len);
1778
1779 /*
1780 * Given a buffer describing a transfer, set up a scatter/gather map
1781 * in a ccb to map that SCSI transfer.
1782 */
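	/*
	 * Direction is derived from the CAM flags: CAM_DIR_IN selects
	 * I2O_SCB_FLAG_XFER_FROM_DEVICE, any other direction with a non-zero
	 * dxfer_len selects I2O_SCB_FLAG_XFER_TO_DEVICE, and a zero-length
	 * transfer sets neither direction bit.
	 */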
1783
1784 rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;
1785
1786 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1787 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1788 (ccb->csio.dxfer_len)
1789 ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
1790 | I2O_SCB_FLAG_ENABLE_DISCONNECT
1791 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1792 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
1793 : (I2O_SCB_FLAG_XFER_FROM_DEVICE
1794 | I2O_SCB_FLAG_ENABLE_DISCONNECT
1795 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1796 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
1797 : (I2O_SCB_FLAG_ENABLE_DISCONNECT
1798 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1799 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
1800
1801 /*
1802 * Given a transfer described by a `data', fill in the SG list.
1803 */
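	/*
	 * Each simple element below covers one physically contiguous run:
	 * starting from KVTOPHYS(v), the inner loop extends the run a page at
	 * a time while the next page remains physically adjacent, records the
	 * accumulated span as the element count, and marks the final element
	 * with I2O_SGL_FLAGS_LAST_ELEMENT before the sense-data element is
	 * appended.
	 */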
1804 sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];
1805
1806 len = ccb->csio.dxfer_len;
1807 v = ccb->csio.data_ptr;
1808 KASSERT(ccb->csio.dxfer_len >= 0, ("csio.dxfer_len < 0"));
1809 MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
1810 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
1811 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
1812 while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1813 Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
1814 span = 0;
1815 next = base = KVTOPHYS(v);
1816 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1817
1818 /* How far can we go contiguously */
1819 while ((len > 0) && (base == next)) {
1820 next = trunc_page(base) + PAGE_SIZE;
1821 size = next - base;
1822 if (size > len) {
1823 size = len;
1824 }
1825 span += size;
1826 v += size;
1827 len -= size;
1828 base = KVTOPHYS(v);
1829 }
1830
1831 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1832 if (len == 0) {
1833 rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
1834 }
1835 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
1836 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
1837 ++sg;
1838 MessageSize += sizeof(*sg) / sizeof(U32);
1839 }
1840 /* We always do the request sense ... */
1841 if ((span = ccb->csio.sense_len) == 0) {
1842 span = sizeof(ccb->csio.sense_data);
1843 }
1844 SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1845 &(ccb->csio.sense_data), span);
1846 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
1847 MessageSize + (sizeof(*sg) / sizeof(U32)));
1848 return (Message_Ptr);
1849} /* ASR_init_message */
1850
1851/*
 1852 * Initialize the outbound (reply) message FIFO of the adapter.
 1853 */
1854static U32
1855ASR_initOutBound(Asr_softc_t *sc)
 1856{
1857 struct initOutBoundMessage {
1858 I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
1859 U32 R;
1860 } Message;
1861 PI2O_EXEC_OUTBOUND_INIT_MESSAGE Message_Ptr;
1862 U32 *volatile Reply_Ptr;
1863 U32 Old;
1864
1865 /*
1866 * Build up our copy of the Message.
1867 */
1868 Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(&Message,
1869 sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
1870 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1871 I2O_EXEC_OUTBOUND_INIT);
1872 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
1873 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
1874 sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
1875 /*
1876 * Reset the Reply Status
1877 */
1878 *(Reply_Ptr = (U32 *)((char *)Message_Ptr
1879 + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
1880 SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
1881 sizeof(U32));
1882 /*
1883 * Send the Message out
1884 */
1885 if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
1886 0xffffffff) {
1887 u_long size, addr;
1888
1889 /*
1890 * Wait for a response (Poll).
1891 */
1892 while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
1893 /*
1894 * Re-enable the interrupts.
1895 */
1896 asr_set_intr(sc, Old);
1897 /*
1898 * Populate the outbound table.
1899 */
1900 if (sc->ha_Msgs == NULL) {
1901
1902 /* Allocate the reply frames */
1903 size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
1904 * sc->ha_Msgs_Count;
1905
1906 /*
1907 * contigmalloc only works reliably at
1908 * initialization time.
1909 */
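			/*
			 * The pool is therefore allocated once and kept for
			 * the life of the driver; each frame's physical
			 * address is handed to the IOP below through the
			 * FromFIFO so the controller always has buffers to
			 * post its replies into.
			 */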
1910 if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
1911 contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
1912 0xFFFFFFFFul, (u_long)sizeof(U32), 0ul)) != NULL) {
1913 bzero(sc->ha_Msgs, size);
1914 sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
1915 }
1916 }
1917
1918 /* Initialize the outbound FIFO */
1919 if (sc->ha_Msgs != NULL)
1920 for(size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
1921 size; --size) {
1922 asr_set_FromFIFO(sc, addr);
1923 addr +=
1924 sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
1925 }
1926 return (*Reply_Ptr);
1927 }
1928 return (0);
1929} /* ASR_initOutBound */
1930
1931/*
 1932 * Set the system table
 1933 */
1934static int
1935ASR_setSysTab(Asr_softc_t *sc)
 1936{
1937 PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
1938 PI2O_SET_SYSTAB_HEADER SystemTable;
1939 Asr_softc_t * ha;
1940 PI2O_SGE_SIMPLE_ELEMENT sg;
1941 int retVal;
1942
1943 if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)kmalloc (
1944 sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO)) == NULL) {
1945 return (ENOMEM);
1946 }
1947 for (ha = Asr_softc_list; ha; ha = ha->ha_next) {
1948 ++SystemTable->NumberEntries;
1949 }
1950 if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)kmalloc (
1951 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
1952 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
1953 M_TEMP, M_WAITOK)) == NULL) {
1954 kfree(SystemTable, M_TEMP);
1955 return (ENOMEM);
1956 }
1957 (void)ASR_fillMessage((void *)Message_Ptr,
1958 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
1959 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
1960 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1961 (I2O_VERSION_11 +
1962 (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1963 / sizeof(U32)) << 4)));
1964 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1965 I2O_EXEC_SYS_TAB_SET);
1966 /*
1967 * Call the LCT table to determine the number of device entries
1968 * to reserve space for.
 1969	 * Since this code is reused in several systems, code efficiency
1970 * is greater by using a shift operation rather than a divide by
1971 * sizeof(u_int32_t).
1972 */
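	/*
	 * The upper nibble of VersionOffset holds the offset of the SG list
	 * in 32-bit words, so masking with 0xF0 and shifting right by two
	 * (instead of ">> 4" followed by "* sizeof(U32)") yields the byte
	 * offset of the first SG element directly.
	 */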
1973 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
1974 + ((I2O_MESSAGE_FRAME_getVersionOffset(
1975 &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
1976 SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
1977 ++sg;
1978 for (ha = Asr_softc_list; ha; ha = ha->ha_next) {
1979 SG(sg, 0,
1980 ((ha->ha_next)
1981 ? (I2O_SGL_FLAGS_DIR)
1982 : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
1983 &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
1984 ++sg;
1985 }
1986 SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
1987 SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
1988 | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
1989 retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1990 kfree(Message_Ptr, M_TEMP);
1991 kfree(SystemTable, M_TEMP);
1992 return (retVal);
1993} /* ASR_setSysTab */
1994
1995static int
1996ASR_acquireHrt(Asr_softc_t *sc)
 1997{
1998 I2O_EXEC_HRT_GET_MESSAGE Message;
1999 I2O_EXEC_HRT_GET_MESSAGE *Message_Ptr;
2000 struct {
2001 I2O_HRT Header;
2002 I2O_HRT_ENTRY Entry[MAX_CHANNEL];
 2003	} Hrt, *HrtP = &Hrt;
2004 u_int8_t NumberOfEntries;
2005 PI2O_HRT_ENTRY Entry;
2006
2007 bzero(&Hrt, sizeof (Hrt));
2008 Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(&Message,
2009 sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2010 + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2011 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2012 (I2O_VERSION_11
2013 + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2014 / sizeof(U32)) << 4)));
2015 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
2016 I2O_EXEC_HRT_GET);
2017
2018 /*
2019 * Set up the buffers as scatter gather elements.
2020 */
2021 SG(&(Message_Ptr->SGL), 0,
2022 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
 2023	    HrtP, sizeof(Hrt));
2024 if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
2025 return (ENODEV);
2026 }
2027 if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
2028 > (MAX_CHANNEL + 1)) {
2029 NumberOfEntries = MAX_CHANNEL + 1;
2030 }
2031 for (Entry = Hrt.Header.HRTEntry;
2032 NumberOfEntries != 0;
2033 ++Entry, --NumberOfEntries) {
2034 PI2O_LCT_ENTRY Device;
2035
2036 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2037 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
2038 ++Device) {
2039 if (I2O_LCT_ENTRY_getLocalTID(Device)
2040 == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
2041 Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
2042 Entry) >> 16;
2043 if ((Device->le_bus > sc->ha_MaxBus)
2044 && (Device->le_bus <= MAX_CHANNEL)) {
2045 sc->ha_MaxBus = Device->le_bus;
2046 }
2047 }
2048 }
2049 }
2050 return (0);
2051} /* ASR_acquireHrt */
2052
2053/*
 2054 * Enable the adapter.
 2055 */
2056static int
2057ASR_enableSys(Asr_softc_t *sc)
 2058{
2059 I2O_EXEC_SYS_ENABLE_MESSAGE Message;
2060 PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr;
2061
2062 Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(&Message,
2063 sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2064 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2065 I2O_EXEC_SYS_ENABLE);
2066 return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2067} /* ASR_enableSys */
2068
2069/*
 2070 * Perform the stages necessary to initialize the adapter
 2071 */
2072static int
2073ASR_init(Asr_softc_t *sc)
 2074{
2075 return ((ASR_initOutBound(sc) == 0)
2076 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2077 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2078} /* ASR_init */
2079
2080/*
 2081 * Send a Synchronize Cache command to the target device.
 2082 */
2083static void
2084ASR_sync(Asr_softc_t *sc, int bus, int target, int lun)
 2085{
2086 tid_t TID;
2087
2088 /*
2089 * We will not synchronize the device when there are outstanding
2090 * commands issued by the OS (this is due to a locked up device,
2091 * as the OS normally would flush all outstanding commands before
2092 * issuing a shutdown or an adapter reset).
2093 */
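	/*
	 * When it is safe to do so, the flush is issued as a private
	 * I2O_SCSI_SCB_EXEC message carrying a six-byte SYNCHRONIZE CACHE
	 * CDB (opcode 0x35) addressed to the target's TID, with no data
	 * transfer attached.
	 */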
2094 if ((sc != NULL)
2095 && (LIST_FIRST(&(sc->ha_ccb)) != NULL)
2096 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
2097 && (TID != (tid_t)0)) {
2098 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message;
2099 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
2100
 2101		Message_Ptr = &Message;
2102 bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2103 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2104
2105 I2O_MESSAGE_FRAME_setVersionOffset(
2106 (PI2O_MESSAGE_FRAME)Message_Ptr,
2107 I2O_VERSION_11
2108 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2109 - sizeof(I2O_SG_ELEMENT))
2110 / sizeof(U32)) << 4));
2111 I2O_MESSAGE_FRAME_setMessageSize(
2112 (PI2O_MESSAGE_FRAME)Message_Ptr,
2113 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2114 - sizeof(I2O_SG_ELEMENT))
2115 / sizeof(U32));
2116 I2O_MESSAGE_FRAME_setInitiatorAddress (
2117 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2118 I2O_MESSAGE_FRAME_setFunction(
2119 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2120 I2O_MESSAGE_FRAME_setTargetAddress(
2121 (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
2122 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2123 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2124 I2O_SCSI_SCB_EXEC);
2125 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
2126 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2127 I2O_SCB_FLAG_ENABLE_DISCONNECT
2128 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2129 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2130 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2131 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2132 DPT_ORGANIZATION_ID);
2133 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2134 Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
2135 Message_Ptr->CDB[1] = (lun << 5);
2136
2137 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2138 (I2O_SCB_FLAG_XFER_FROM_DEVICE
2139 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2140 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2141 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2142
2143 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2144
2145 }
2146}
2147
2148static void
2149ASR_synchronize(Asr_softc_t *sc)
 2150{
2151 int bus, target, lun;
2152
2153 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2154 for (target = 0; target <= sc->ha_MaxId; ++target) {
2155 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2156 ASR_sync(sc,bus,target,lun);
2157 }
2158 }
2159 }
2160}
2161
2162/*
2163 * Reset the HBA, targets and BUS.
2164 * Currently this resets *all* the SCSI busses.
 2165 */
2166static __inline void
2167asr_hbareset(Asr_softc_t *sc)
 2168{
2169 ASR_synchronize(sc);
2170 (void)ASR_reset(sc);
2171} /* asr_hbareset */
2172
2173/*
 2174 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2175 * limit and a reduction in error checking (in the pre 4.0 case).
2176 */
2177static int
2178asr_pci_map_mem(device_t dev, Asr_softc_t *sc)
 2179{
2180 int rid;
2181 u_int32_t p, l, s;
2182
2183 /*
2184 * I2O specification says we must find first *memory* mapped BAR
2185 */
2186 for (rid = 0; rid < 4; rid++) {
2187 p = pci_read_config(dev, PCIR_BAR(rid), sizeof(p));
2188 if ((p & 1) == 0) {
2189 break;
2190 }
2191 }
2192 /*
2193 * Give up?
2194 */
2195 if (rid >= 4) {
2196 rid = 0;
2197 }
2198 rid = PCIR_BAR(rid);
2199 p = pci_read_config(dev, rid, sizeof(p));
2200 pci_write_config(dev, rid, -1, sizeof(p));
2201 l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
2202 pci_write_config(dev, rid, p, sizeof(p));
2203 if (l > MAX_MAP) {
2204 l = MAX_MAP;
2205 }
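	/*
	 * The block above is the classic BAR sizing probe: save the BAR,
	 * write all ones, read back the mask (clearing the low four
	 * type/prefetch bits), negate it to get the region length, and then
	 * restore the original value before the region is actually mapped.
	 */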
2206 /*
2207 * The 2005S Zero Channel RAID solution is not a perfect PCI
2208 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
2209 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
 2210	 * BAR0+2MB and sets its size to 2MB. The IOP registers are
 2211	 * accessible via BAR0, the messaging registers are accessible
 2212	 * via BAR1. This applies when the subdevice code is 50 to 59 decimal.
2213 */
2214 s = pci_read_config(dev, PCIR_DEVVENDOR, sizeof(s));
2215 if (s != 0xA5111044) {
2216 s = pci_read_config(dev, PCIR_SUBVEND_0, sizeof(s));
2217 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2218 && (ADPTDOMINATOR_SUB_ID_START <= s)
2219 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2220 l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
2221 }
2222 }
2223 p &= ~15;
2224 sc->ha_mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
2225 p, p + l, l, RF_ACTIVE);
2226 if (sc->ha_mem_res == NULL) {
2227 return (0);
2228 }
2229 sc->ha_Base = rman_get_start(sc->ha_mem_res);
2230 sc->ha_i2o_bhandle = rman_get_bushandle(sc->ha_mem_res);
2231 sc->ha_i2o_btag = rman_get_bustag(sc->ha_mem_res);
2232
2233 if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
2234 if ((rid += sizeof(u_int32_t)) >= PCIR_BAR(4)) {
2235 return (0);
2236 }
2237 p = pci_read_config(dev, rid, sizeof(p));
2238 pci_write_config(dev, rid, -1, sizeof(p));
2239 l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
2240 pci_write_config(dev, rid, p, sizeof(p));
2241 if (l > MAX_MAP) {
2242 l = MAX_MAP;
2243 }
2244 p &= ~15;
2245 sc->ha_mes_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
2246 p, p + l, l, RF_ACTIVE);
2247 if (sc->ha_mes_res == NULL) {
2248 return (0);
2249 }
2250 sc->ha_frame_bhandle = rman_get_bushandle(sc->ha_mes_res);
2251 sc->ha_frame_btag = rman_get_bustag(sc->ha_mes_res);
2252 } else {
2253 sc->ha_frame_bhandle = sc->ha_i2o_bhandle;
2254 sc->ha_frame_btag = sc->ha_i2o_btag;
2255 }
2256 return (1);
2257} /* asr_pci_map_mem */
2258
2259/*
 2260 * A simplified copy of the real pci_map_int with additional
2261 * registration requirements.
2262 */
2263static int
2264asr_pci_map_int(device_t dev, Asr_softc_t *sc)
 2265{
2266 int rid = 0;
2267
2268 sc->ha_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2269 RF_ACTIVE | RF_SHAREABLE);
2270 if (sc->ha_irq_res == NULL) {
2271 return (0);
2272 }
2273 if (bus_setup_intr(dev, sc->ha_irq_res, 0,
2274 (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr), NULL)) {
2275 return (0);
2276 }
2277 sc->ha_irq = pci_read_config(dev, PCIR_INTLINE, sizeof(char));
2278 return (1);
2279} /* asr_pci_map_int */
2280
2281static void
2282asr_status_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2283{
2284 Asr_softc_t *sc;
2285
2286 if (error)
2287 return;
2288
2289 sc = (Asr_softc_t *)arg;
2290
2291 /* XXX
2292 * The status word can be at a 64-bit address, but the existing
2293 * accessor macros simply cannot manipulate 64-bit addresses.
2294 */
2295 sc->ha_status_phys = (u_int32_t)segs[0].ds_addr +
2296 offsetof(struct Asr_status_mem, status);
2297 sc->ha_rstatus_phys = (u_int32_t)segs[0].ds_addr +
2298 offsetof(struct Asr_status_mem, rstatus);
2299}
2300
2301static int
2302asr_alloc_dma(Asr_softc_t *sc)
2303{
2304 device_t dev;
2305
2306 dev = sc->ha_dev;
2307
2308 if (bus_dma_tag_create(NULL, /* parent */
2309 1, 0, /* algnmnt, boundary */
2310 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2311 BUS_SPACE_MAXADDR, /* highaddr */
2312 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
2313 BUS_SPACE_UNRESTRICTED, /* nsegments */
2314 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
2315 0, /* flags */
2316 &sc->ha_parent_dmat)) {
2317 device_printf(dev, "Cannot allocate parent DMA tag\n");
2318 return (ENOMEM);
2319 }
2320
2321 if (bus_dma_tag_create(sc->ha_parent_dmat, /* parent */
2322 1, 0, /* algnmnt, boundary */
2323 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2324 BUS_SPACE_MAXADDR, /* highaddr */
2325 sizeof(sc->ha_statusmem),/* maxsize */
2326 1, /* nsegments */
2327 sizeof(sc->ha_statusmem),/* maxsegsize */
2328 0, /* flags */
2329 &sc->ha_statusmem_dmat)) {
2330 device_printf(dev, "Cannot allocate status DMA tag\n");
2331 bus_dma_tag_destroy(sc->ha_parent_dmat);
2332 return (ENOMEM);
2333 }
2334
2335 if (bus_dmamem_alloc(sc->ha_statusmem_dmat, (void **)&sc->ha_statusmem,
2336 BUS_DMA_NOWAIT, &sc->ha_statusmem_dmamap)) {
2337 device_printf(dev, "Cannot allocate status memory\n");
2338 bus_dma_tag_destroy(sc->ha_statusmem_dmat);
2339 bus_dma_tag_destroy(sc->ha_parent_dmat);
2340 return (ENOMEM);
2341 }
2342 (void)bus_dmamap_load(sc->ha_statusmem_dmat, sc->ha_statusmem_dmamap,
2343 sc->ha_statusmem, sizeof(sc->ha_statusmem), asr_status_cb, sc, 0);
2344
2345 return (0);
2346}
2347
2348static void
2349asr_release_dma(Asr_softc_t *sc)
2350{
2351
2352 if (sc->ha_rstatus_phys != 0)
2353 bus_dmamap_unload(sc->ha_statusmem_dmat,
2354 sc->ha_statusmem_dmamap);
2355 if (sc->ha_statusmem != NULL)
2356 bus_dmamem_free(sc->ha_statusmem_dmat, sc->ha_statusmem,
2357 sc->ha_statusmem_dmamap);
2358 if (sc->ha_statusmem_dmat != NULL)
2359 bus_dma_tag_destroy(sc->ha_statusmem_dmat);
2360 if (sc->ha_parent_dmat != NULL)
2361 bus_dma_tag_destroy(sc->ha_parent_dmat);
2362}
2363
 2364/*
 2365 * Attach the devices, and virtual devices to the driver list.
 2366 */
2367static int
2368asr_attach(device_t dev)
 2369{
2370 PI2O_EXEC_STATUS_GET_REPLY status;
2371 PI2O_LCT_ENTRY Device;
2372 Asr_softc_t *sc, **ha;
2373 struct scsi_inquiry_data *iq;
2374 int bus, size, unit;
2375 int error;
2376
2377 sc = device_get_softc(dev);
2378 unit = device_get_unit(dev);
2379 sc->ha_dev = dev;
2380
2381 if (Asr_softc_list == NULL) {
2382 /*
2383 * Fixup the OS revision as saved in the dptsig for the
2384 * engine (dptioctl.h) to pick up.
2385 */
2386 bcopy(osrelease, &ASR_sig.dsDescription[16], 5);
2387 }
2388 /*
2389 * Initialize the software structure
2390 */
2391 LIST_INIT(&(sc->ha_ccb));
2392 /* Link us into the HA list */
2393 for (ha = &Asr_softc_list; *ha; ha = &((*ha)->ha_next))
2394 ;
2395 *(ha) = sc;
2396
2397 /*
2398 * This is the real McCoy!
2399 */
2400 if (!asr_pci_map_mem(dev, sc)) {
2401 device_printf(dev, "could not map memory\n");
2402 return(ENXIO);
2403 }
2404 /* Enable if not formerly enabled */
2405 pci_write_config(dev, PCIR_COMMAND,
2406 pci_read_config(dev, PCIR_COMMAND, sizeof(char)) |
2407 PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN, sizeof(char));
2408
2409 sc->ha_pciBusNum = pci_get_bus(dev);
2410 sc->ha_pciDeviceNum = (pci_get_slot(dev) << 3) | pci_get_function(dev);
2411
2412 if ((error = asr_alloc_dma(sc)) != 0)
2413 return (error);
2414
 2415	/* Check if the device is there */
2416 if (ASR_resetIOP(sc) == 0) {
2417 device_printf(dev, "Cannot reset adapter\n");
2418 asr_release_dma(sc);
2419 return (EIO);
2420 }
2421 status = &sc->ha_statusmem->status;
2422 if (ASR_getStatus(sc) == NULL) {
2423 device_printf(dev, "could not initialize hardware\n");
2424 asr_release_dma(sc);
2425 return(ENODEV);
2426 }
2427 sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2428 sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2429 sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2430 sc->ha_SystemTable.IopState = status->IopState;
2431 sc->ha_SystemTable.MessengerType = status->MessengerType;
2432 sc->ha_SystemTable.InboundMessageFrameSize = status->InboundMFrameSize;
2433 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow =
2434 (U32)(sc->ha_Base + I2O_REG_TOFIFO); /* XXX 64-bit */
2435
2436 if (!asr_pci_map_int(dev, (void *)sc)) {
2437 device_printf(dev, "could not map interrupt\n");
2438 asr_release_dma(sc);
2439 return(ENXIO);
2440 }
2441
 2442	/* Adjust the maximum inbound count */
2443 if (((sc->ha_QueueSize =
2444 I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) >
2445 MAX_INBOUND) || (sc->ha_QueueSize == 0)) {
2446 sc->ha_QueueSize = MAX_INBOUND;
2447 }
2448
2449 /* Adjust the maximum outbound count */
2450 if (((sc->ha_Msgs_Count =
2451 I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) >
2452 MAX_OUTBOUND) || (sc->ha_Msgs_Count == 0)) {
2453 sc->ha_Msgs_Count = MAX_OUTBOUND;
2454 }
2455 if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2456 sc->ha_Msgs_Count = sc->ha_QueueSize;
2457 }
2458
2459 /* Adjust the maximum SG size to adapter */
2460 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(status) <<
2461 2)) > MAX_INBOUND_SIZE) {
2462 size = MAX_INBOUND_SIZE;
2463 }
2464 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2465 + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2466
2467 /*
2468 * Only do a bus/HBA reset on the first time through. On this
2469 * first time through, we do not send a flush to the devices.
2470 */
2471 if (ASR_init(sc) == 0) {
2472 struct BufferInfo {
2473 I2O_PARAM_RESULTS_LIST_HEADER Header;
2474 I2O_PARAM_READ_OPERATION_RESULT Read;
2475 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2476 } Buffer;
2477 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2478#define FW_DEBUG_BLED_OFFSET 8
2479
2480 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2481 ASR_getParams(sc, 0, I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2482 &Buffer, sizeof(struct BufferInfo))) != NULL) {
2483 sc->ha_blinkLED = FW_DEBUG_BLED_OFFSET +
2484 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info);
2485 }
2486 if (ASR_acquireLct(sc) == 0) {
2487 (void)ASR_acquireHrt(sc);
2488 }
2489 } else {
2490 device_printf(dev, "failed to initialize\n");
2491 asr_release_dma(sc);
2492 return(ENXIO);
2493 }
2494 /*
2495 * Add in additional probe responses for more channels. We
2496 * are reusing the variable `target' for a channel loop counter.
 2497	 * Done here because we need both the acquireLct and
2498 * acquireHrt data.
2499 */
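	/*
	 * The walk below derives ha_MaxId and ha_MaxLun from the claimed LCT
	 * entries (UserTID == 0xFFF) and records the adapter's own SCSI ID on
	 * each bus from the port entries, so that the SIMs registered further
	 * down report sensible initiator IDs and wildcard limits.
	 */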
2500 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2501 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); ++Device) {
2502 if (Device->le_type == I2O_UNKNOWN) {
2503 continue;
2504 }
2505 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
2506 if (Device->le_target > sc->ha_MaxId) {
2507 sc->ha_MaxId = Device->le_target;
2508 }
2509 if (Device->le_lun > sc->ha_MaxLun) {
2510 sc->ha_MaxLun = Device->le_lun;
2511 }
2512 }
2513 if (((Device->le_type & I2O_PORT) != 0)
2514 && (Device->le_bus <= MAX_CHANNEL)) {
2515 /* Do not increase MaxId for efficiency */
2516 sc->ha_adapter_target[Device->le_bus] =
2517 Device->le_target;
2518 }
2519 }
2520
2521 /*
2522 * Print the HBA model number as inquired from the card.
2523 */
2524
2525 device_printf(dev, " ");
2526
2527 if ((iq = (struct scsi_inquiry_data *)kmalloc(
2528 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO)) !=
2529 NULL) {
2530 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message;
2531 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
2532 int posted = 0;
2533
 2534		Message_Ptr = &Message;
2535 bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2536 sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2537
2538 I2O_MESSAGE_FRAME_setVersionOffset(
2539 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_VERSION_11 |
2540 (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2541 - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4));
2542 I2O_MESSAGE_FRAME_setMessageSize(
2543 (PI2O_MESSAGE_FRAME)Message_Ptr,
2544 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2545 sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) /
2546 sizeof(U32));
2547 I2O_MESSAGE_FRAME_setInitiatorAddress(
2548 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2549 I2O_MESSAGE_FRAME_setFunction(
2550 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2551 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode(
2552 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
2553 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2554 I2O_SCB_FLAG_ENABLE_DISCONNECT
2555 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2556 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2557 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
2558 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2559 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2560 DPT_ORGANIZATION_ID);
2561 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2562 Message_Ptr->CDB[0] = INQUIRY;
2563 Message_Ptr->CDB[4] =
2564 (unsigned char)sizeof(struct scsi_inquiry_data);
2565 if (Message_Ptr->CDB[4] == 0) {
2566 Message_Ptr->CDB[4] = 255;
2567 }
2568
2569 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2570 (I2O_SCB_FLAG_XFER_FROM_DEVICE
2571 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2572 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2573 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2574
2575 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
 2576		    Message_Ptr, sizeof(struct scsi_inquiry_data));
2577 SG(&(Message_Ptr->SGL), 0,
2578 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2579 iq, sizeof(struct scsi_inquiry_data));
2580 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2581
2582 if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
2583 kprintf (" ");
2584 ASR_prstring (iq->vendor, 8);
2585 ++posted;
2586 }
2587 if (iq->product[0] && (iq->product[0] != ' ')) {
2588 kprintf (" ");
2589 ASR_prstring (iq->product, 16);
2590 ++posted;
2591 }
2592 if (iq->revision[0] && (iq->revision[0] != ' ')) {
2593 kprintf (" FW Rev. ");
2594 ASR_prstring (iq->revision, 4);
2595 ++posted;
2596 }
2597 kfree(iq, M_TEMP);
2598 if (posted) {
2599 kprintf (",");
2600 }
2601 }
2602 kprintf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
2603 (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
2604
2605 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2606 struct cam_devq * devq;
2607 int QueueSize = sc->ha_QueueSize;
2608
2609 if (QueueSize > MAX_INBOUND) {
2610 QueueSize = MAX_INBOUND;
2611 }
2612
2613 /*
2614 * Create the device queue for our SIM(s).
2615 */
2616 if ((devq = cam_simq_alloc(QueueSize)) == NULL) {
2617 continue;
2618 }
2619
2620 /*
2621 * Construct our first channel SIM entry
2622 */
2623 sc->ha_sim[bus] = cam_sim_alloc(asr_action, asr_poll, "asr", sc,
2624 unit, &sim_mplock,
2625 1, QueueSize, devq);
2626 if (sc->ha_sim[bus] == NULL) {
2627 continue;
2628 }
2629
2630 if (xpt_bus_register(sc->ha_sim[bus], bus) != CAM_SUCCESS){
2631 cam_sim_free(sc->ha_sim[bus]);
2632 sc->ha_sim[bus] = NULL;
2633 continue;
2634 }
2635
2636 if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
2637 cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
2638 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2639 xpt_bus_deregister( cam_sim_path(sc->ha_sim[bus]));
2640 cam_sim_free(sc->ha_sim[bus]);
2641 sc->ha_sim[bus] = NULL;
2642 continue;
2643 }
2644 }
2645
2646 /*
2647 * Generate the device node information
2648 */
2649 sc->ha_devt = make_dev(&asr_ops, unit, UID_ROOT, GID_OPERATOR, 0640,
2650 "asr%d", unit);
2651 if (sc->ha_devt != NULL)
2652 (void)make_dev_alias(sc->ha_devt, "rdpti%d", unit);
2653 sc->ha_devt->si_drv1 = sc;
2654 return(0);
2655} /* asr_attach */
2656
2657static void
2658asr_poll(struct cam_sim *sim)
 2659{
 2660	asr_intr(cam_sim_softc(sim));
2661} /* asr_poll */
2662
2663static void
2664asr_action(struct cam_sim *sim, union ccb *ccb)
 2665{
2666 struct Asr_softc *sc;
2667
2668 debug_asr_printf("asr_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb,
2669 ccb->ccb_h.func_code);
2670
2671 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));
2672
2673 ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);
2674
2675 switch (ccb->ccb_h.func_code) {
2676
2677 /* Common cases first */
2678 case XPT_SCSI_IO: /* Execute the requested I/O operation */
2679 {
2680 struct Message {
2681 char M[MAX_INBOUND_SIZE];
2682 } Message;
2683 PI2O_MESSAGE_FRAME Message_Ptr;
2684
2685 /* Reject incoming commands while we are resetting the card */
2686 if (sc->ha_in_reset != HA_OPERATIONAL) {
2687 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2688 if (sc->ha_in_reset >= HA_OFF_LINE) {
2689 /* HBA is now off-line */
2690 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2691 } else {
2692 /* HBA currently resetting, try again later. */
2693 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2694 }
2695 debug_asr_cmd_printf (" e\n");
2696 xpt_done(ccb);
2697 debug_asr_cmd_printf (" q\n");
2698 break;
2699 }
2700 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2701 kprintf(
2702 "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
2703 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
2704 ccb->csio.cdb_io.cdb_bytes[0],
2705 cam_sim_bus(sim),
2706 ccb->ccb_h.target_id,
2707 ccb->ccb_h.target_lun);
2708 }
2709 debug_asr_cmd_printf("(%d,%d,%d,%d)", cam_sim_unit(sim),
2710 cam_sim_bus(sim), ccb->ccb_h.target_id,
2711 ccb->ccb_h.target_lun);
2712 debug_asr_dump_ccb(ccb);
2713
2714 if ((Message_Ptr = ASR_init_message((union asr_ccb *)ccb,
2715 (PI2O_MESSAGE_FRAME)&Message)) != NULL) {
2716 debug_asr_cmd2_printf ("TID=%x:\n",
2717 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
2718 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
2719 debug_asr_cmd2_dump_message(Message_Ptr);
2720 debug_asr_cmd1_printf (" q");
2721
2722 if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
2723 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2724 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2725 debug_asr_cmd_printf (" E\n");
2726 xpt_done(ccb);
2727 }
2728 debug_asr_cmd_printf(" Q\n");
2729 break;
2730 }
2731 /*
2732 * We will get here if there is no valid TID for the device
2733 * referenced in the scsi command packet.
2734 */
2735 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2736 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2737 debug_asr_cmd_printf (" B\n");
2738 xpt_done(ccb);
2739 break;
2740 }
2741
2742 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
 2743		/* Reset HBA device ... */
2744 asr_hbareset (sc);
2745 ccb->ccb_h.status = CAM_REQ_CMP;
2746 xpt_done(ccb);
2747 break;
2748
2749 case XPT_ABORT: /* Abort the specified CCB */
2750 /* XXX Implement */
2751 ccb->ccb_h.status = CAM_REQ_INVALID;
2752 xpt_done(ccb);
2753 break;
2754
2755 case XPT_SET_TRAN_SETTINGS:
2756 /* XXX Implement */
2757 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2758 xpt_done(ccb);
2759 break;
2760
2761 case XPT_GET_TRAN_SETTINGS:
2762 /* Get default/user set transfer settings for the target */
2763 {
 2764		struct ccb_trans_settings *cts = &(ccb->cts);
2765 struct ccb_trans_settings_scsi *scsi =
2766 &cts->proto_specific.scsi;
2767 struct ccb_trans_settings_spi *spi =
2768 &cts->xport_specific.spi;
2769
2770 if (cts->type == CTS_TYPE_USER_SETTINGS) {
2771 cts->protocol = PROTO_SCSI;
2772 cts->protocol_version = SCSI_REV_2;
2773 cts->transport = XPORT_SPI;
2774 cts->transport_version = 2;
2775
2776 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2777 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2778 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2779 spi->sync_period = 6; /* 40MHz */
2780 spi->sync_offset = 15;
2781 spi->valid = CTS_SPI_VALID_SYNC_RATE
2782 | CTS_SPI_VALID_SYNC_OFFSET
2783 | CTS_SPI_VALID_BUS_WIDTH
2784 | CTS_SPI_VALID_DISC;
2785 scsi->valid = CTS_SCSI_VALID_TQ;
2786
2787 ccb->ccb_h.status = CAM_REQ_CMP;
2788 } else {
2789 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2790 }
2791 xpt_done(ccb);
2792 break;
2793 }
2794
2795 case XPT_CALC_GEOMETRY:
2796 {
2797 struct ccb_calc_geometry *ccg;
2798 u_int32_t size_mb;
2799 u_int32_t secs_per_cylinder;
2800
2801 ccg = &(ccb->ccg);
2802 size_mb = ccg->volume_size
2803 / ((1024L * 1024L) / ccg->block_size);
2804
2805 if (size_mb > 4096) {
2806 ccg->heads = 255;
2807 ccg->secs_per_track = 63;
2808 } else if (size_mb > 2048) {
2809 ccg->heads = 128;
2810 ccg->secs_per_track = 63;
2811 } else if (size_mb > 1024) {
2812 ccg->heads = 65;
2813 ccg->secs_per_track = 63;
2814 } else {
2815 ccg->heads = 64;
2816 ccg->secs_per_track = 32;
2817 }
2818 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2819 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2820 ccb->ccb_h.status = CAM_REQ_CMP;
2821 xpt_done(ccb);
2822 break;
2823 }
2824
2825 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
2826 ASR_resetBus (sc, cam_sim_bus(sim));
2827 ccb->ccb_h.status = CAM_REQ_CMP;
2828 xpt_done(ccb);
2829 break;
2830
2831 case XPT_TERM_IO: /* Terminate the I/O process */
2832 /* XXX Implement */
2833 ccb->ccb_h.status = CAM_REQ_INVALID;
2834 xpt_done(ccb);
2835 break;
2836
2837 case XPT_PATH_INQ: /* Path routing inquiry */
2838 {
2839 struct ccb_pathinq *cpi = &(ccb->cpi);
2840
2841 cpi->version_num = 1; /* XXX??? */
2842 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2843 cpi->target_sprt = 0;
2844 /* Not necessary to reset bus, done by HDM initialization */
2845 cpi->hba_misc = PIM_NOBUSRESET;
2846 cpi->hba_eng_cnt = 0;
2847 cpi->max_target = sc->ha_MaxId;
2848 cpi->max_lun = sc->ha_MaxLun;
2849 cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
2850 cpi->bus_id = cam_sim_bus(sim);
2851 cpi->base_transfer_speed = 3300;
2852 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2853 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
2854 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2855 cpi->unit_number = cam_sim_unit(sim);
2856 cpi->ccb_h.status = CAM_REQ_CMP;
2857 cpi->transport = XPORT_SPI;
2858 cpi->transport_version = 2;
2859 cpi->protocol = PROTO_SCSI;
2860 cpi->protocol_version = SCSI_REV_2;
2861 xpt_done(ccb);
2862 break;
2863 }
2864 default:
2865 ccb->ccb_h.status = CAM_REQ_INVALID;
2866 xpt_done(ccb);
2867 break;
2868 }
2869} /* asr_action */
2870
2871/*
2872 * Handle processing of current CCB as pointed to by the Status.
2873 */
2874static int
2875asr_intr(Asr_softc_t *sc)
 2876{
2877 int processed;
2878
2879 for(processed = 0; asr_get_status(sc) & Mask_InterruptsDisabled;
2880 processed = 1) {
2881 union asr_ccb *ccb;
2882 u_int dsc;
2883 U32 ReplyOffset;
2884 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
2885
2886 if (((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)
2887 && ((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)) {
2888 break;
2889 }
2890 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
2891 - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
2892 /*
2893 * We do not need any (optional byteswapping) method access to
2894 * the Initiator context field.
2895 */
2896 ccb = (union asr_ccb *)(long)
2897 I2O_MESSAGE_FRAME_getInitiatorContext64(
2898 &(Reply->StdReplyFrame.StdMessageFrame));
2899 if (I2O_MESSAGE_FRAME_getMsgFlags(
2900 &(Reply->StdReplyFrame.StdMessageFrame))
2901 & I2O_MESSAGE_FLAGS_FAIL) {
2902 I2O_UTIL_NOP_MESSAGE Message;
2903 PI2O_UTIL_NOP_MESSAGE Message_Ptr;
2904 U32 MessageOffset;
2905
2906 MessageOffset = (u_long)
2907 I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
2908 (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
2909 /*
2910 * Get the Original Message Frame's address, and get
2911 * it's Transaction Context into our space. (Currently
2912 * unused at original authorship, but better to be
2913 * safe than sorry). Straight copy means that we
2914 * need not concern ourselves with the (optional
2915 * byteswapping) method access.
2916 */
2917 Reply->StdReplyFrame.TransactionContext =
2918 bus_space_read_4(sc->ha_frame_btag,
2919 sc->ha_frame_bhandle, MessageOffset +
2920 offsetof(I2O_SINGLE_REPLY_MESSAGE_FRAME,
2921 TransactionContext));
2922 /*
2923 * For 64 bit machines, we need to reconstruct the
2924 * 64 bit context.
2925 */
2926 ccb = (union asr_ccb *)(long)
2927 I2O_MESSAGE_FRAME_getInitiatorContext64(
2928 &(Reply->StdReplyFrame.StdMessageFrame));
2929 /*
2930 * Unique error code for command failure.
2931 */
2932 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
2933 &(Reply->StdReplyFrame), (u_int16_t)-2);
2934 /*
2935 * Modify the message frame to contain a NOP and
2936 * re-issue it to the controller.
2937 */
2938 Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
2939 &Message, sizeof(I2O_UTIL_NOP_MESSAGE));
2940#if (I2O_UTIL_NOP != 0)
2941 I2O_MESSAGE_FRAME_setFunction (
2942 &(Message_Ptr->StdMessageFrame),
2943 I2O_UTIL_NOP);
2944#endif
2945 /*
2946 * Copy the packet out to the Original Message
2947 */
2948 asr_set_frame(sc, Message_Ptr, MessageOffset,
2949 sizeof(I2O_UTIL_NOP_MESSAGE));
2950 /*
2951 * Issue the NOP
2952 */
2953 asr_set_ToFIFO(sc, MessageOffset);
2954 }
2955
2956 /*
2957 * Asynchronous command with no return requirements,
2958 * and a generic handler for immunity against odd error
2959 * returns from the adapter.
2960 */
2961 if (ccb == NULL) {
2962 /*
2963 * Return Reply so that it can be used for the
2964 * next command
2965 */
2966 asr_set_FromFIFO(sc, ReplyOffset);
2967 continue;
2968 }
2969
2970 /* Welease Wadjah! (and stop timeouts) */
2971 ASR_ccbRemove (sc, ccb);
2972
2973 dsc = I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
2974 &(Reply->StdReplyFrame));
2975 ccb->csio.scsi_status = dsc & I2O_SCSI_DEVICE_DSC_MASK;
2976 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2977 switch (dsc) {
2978
2979 case I2O_SCSI_DSC_SUCCESS:
2980 ccb->ccb_h.status |= CAM_REQ_CMP;
2981 break;
2982
2983 case I2O_SCSI_DSC_CHECK_CONDITION:
2984 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR |
2985 CAM_AUTOSNS_VALID;
2986 break;
2987
2988 case I2O_SCSI_DSC_BUSY:
2989 /* FALLTHRU */
2990 case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
2991 /* FALLTHRU */
2992 case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
2993 /* FALLTHRU */
2994 case I2O_SCSI_HBA_DSC_BUS_BUSY:
2995 ccb->ccb_h.status |= CAM_SCSI_BUSY;
2996 break;
2997
2998 case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
2999 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3000 break;
3001
3002 case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
3003 /* FALLTHRU */
3004 case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
3005 /* FALLTHRU */
3006 case I2O_SCSI_HBA_DSC_LUN_INVALID:
3007 /* FALLTHRU */
3008 case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
3009 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
3010 break;
3011
3012 case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
3013 /* FALLTHRU */
3014 case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
3015 ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
3016 break;
3017
3018 default:
3019 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
3020 break;
3021 }
3022 if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
3023 ccb->csio.resid -=
3024 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
3025 Reply);
3026 }
3027
3028 /* Sense data in reply packet */
3029 if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
3030 u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);
3031
3032 if (size) {
3033 if (size > sizeof(ccb->csio.sense_data)) {
3034 size = sizeof(ccb->csio.sense_data);
3035 }
3036 if (size > I2O_SCSI_SENSE_DATA_SZ) {
3037 size = I2O_SCSI_SENSE_DATA_SZ;
3038 }
3039 if ((ccb->csio.sense_len)
3040 && (size > ccb->csio.sense_len)) {
3041 size = ccb->csio.sense_len;
3042 }
3043 if (size < ccb->csio.sense_len) {
3044 ccb->csio.sense_resid =
3045 ccb->csio.sense_len - size;
3046 } else {
3047 ccb->csio.sense_resid = 0;
3048 }
3049 bzero(&(ccb->csio.sense_data),
3050 sizeof(ccb->csio.sense_data));
3051 bcopy(Reply->SenseData,
3052 &(ccb->csio.sense_data), size);
3053 }
3054 }
3055
3056 /*
3057 * Return Reply so that it can be used for the next command
3058 * since we have no more need for it now
3059 */
3060 asr_set_FromFIFO(sc, ReplyOffset);
3061
3062 if (ccb->ccb_h.path) {
3063 xpt_done ((union ccb *)ccb);
3064 } else {
3065 wakeup (ccb);
3066 }
3067 }
3068 return (processed);
3069} /* asr_intr */
3070
3071#undef QueueSize /* Grrrr */
3072#undef SG_Size /* Grrrr */
3073
3074/*
 3075 * Meant to be included at the bottom of asr.c !!!
3076 */
3077
3078/*
3079 * Included here as hard coded. Done because other necessary include
3080 * files utilize C++ comment structures which make them a nuisance to
 3081 * include here just to pick up these three typedefs.
3082 */
3083typedef U32 DPT_TAG_T;
3084typedef U32 DPT_MSG_T;
3085typedef U32 DPT_RTN_T;
3086
 3087#undef SCSI_RESET	/* Conflicts with "scsi/scsiconf.h" definition */
3088#include "dev/raid/asr/osd_unix.h"
 3089
 3090#define asr_unit(dev) minor(dev)
 3091
 3092static u_int8_t ASR_ctlr_held;
 3093
 3094static int
 3095asr_open(struct dev_open_args *ap)
 3096{
 3097	cdev_t dev = ap->a_head.a_dev;
 3098	int error;
 3099
3100 if (dev->si_drv1 == NULL) {
3101 return (ENODEV);
3102 }
 3103	crit_enter();
3104 if (ASR_ctlr_held) {
3105 error = EBUSY;
3106 } else {
3107 error = caps_priv_check(ap->a_cred, SYSCAP_RESTRICTEDROOT);
3108 if (error == 0)
3109 ++ASR_ctlr_held;
 3110	}
 3111	crit_exit();
 3112	return (error);
3113} /* asr_open */
3114
 3115static int
 3116asr_close(struct dev_close_args *ap)
 3117{
3118
3119 ASR_ctlr_held = 0;
3120 return (0);
3121} /* asr_close */
3122
3123
3124/*-------------------------------------------------------------------------*/
 3125/* Function ASR_queue_i */
 3126/*-------------------------------------------------------------------------*/
3127/* The Parameters Passed To This Function Are : */
3128/* Asr_softc_t * : HBA miniport driver's adapter data storage. */
3129/* PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command */
3130/* I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure */
3131/* */
3132/* This Function Will Take The User Request Packet And Convert It To An */
3133/* I2O MSG And Send It Off To The Adapter. */
3134/* */
3135/* Return : 0 For OK, Error Code Otherwise */
 3136/*-------------------------------------------------------------------------*/
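/*
 * Note on buffering: every simple SG element named in the user's message is
 * shadowed by a kernel allocation (struct ioctlSgList_S below).  Data is
 * copied in before the message is queued, elements are split on physical
 * page boundaries so each descriptor the adapter sees is contiguous, and
 * the buffers are copied back out as needed once the command completes.
 */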
3137static int
3138ASR_queue_i(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Packet)
 3139{
3140 union asr_ccb * ccb;
3141 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
3142 PI2O_MESSAGE_FRAME Message_Ptr;
3143 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply_Ptr;
3144 int MessageSizeInBytes;
3145 int ReplySizeInBytes;
3146 int error;
3147 int s;
3148 /* Scatter Gather buffer list */
3149 struct ioctlSgList_S {
3150 SLIST_ENTRY(ioctlSgList_S) link;
3151 caddr_t UserSpace;
3152 I2O_FLAGS_COUNT FlagsCount;
3153 char KernelSpace[sizeof(long)];
3154 } * elm;
3155 /* Generates a `first' entry */
3156 SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;
3157
3158 if (ASR_getBlinkLedCode(sc)) {
3159 debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
3160 ASR_getBlinkLedCode(sc));
3161 return (EIO);
3162 }
3163 /* Copy in the message into a local allocation */
3164 if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (
3165 sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
3166 debug_usr_cmd_printf (
3167 "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3168 return (ENOMEM);
3169 }
3170 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3171 sizeof(I2O_MESSAGE_FRAME))) != 0) {
3172 kfree(Message_Ptr, M_TEMP);
3173 debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
3174 return (error);
3175 }
3176 /* Acquire information to determine type of packet */
3177 MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
3178 /* The offset of the reply information within the user packet */
3179 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
3180 + MessageSizeInBytes);
3181
3182 /* Check if the message is a synchronous initialization command */
3183 s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
3184 kfree(Message_Ptr, M_TEMP);
3185 switch (s) {
3186
3187 case I2O_EXEC_IOP_RESET:
3188 { U32 status;
3189
3190 status = ASR_resetIOP(sc);
3191 ReplySizeInBytes = sizeof(status);
3192 debug_usr_cmd_printf ("resetIOP done\n");
3193 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3194 ReplySizeInBytes));
3195 }
3196
3197 case I2O_EXEC_STATUS_GET:
3198 { PI2O_EXEC_STATUS_GET_REPLY status;
3199
3200 status = &sc->ha_statusmem->status;
3201 if (ASR_getStatus(sc) == NULL) {
3202 debug_usr_cmd_printf ("getStatus failed\n");
3203 return (ENXIO);
3204 }
3205 ReplySizeInBytes = sizeof(status);
3206 debug_usr_cmd_printf ("getStatus done\n");
3207 return (copyout ((caddr_t)status, (caddr_t)Reply,
3208 ReplySizeInBytes));
3209 }
3210
3211 case I2O_EXEC_OUTBOUND_INIT:
3212 { U32 status;
3213
3214 status = ASR_initOutBound(sc);
3215 ReplySizeInBytes = sizeof(status);
3216 debug_usr_cmd_printf ("intOutBound done\n");
3217 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3218 ReplySizeInBytes));
3219 }
3220 }
3221
3222 /* Determine if the message size is valid */
3223 if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
3224 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
3225 debug_usr_cmd_printf ("Packet size %d incorrect\n",
3226 MessageSizeInBytes);
3227 return (EINVAL);
3228 }
3229
3230 if ((Message_Ptr = (PI2O_MESSAGE_FRAME)kmalloc (MessageSizeInBytes,
3231 M_TEMP, M_WAITOK)) == NULL) {
3232 debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3233 MessageSizeInBytes);
3234 return (ENOMEM);
3235 }
3236 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3237 MessageSizeInBytes)) != 0) {
3238 kfree(Message_Ptr, M_TEMP);
3239 debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
3240 MessageSizeInBytes, error);
3241 return (error);
3242 }
3243
3244 /* Check the size of the reply frame, and start constructing */
3245
3246 if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
3247 sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
3248 kfree(Message_Ptr, M_TEMP);
3249 debug_usr_cmd_printf (
3250 "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3251 return (ENOMEM);
3252 }
3253 if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
3254 sizeof(I2O_MESSAGE_FRAME))) != 0) {
3255 kfree(Reply_Ptr, M_TEMP);
3256 kfree(Message_Ptr, M_TEMP);
3257 debug_usr_cmd_printf (
3258 "Failed to copy in reply frame, errno=%d\n",
3259 error);
3260 return (error);
3261 }
3262 ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
3263 &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
3264 kfree(Reply_Ptr, M_TEMP);
3265 if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
3266 kfree(Message_Ptr, M_TEMP);
3267 debug_usr_cmd_printf (
3268 "Failed to copy in reply frame[%d], errno=%d\n",
3269 ReplySizeInBytes, error);
3270 return (EINVAL);
3271 }
3272
3273 if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)kmalloc (
3274 ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
3275 ? ReplySizeInBytes : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
3276 M_TEMP, M_WAITOK)) == NULL) {
3277 kfree(Message_Ptr, M_TEMP);
3278 debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3279 ReplySizeInBytes);
3280 return (ENOMEM);
3281 }
3282 (void)ASR_fillMessage((void *)Reply_Ptr, ReplySizeInBytes);
3283 Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
3284 = Message_Ptr->InitiatorContext;
3285 Reply_Ptr->StdReplyFrame.TransactionContext
3286 = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
3287 I2O_MESSAGE_FRAME_setMsgFlags(
3288 &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3289 I2O_MESSAGE_FRAME_getMsgFlags(
3290 &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
3291 | I2O_MESSAGE_FLAGS_REPLY);
3292
3293 /* Check if the message is a special case command */
3294 switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
3295 case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
3296 if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
3297 Message_Ptr) & 0xF0) >> 2)) {
3298 kfree(Message_Ptr, M_TEMP);
3299 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3300 &(Reply_Ptr->StdReplyFrame),
3301 (ASR_setSysTab(sc) != CAM_REQ_CMP));
3302 I2O_MESSAGE_FRAME_setMessageSize(
3303 &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3304 sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
3305 error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
3306 ReplySizeInBytes);
3307 kfree(Reply_Ptr, M_TEMP);
3308 return (error);
3309 }
3310 }
3311
3312 /* Deal in the general case */
3313 /* First allocate and optionally copy in each scatter gather element */
3314 SLIST_INIT(&sgList);
3315 if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
3316 PI2O_SGE_SIMPLE_ELEMENT sg;
3317
3318 /*
3319 * since this code is reused in several systems, code
3320 * efficiency is greater by using a shift operation rather
3321 * than a divide by sizeof(u_int32_t).
3322 */
3323 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3324 + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
3325 >> 2));
3326 while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
3327 + MessageSizeInBytes)) {
3328 caddr_t v;
3329 int len;
3330
3331 if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3332 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
3333 error = EINVAL;
3334 break;
3335 }
3336 len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
3337 debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
3338 sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3339 + ((I2O_MESSAGE_FRAME_getVersionOffset(
3340 Message_Ptr) & 0xF0) >> 2)),
3341 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);
3342
3343 if ((elm = (struct ioctlSgList_S *)kmalloc (
3344 sizeof(*elm) - sizeof(elm->KernelSpace) + len,
3345 M_TEMP, M_WAITOK)) == NULL) {
3346 debug_usr_cmd_printf (
3347 "Failed to allocate SG[%d]\n", len);
3348 error = ENOMEM;
3349 break;
3350 }
3351 SLIST_INSERT_HEAD(&sgList, elm, link);
3352 elm->FlagsCount = sg->FlagsCount;
3353 elm->UserSpace = (caddr_t)
3354 (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
3355 v = elm->KernelSpace;
3356 /* Copy in outgoing data (DIR bit could be invalid) */
3357 if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
3358 != 0) {
3359 break;
3360 }
3361 /*
 3362			 * If the buffer is not contiguous, let's
3363 * break up the scatter/gather entries.
3364 */
3365 while ((len > 0)
3366 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
3367 (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
3368 int next, base, span;
3369
3370 span = 0;
3371 next = base = KVTOPHYS(v);
3372 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
3373 base);
3374
3375 /* How far can we go physically contiguously */
3376 while ((len > 0) && (base == next)) {
3377 int size;
3378
3379 next = trunc_page(base) + PAGE_SIZE;
3380 size = next - base;
3381 if (size > len) {
3382 size = len;
3383 }
3384 span += size;
3385 v += size;
3386 len -= size;
3387 base = KVTOPHYS(v);
3388 }
3389
3390 /* Construct the Flags */
3391 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
3392 span);
3393 {
3394 int flags = I2O_FLAGS_COUNT_getFlags(
3395 &(elm->FlagsCount));
3396 /* Any remaining length? */
3397 if (len > 0) {
3398 flags &=
3399 ~(I2O_SGL_FLAGS_END_OF_BUFFER
3400 | I2O_SGL_FLAGS_LAST_ELEMENT);
3401 }
3402 I2O_FLAGS_COUNT_setFlags(
3403 &(sg->FlagsCount), flags);
3404 }
3405
3406 debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
3407 sg - (PI2O_SGE_SIMPLE_ELEMENT)
3408 ((char *)Message_Ptr
3409 + ((I2O_MESSAGE_FRAME_getVersionOffset(
3410 Message_Ptr) & 0xF0) >> 2)),
3411 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
3412 span);
3413 if (len <= 0) {
3414 break;
3415 }
3416
3417 /*
3418 * Incrementing requires resizing of the
3419 * packet, and moving up the existing SG
3420 * elements.
3421 */
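				/*
				 * Concretely: allocate a larger frame, copy
				 * the message up to the current element,
				 * replicate the element being split into the
				 * new slot, free the old frame and continue,
				 * so the remaining bytes of this user buffer
				 * get descriptors of their own.
				 */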
3422 ++sg;
3423 MessageSizeInBytes += sizeof(*sg);
3424 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
3425 I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
3426 + (sizeof(*sg) / sizeof(U32)));
3427 {
3428 PI2O_MESSAGE_FRAME NewMessage_Ptr;
3429
3430 if ((NewMessage_Ptr
3431 = (PI2O_MESSAGE_FRAME)
3432 kmalloc (MessageSizeInBytes,
3433 M_TEMP, M_WAITOK)) == NULL) {
3434 debug_usr_cmd_printf (
3435 "Failed to acquire frame[%d] memory\n",
3436 MessageSizeInBytes);
3437 error = ENOMEM;
3438 break;
3439 }
3440 span = ((caddr_t)sg)
3441 - (caddr_t)Message_Ptr;
3442 bcopy(Message_Ptr,NewMessage_Ptr, span);
3443 bcopy((caddr_t)(sg-1),
3444 ((caddr_t)NewMessage_Ptr) + span,
3445 MessageSizeInBytes - span);
3446 kfree(Message_Ptr, M_TEMP);
3447 sg = (PI2O_SGE_SIMPLE_ELEMENT)
3448 (((caddr_t)NewMessage_Ptr) + span);
3449 Message_Ptr = NewMessage_Ptr;
3450 }
3451 }
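			/*
			 * Stop on error or at the element flagged as the last
			 * in the caller's list; otherwise advance to the next
			 * SG element.
			 */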
3452 if ((error)
3453 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3454 & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
3455 break;
3456 }
3457 ++sg;
3458 }
3459 if (error) {
3460 while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3461 SLIST_REMOVE_HEAD(&sgList, link);
3462 kfree(elm, M_TEMP);
3463 }
3464 kfree(Reply_Ptr, M_TEMP);
3465 kfree(Message_Ptr, M_TEMP);
3466 return (error);
3467 }
3468 }
3469
3470 debug_usr_cmd_printf ("Inbound: ");
3471 debug_usr_cmd_dump_message(Message_Ptr);
3472
3473 /* Send the command */
3474 if ((ccb = asr_alloc_ccb (sc)) == NULL) {
3475 /* Free up in-kernel buffers */
3476 while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3477 SLIST_REMOVE_HEAD(&sgList, link);
3478 kfree(elm, M_TEMP);
3479 }
3480 kfree(Reply_Ptr, M_TEMP);
3481 kfree(Message_Ptr, M_TEMP);
3482 return (ENOMEM);
3483 }
3484
 3485	/*
 3486	 * No (optional byte-swapping) accessor method is needed for the
 3487	 * Initiator Context field; it simply carries the ccb pointer.
 3488	 */
3489 I2O_MESSAGE_FRAME_setInitiatorContext64(
3490 (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);
3491
3492 (void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
3493
3494 kfree(Message_Ptr, M_TEMP);
3495
3496 /*
3497 * Wait for the board to report a finished instruction.
3498 */
7f2216bc 3499 crit_enter();
3500 while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
3501 if (ASR_getBlinkLedCode(sc)) {
3502 /* Reset Adapter */
3503 kprintf ("asr%d: Blink LED 0x%x resetting adapter\n",
3504 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
3505 ASR_getBlinkLedCode(sc));
3506 if (ASR_reset (sc) == ENXIO) {
3507 /* Command Cleanup */
3508 ASR_ccbRemove(sc, ccb);
3509 }
7f2216bc 3510 crit_exit();
3511 /* Free up in-kernel buffers */
3512 while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3513 SLIST_REMOVE_HEAD(&sgList, link);
3514 kfree(elm, M_TEMP);
3515 }
3516 kfree(Reply_Ptr, M_TEMP);
3517 asr_free_ccb(ccb);
3518 return (EIO);
3519 }
3520 /* Check every second for BlinkLed */
3521 /* There is no PRICAM, but outwardly PRIBIO is functional */
3522 tsleep(ccb, 0, "asr", hz);
3523 }
7f2216bc 3524 crit_exit();
984263bc 3525
3526 debug_usr_cmd_printf ("Outbound: ");
3527 debug_usr_cmd_dump_message(Reply_Ptr);
3528
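	/*
	 * Build the reply frame returned to user space from the CAM
	 * completion status: detailed status, transfer count and, when
	 * auto-sense is valid, the sense data.
	 */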
3529 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3530 &(Reply_Ptr->StdReplyFrame),
3531 (ccb->ccb_h.status != CAM_REQ_CMP));
3532
3533 if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3534 - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
3535 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
3536 ccb->csio.dxfer_len - ccb->csio.resid);
3537 }
3538 if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
3539 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3540 - I2O_SCSI_SENSE_DATA_SZ))) {
3541 int size = ReplySizeInBytes
3542 - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3543 - I2O_SCSI_SENSE_DATA_SZ;
3544
3545 if (size > sizeof(ccb->csio.sense_data)) {
3546 size = sizeof(ccb->csio.sense_data);
3547 }
3548 if (size < ccb->csio.sense_len) {
3549 ccb->csio.sense_resid = ccb->csio.sense_len - size;
3550 } else {
3551 ccb->csio.sense_resid = 0;
3552 }
3553 bzero(&(ccb->csio.sense_data), sizeof(ccb->csio.sense_data));
3554 bcopy(&(ccb->csio.sense_data), Reply_Ptr->SenseData, size);
3555 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
3556 Reply_Ptr, size);
3557 }
3558
3559 /* Free up in-kernel buffers */
3560 while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3561 /* Copy out as necessary */
3562 if ((error == 0)
 3563		  /* Treat the DIR bit as valid; copying out when it was never set is harmless */
3564 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
3565 & I2O_SGL_FLAGS_DIR) == 0)) {
3566 error = copyout((caddr_t)(elm->KernelSpace),
3567 elm->UserSpace,
3568 I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
3569 }
3570 SLIST_REMOVE_HEAD(&sgList, link);
3571 kfree(elm, M_TEMP);
3572 }
3573 if (error == 0) {
3574 /* Copy reply frame to user space */
3575 error = copyout((caddr_t)Reply_Ptr, (caddr_t)Reply,
3576 ReplySizeInBytes);
3577 }
3578 kfree(Reply_Ptr, M_TEMP);
3579 asr_free_ccb(ccb);
3580
3581 return (error);
3582} /* ASR_queue_i */
3583
3584/*----------------------------------------------------------------------*/
7b0cd042 3585/* Function asr_ioctl */
984263bc 3586/*----------------------------------------------------------------------*/
 3587/* The parameter passed to this function is :                           */
 3588/*   ap : dev_ioctl_args structure containing                           */
 3589/*        a_head.a_dev : Device                                         */
 3590/*        a_cmd        : Ioctl Command                                  */
 3591/*        a_data       : User Argument Passed In                        */
 3592/*        a_fflag      : Mode Parameter                                 */
3593/* */
3594/* This function is the user interface into this adapter driver */
3595/* */
3596/* Return : zero if OK, error code if not */
3597/*----------------------------------------------------------------------*/
3598
7b0cd042 3599static int
fef8985e 3600asr_ioctl(struct dev_ioctl_args *ap)
984263bc 3601{
b13267a5 3602 cdev_t dev = ap->a_head.a_dev;
7b0cd042 3603 u_long cmd = ap->a_cmd;
fef8985e 3604 caddr_t data = ap->a_data;
3605 Asr_softc_t *sc = dev->si_drv1;
3606 int i, error = 0;
3607#ifdef ASR_IOCTL_COMPAT
3608 int j;
3609#endif /* ASR_IOCTL_COMPAT */
3610
3611 if (sc == NULL)
3612 return (EINVAL);
7b0cd042 3613
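	/*
	 * New-style DPT ioctls encode IOC direction/size information in the
	 * upper 16 bits of the command, so the ioctl layer has already
	 * copied the user argument into 'data'.  The traditional interface
	 * uses only the low 16 bits and passes a user-space pointer in
	 * 'data', which must be copied out explicitly.
	 */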
7c4b329b 3614 switch(cmd) {
3615 case DPT_SIGNATURE:
3616#ifdef ASR_IOCTL_COMPAT
3617#if (dsDescription_size != 50)
3618 case DPT_SIGNATURE + ((50 - dsDescription_size) << 16):
3619#endif
3620 if (cmd & 0xFFFF0000) {
3621 bcopy(&ASR_sig, data, sizeof(dpt_sig_S));
3622 return (0);
3623 }
3624 /* Traditional version of the ioctl interface */
3625 case DPT_SIGNATURE & 0x0000FFFF:
3626#endif
3627 return (copyout((caddr_t)(&ASR_sig), *((caddr_t *)data),
3628 sizeof(dpt_sig_S)));
3629
3630 /* Traditional version of the ioctl interface */
3631 case DPT_CTRLINFO & 0x0000FFFF:
3632 case DPT_CTRLINFO: {
3633 struct {
3634 u_int16_t length;
3635 u_int16_t drvrHBAnum;
3636 u_int32_t baseAddr;
3637 u_int16_t blinkState;
3638 u_int8_t pciBusNum;
3639 u_int8_t pciDeviceNum;
3640 u_int16_t hbaFlags;
3641 u_int16_t Interrupt;
3642 u_int32_t reserved1;
3643 u_int32_t reserved2;
3644 u_int32_t reserved3;
3645 } CtlrInfo;
3646
3647 bzero(&CtlrInfo, sizeof(CtlrInfo));
3648 CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t);
3649 CtlrInfo.drvrHBAnum = asr_unit(dev);
3650 CtlrInfo.baseAddr = sc->ha_Base;
3651 i = ASR_getBlinkLedCode (sc);
3652 if (i == -1)
3653 i = 0;
3654
3655 CtlrInfo.blinkState = i;
3656 CtlrInfo.pciBusNum = sc->ha_pciBusNum;
3657 CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum;
3658#define FLG_OSD_PCI_VALID 0x0001
3659#define FLG_OSD_DMA 0x0002
3660#define FLG_OSD_I2O 0x0004
3661 CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID|FLG_OSD_DMA|FLG_OSD_I2O;
3662 CtlrInfo.Interrupt = sc->ha_irq;
3663#ifdef ASR_IOCTL_COMPAT
3664 if (cmd & 0xffff0000)
3665 bcopy(&CtlrInfo, data, sizeof(CtlrInfo));
3666 else
3667#endif /* ASR_IOCTL_COMPAT */
3668 error = copyout(&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo));
3669 } return (error);
3670
3671 /* Traditional version of the ioctl interface */
3672 case DPT_SYSINFO & 0x0000FFFF:
3673 case DPT_SYSINFO: {
3674 sysInfo_S Info;
3675#ifdef ASR_IOCTL_COMPAT
3676 char * cp;
3677 /* Kernel Specific ptok `hack' */
3678#define ptok(a) ((char *)(uintptr_t)(a) + KERNBASE)
3679
3680 bzero(&Info, sizeof(Info));
3681
3682 /* Appears I am the only person in the Kernel doing this */
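		/*
		 * CMOS register 0x12 holds the BIOS drive type for the first
		 * two hard disks (one nibble each); a nibble value of 0xf
		 * means the real type lives in the extended registers
		 * 0x19/0x1a.
		 */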
3683 outb (0x70, 0x12);
3684 i = inb(0x71);
3685 j = i >> 4;
3686 if (i == 0x0f) {
3687 outb (0x70, 0x19);
3688 j = inb (0x71);
3689 }
3690 Info.drive0CMOS = j;
3691
3692 j = i & 0x0f;
3693 if (i == 0x0f) {
3694 outb (0x70, 0x1a);
3695 j = inb (0x71);
3696 }
3697 Info.drive1CMOS = j;
3698
3699 Info.numDrives = *((char *)ptok(0x475));
3700#else /* ASR_IOCTL_COMPAT */
3701 bzero(&Info, sizeof(Info));
3702#endif /* ASR_IOCTL_COMPAT */
3703
3704 Info.processorFamily = ASR_sig.dsProcessorFamily;
3705 Info.osType = OS_BSDI_UNIX;
3706 Info.osMajorVersion = osrelease[0] - '0';
3707 Info.osMinorVersion = osrelease[2] - '0';
3708 /* Info.osRevision = 0; */
3709 /* Info.osSubRevision = 0; */
3710 Info.busType = SI_PCI_BUS;
3711 Info.flags = SI_OSversionValid|SI_BusTypeValid|SI_NO_SmartROM;
3712
3713#ifdef ASR_IOCTL_COMPAT
3714 Info.flags |= SI_CMOS_Valid | SI_NumDrivesValid;
3715 /* Go Out And Look For I2O SmartROM */
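		/*
		 * Scan the option-ROM window in 2KB steps.  Each ROM begins
		 * with the 0x55,0xAA signature and stores its length (in
		 * 512-byte blocks) at offset 2; the I2O SmartROM is picked
		 * out by the ID bytes checked at offsets 6 and 10 below.
		 */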
3716 for(j = 0xC8000; j < 0xE0000; j += 2048) {
3717 int k;
3718
3719 cp = ptok(j);
3720 if (*((unsigned short *)cp) != 0xAA55) {
3721 continue;
3722 }
3723 j += (cp[2] * 512) - 2048;
3724 if ((*((u_long *)(cp + 6))
3725 != ('S' + (' ' * 256) + (' ' * 65536L)))
3726 || (*((u_long *)(cp + 10))
3727 != ('I' + ('2' * 256) + ('0' * 65536L)))) {
3728 continue;
3729 }
3730 cp += 0x24;
3731 for (k = 0; k < 64; ++k) {
3732 if (*((unsigned short *)cp)
3733 == (' ' + ('v' * 256))) {
3734 break;
3735 }
3736 }
3737 if (k < 64) {
3738 Info.smartROMMajorVersion
3739 = *((unsigned char *)(cp += 4)) - '0';
3740 Info.smartROMMinorVersion
3741 = *((unsigned char *)(cp += 2));
3742 Info.smartROMRevision
3743 = *((unsigned char *)(++cp));
3744 Info.flags |= SI_SmartROMverValid;
3745 Info.flags &= ~SI_NO_SmartROM;
3746 break;
3747 }
3748 }
3749 /* Get The Conventional Memory Size From CMOS */
3750 outb (0x70, 0x16);
3751 j = inb (0x71);
3752 j <<= 8;
3753 outb (0x70, 0x15);
3754 j |= inb(0x71);
3755 Info.conventionalMemSize = j;
3756
3757 /* Get The Extended Memory Found At Power On From CMOS */
3758 outb (0x70, 0x31);
3759 j = inb (0x71);
3760 j <<= 8;
3761 outb (0x70, 0x30);
3762 j |= inb(0x71);
3763 Info.extendedMemSize = j;
3764 Info.flags |= SI_MemorySizeValid;
3765
3766 /* Copy Out The Info Structure To The User */
3767 if (cmd & 0xFFFF0000)
3768 bcopy(&Info, data, sizeof(Info));
3769 else
3770#endif /* ASR_IOCTL_COMPAT */
3771 error = copyout(&Info, *(caddr_t *)data, sizeof(Info));
3772 return (error); }
3773
3774 /* Get The BlinkLED State */
3775 case DPT_BLINKLED:
3776 i = ASR_getBlinkLedCode (sc);
3777 if (i == -1)
3778 i = 0;
3779#ifdef ASR_IOCTL_COMPAT
3780 if (cmd & 0xffff0000)
3781 bcopy(&i, data, sizeof(i));
3782 else
3783#endif /* ASR_IOCTL_COMPAT */
3784 error = copyout(&i, *(caddr_t *)data, sizeof(i));
3785 break;
3786
3787 /* Send an I2O command */
3788 case I2OUSRCMD:
3789 return (ASR_queue_i(sc, *((PI2O_MESSAGE_FRAME *)data)));
3790
3791 /* Reset and re-initialize the adapter */
3792 case I2ORESETCMD:
3793 return (ASR_reset(sc));
3794
3795 /* Rescan the LCT table and resynchronize the information */
3796 case I2ORESCANCMD:
3797 return (ASR_rescan(sc));
3798 }
3799 return (EINVAL);
984263bc 3800} /* asr_ioctl */