2 *****************************************************************************************
4 ** FILE NAME : arcmsr.c
5 ** BY : Erich Chen, Ching Huang
6 ** Description: SCSI RAID Device Driver for
7 ** ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x) SATA/SAS RAID HOST Adapter
8 ** ARCMSR RAID Host adapter
9 ** [RAID controller:INTEL 331(PCI-X) 341(PCI-EXPRESS) chip set]
10 ******************************************************************************************
11 ************************************************************************
13 ** Copyright (c) 2004-2010 ARECA Co. Ltd.
14 ** Erich Chen, Taipei Taiwan All rights reserved.
16 ** Redistribution and use in source and binary forms, with or without
17 ** modification, are permitted provided that the following conditions
19 ** 1. Redistributions of source code must retain the above copyright
20 ** notice, this list of conditions and the following disclaimer.
21 ** 2. Redistributions in binary form must reproduce the above copyright
22 ** notice, this list of conditions and the following disclaimer in the
23 ** documentation and/or other materials provided with the distribution.
24 ** 3. The name of the author may not be used to endorse or promote products
25 ** derived from this software without specific prior written permission.
27 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT
32 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
34 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35 **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
36 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 **************************************************************************
40 ** REV# DATE NAME DESCRIPTION
41 ** 1.00.00.00 03/31/2004 Erich Chen First release
42 ** 1.20.00.02 11/29/2004 Erich Chen bug fix with arcmsr_bus_reset when PHY error
43 ** 1.20.00.03 04/19/2005 Erich Chen add SATA 24 Ports adapter type support
44 ** clean unused function
45 ** 1.20.00.12 09/12/2005 Erich Chen bug fix with abort command handling,
46 ** firmware version check
47 ** and firmware update notify for hardware bug fix
48 ** handling if none zero high part physical address
50 ** 1.20.00.13 08/18/2006 Erich Chen remove pending srb and report busy
51 ** add iop message xfer
52 ** with scsi pass-through command
53 ** add new device id of sas raid adapters
54 ** code fit for SPARC64 & PPC
55 ** 1.20.00.14 02/05/2007 Erich Chen bug fix for incorrect ccb_h.status report
56 ** and cause g_vfs_done() read write error
57 ** 1.20.00.15 10/10/2007 Erich Chen support new RAID adapter type ARC120x
58 ** 1.20.00.16 10/10/2009 Erich Chen Bug fix for RAID adapter type ARC120x
59 ** bus_dmamem_alloc() with BUS_DMA_ZERO
60 ** 1.20.00.17 07/15/2010 Ching Huang Added support ARC1880
61 ** report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed,
62 ** prevent cam_periph_error removing all LUN devices of one Target id
63 ** for any one LUN device failed
64 ** 1.20.00.18 10/14/2010 Ching Huang Fixed "inquiry data fails comparion at DV1 step"
65 ** 10/25/2010 Ching Huang Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B
66 ** 1.20.00.19 11/11/2010 Ching Huang Fixed arcmsr driver prevent arcsas support for Areca SAS HBA ARC13x0
67 ** 1.20.00.20 12/08/2010 Ching Huang Avoid calling atomic_set_int function
68 ** 1.20.00.21 02/08/2011 Ching Huang Implement I/O request timeout
69 ** 02/14/2011 Ching Huang Modified pktRequestCount
70 ** 1.20.00.21 03/03/2011 Ching Huang if a command timeout, then wait its ccb back before free it
71 ** 1.20.00.22 07/04/2011 Ching Huang Fixed multiple MTX panic
72 ** 1.20.00.23 10/28/2011 Ching Huang Added TIMEOUT_DELAY in case of too many HDDs need to start
73 ** 1.20.00.23 11/08/2011 Ching Huang Added report device transfer speed
74 ** 1.20.00.23 01/30/2012 Ching Huang Fixed Request requeued and Retrying command
75 ** 1.20.00.24 06/11/2012 Ching Huang Fixed return sense data condition
76 ** 1.20.00.25 08/17/2012 Ching Huang Fixed hotplug device no function on type A adapter
77 ******************************************************************************************
78 * $FreeBSD: src/sys/dev/arcmsr/arcmsr.c,v 1.43 2012/09/04 05:15:54 delphij Exp $
81 #define ARCMSR_DEBUG1 1
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/malloc.h>
86 #include <sys/kernel.h>
88 #include <sys/queue.h>
90 #include <sys/kthread.h>
91 #include <sys/module.h>
94 #include <sys/sysctl.h>
95 #include <sys/thread2.h>
97 #include <sys/device.h>
99 #include <vm/vm_param.h>
102 #include <machine/atomic.h>
103 #include <sys/conf.h>
104 #include <sys/rman.h>
106 #include <bus/cam/cam.h>
107 #include <bus/cam/cam_ccb.h>
108 #include <bus/cam/cam_sim.h>
109 #include <bus/cam/cam_periph.h>
110 #include <bus/cam/cam_xpt_periph.h>
111 #include <bus/cam/cam_xpt_sim.h>
112 #include <bus/cam/cam_debug.h>
113 #include <bus/cam/scsi/scsi_all.h>
114 #include <bus/cam/scsi/scsi_message.h>
116 **************************************************************************
117 **************************************************************************
119 #include <sys/endian.h>
120 #include <bus/pci/pcivar.h>
121 #include <bus/pci/pcireg.h>
/*
** Thin shims mapping the driver's locking vocabulary onto the DragonFly
** lockmgr(9) API, plus an endianness helper and the per-adapter lock type.
** NOTE(review): ARCMSR_LOCK_TRY takes the lock *object* and applies '&'
** itself, while INIT/ACQUIRE/RELEASE take a pointer -- confirm call sites
** before normalizing.  Its expansion also ends in ';', which is a hazard
** inside if/else bodies; same for arcmsr_callout_init below.
*/
122 #define ARCMSR_LOCK_INIT(l, s) lockinit(l, s, 0, LK_CANRECURSE)
123 #define ARCMSR_LOCK_DESTROY(l) lockuninit(l)
124 #define ARCMSR_LOCK_ACQUIRE(l) lockmgr(l, LK_EXCLUSIVE)
125 #define ARCMSR_LOCK_RELEASE(l) lockmgr(l, LK_RELEASE)
126 #define ARCMSR_LOCK_TRY(l) lockmgr(&l, LK_EXCLUSIVE|LK_NOWAIT);
127 #define arcmsr_htole32(x) htole32(x)
128 typedef struct lock arcmsr_lock_t;
/* callout_init_mp: MPSAFE callout, no serializer held when it fires. */
130 #define arcmsr_callout_init(a) callout_init_mp(a);
132 #define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.25 2012-08-17"
133 #include <dev/raid/arcmsr/arcmsr.h>
/*
** Per-SRB slot size: sizeof(struct CommandControlBlock) rounded up to a
** 32-byte boundary (the firmware requires 32-byte-aligned command frames).
** NOTE(review): the mask is 0xffe0, not 0xffffffe0 -- it silently truncates
** if the rounded size ever reaches 0x10000; harmless today but worth a
** _Static_assert on sizeof(struct CommandControlBlock) in the full file.
*/
134 #define SRB_SIZE ((sizeof(struct CommandControlBlock)+0x1f) & 0xffe0)
135 #define ARCMSR_SRBS_POOL_SIZE (SRB_SIZE * ARCMSR_MAX_FREESRB_NUM)
137 **************************************************************************
138 **************************************************************************
/*
** Register accessors: 32-bit bus-space read/write of field `r` in layout
** struct `s`, via BAR index `b`.  They implicitly use a local `acb` --
** callers must have a `struct AdapterControlBlock *acb` in scope.
*/
140 #define CHIP_REG_READ32(s, b, r) bus_space_read_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r))
141 #define CHIP_REG_WRITE32(s, b, r, d) bus_space_write_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r), d)
143 **************************************************************************
144 **************************************************************************
146 static void arcmsr_free_srb(struct CommandControlBlock *srb);
147 static struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb);
148 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb);
149 static int arcmsr_probe(device_t dev);
150 static int arcmsr_attach(device_t dev);
151 static int arcmsr_detach(device_t dev);
152 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg);
153 static void arcmsr_iop_parking(struct AdapterControlBlock *acb);
154 static int arcmsr_shutdown(device_t dev);
155 static void arcmsr_interrupt(struct AdapterControlBlock *acb);
156 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb);
157 static void arcmsr_free_resource(struct AdapterControlBlock *acb);
158 static void arcmsr_bus_reset(struct AdapterControlBlock *acb);
159 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
160 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
161 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
162 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
163 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb);
164 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb);
165 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag);
166 static void arcmsr_iop_reset(struct AdapterControlBlock *acb);
167 static void arcmsr_report_sense_info(struct CommandControlBlock *srb);
168 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t * dm_segs, u_int32_t nseg);
169 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb);
170 static int arcmsr_resume(device_t dev);
171 static int arcmsr_suspend(device_t dev);
172 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb);
173 static void arcmsr_polling_devmap(void* arg);
174 static void arcmsr_srb_timeout(void* arg);
176 static void arcmsr_dump_data(struct AdapterControlBlock *acb);
179 **************************************************************************
180 **************************************************************************
/* Busy-wait for `us` microseconds; trivial wrapper over the kernel DELAY(). */
182 static void UDELAY(u_int32_t us) { DELAY(us); }
184 **************************************************************************
185 **************************************************************************
/* busdma load callbacks: SRB-pool mapping and per-request SG execution. */
187 static bus_dmamap_callback_t arcmsr_map_free_srb;
188 static bus_dmamap_callback_t arcmsr_execute_srb;
190 **************************************************************************
191 **************************************************************************
/* Character-device entry points (typedef-checked prototypes). */
193 static d_open_t arcmsr_open;
194 static d_close_t arcmsr_close;
195 static d_ioctl_t arcmsr_ioctl;
/* newbus method table wiring the driver into the PCI attach framework. */
197 static device_method_t arcmsr_methods[]={
198 DEVMETHOD(device_probe, arcmsr_probe),
199 DEVMETHOD(device_attach, arcmsr_attach),
200 DEVMETHOD(device_detach, arcmsr_detach),
201 DEVMETHOD(device_shutdown, arcmsr_shutdown),
202 DEVMETHOD(device_suspend, arcmsr_suspend),
203 DEVMETHOD(device_resume, arcmsr_resume),
204 DEVMETHOD(bus_print_child, bus_generic_print_child),
205 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/* softc is the whole AdapterControlBlock; allocated per device by newbus. */
209 static driver_t arcmsr_driver={
210 "arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock)
213 static devclass_t arcmsr_devclass;
214 DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, NULL, NULL);
215 MODULE_VERSION(arcmsr, 1);
216 MODULE_DEPEND(arcmsr, pci, 1, 1, 1);
217 MODULE_DEPEND(arcmsr, cam, 1, 1, 1);
/* Fallback definition for headers that predate BUS_DMA_COHERENT. */
218 #ifndef BUS_DMA_COHERENT
219 #define BUS_DMA_COHERENT 0x04 /* hint: map memory in a coherent way */
/* /dev node ops; D_MPSAFE: entry points run without the MP lock. */
222 static struct dev_ops arcmsr_ops = {
223 { "arcmsr", 0, D_MPSAFE },
224 .d_open = arcmsr_open, /* open */
225 .d_close = arcmsr_close, /* close */
226 .d_ioctl = arcmsr_ioctl, /* ioctl */
/* Loader tunable: hw.arcmsr.msi.enable (default on) selects MSI vs INTx. */
229 static int arcmsr_msi_enable = 1;
230 TUNABLE_INT("hw.arcmsr.msi.enable", &arcmsr_msi_enable);
234 **************************************************************************
235 **************************************************************************
/*
** d_open entry point: recover the adapter softc stashed in si_drv1 when
** the /dev node was created.  NOTE(review): the return statements and the
** presumable acb NULL check are on lines missing from this extract.
*/
239 arcmsr_open(struct dev_open_args *ap)
241 cdev_t dev = ap->a_head.a_dev;
242 struct AdapterControlBlock *acb=dev->si_drv1;
251 **************************************************************************
252 **************************************************************************
/*
** d_close entry point; mirrors arcmsr_open.  NOTE(review): body tail
** (validation + return) is on lines missing from this extract.
*/
256 arcmsr_close(struct dev_close_args *ap)
258 cdev_t dev = ap->a_head.a_dev;
259 struct AdapterControlBlock *acb=dev->si_drv1;
268 **************************************************************************
269 **************************************************************************
/*
** d_ioctl entry point: unpack the cdev ioctl arguments and forward to the
** IOP command dispatcher.  a_data is already copied in/out by the kernel,
** so arcmsr_iop_ioctlcmd works on a kernel buffer.
*/
273 arcmsr_ioctl(struct dev_ioctl_args *ap)
275 cdev_t dev = ap->a_head.a_dev;
276 u_long ioctl_cmd = ap->a_cmd;
277 caddr_t arg = ap->a_data;
278 struct AdapterControlBlock *acb=dev->si_drv1;
283 return (arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg));
287 **********************************************************************
288 **********************************************************************
/*
** Mask every outbound interrupt source for this adapter type and return
** the interrupt-mask register's previous value so the caller can restore
** it later via arcmsr_enable_allintr().  Type A/C: set all mask bits in
** the mask register.  Type B: the doorbell mask is write-0-to-disable.
** NOTE(review): case-closing braces/breaks are on lines missing from this
** extract; the visible lines are a fragment.
*/
290 static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb)
292 u_int32_t intmask_org=0;
294 switch (acb->adapter_type) {
295 case ACB_ADAPTER_TYPE_A: {
296 /* disable all outbound interrupt */
297 intmask_org=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */
298 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
301 case ACB_ADAPTER_TYPE_B: {
302 /* disable all outbound interrupt */
/* Saved mask excludes MESSAGE_CMD_DONE so restore won't re-enable it. */
303 intmask_org=CHIP_REG_READ32(HBB_DOORBELL,
304 0, iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */
305 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, 0); /* disable all interrupt */
308 case ACB_ADAPTER_TYPE_C: {
309 /* disable all outbound interrupt */
310 intmask_org=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask) ; /* disable outbound message0 int */
311 CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
315 return (intmask_org);
318 **********************************************************************
319 **********************************************************************
/*
** Re-enable the interrupt sources the driver cares about, merging with the
** mask previously saved by arcmsr_disable_allintr().  Note the polarity
** difference: A/C mask registers are 1=masked (so we AND with ~wanted),
** while the type-B doorbell mask is 1=enabled (so we OR the wanted bits).
** outbound_int_enable caches the effective enabled-bit set for the ISR.
*/
321 static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org)
325 switch (acb->adapter_type) {
326 case ACB_ADAPTER_TYPE_A: {
327 /* enable outbound Post Queue, outbound doorbell Interrupt */
328 mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
329 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask);
330 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
333 case ACB_ADAPTER_TYPE_B: {
334 /* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */
335 mask=(ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
336 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/
337 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
340 case ACB_ADAPTER_TYPE_C: {
341 /* enable outbound Post Queue, outbound doorbell Interrupt */
342 mask=~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
343 CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask);
344 acb->outbound_int_enable= ~(intmask_org & mask) & 0x0000000f;
350 **********************************************************************
351 **********************************************************************
/*
** Type A: poll for the firmware's MESSAGE0 completion interrupt and clear
** it (write-1-to-clear).  Outer do/while retries up to 20 times over an
** inner 100-iteration poll loop (comment says ~20 s total; the per-poll
** delay and the return statements are on lines missing from this extract).
** NOTE(review): callers treat a nonzero return as "message acknowledged"
** -- confirm against the full body.
*/
353 static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
356 u_int8_t Retries=0x00;
359 for(Index=0; Index < 100; Index++) {
360 if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
361 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/
366 }while(Retries++ < 20);/*max 20 sec*/
370 **********************************************************************
371 **********************************************************************
/*
** Type B: same retry scheme as the HBA variant, but acknowledgement is a
** doorbell bit; clearing also requires telling the IOP the interrupt was
** serviced (END_OF_INTERRUPT handshake on the drv2iop doorbell).
*/
373 static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
376 u_int8_t Retries=0x00;
379 for(Index=0; Index < 100; Index++) {
380 if(CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
381 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/
382 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
387 }while(Retries++ < 20);/*max 20 sec*/
391 **********************************************************************
392 **********************************************************************
/*
** Type C (ARC1880): same retry scheme; completion is reported on
** outbound_doorbell and cleared through the dedicated clear register.
*/
394 static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb)
397 u_int8_t Retries=0x00;
400 for(Index=0; Index < 100; Index++) {
401 if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
402 CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/
407 }while(Retries++ < 20);/*max 20 sec*/
411 ************************************************************************
412 ************************************************************************
/*
** Type A: ask the firmware to flush its write cache (MESG0 FLUSH_CACHE)
** and wait for the acknowledgement, retrying up to retry_count times.
** NOTE(review): the do-loop body's tail (break on success / retry_count
** decrement) is on lines missing from this extract.
*/
414 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
416 int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */
418 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
420 if(arcmsr_hba_wait_msgint_ready(acb)) {
425 }while(retry_count!=0);
428 ************************************************************************
429 ************************************************************************
/*
** Type B: flush-cache request is delivered through the drv2iop doorbell
** rather than the message register; otherwise identical retry scheme.
*/
431 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
433 int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */
435 CHIP_REG_WRITE32(HBB_DOORBELL,
436 0, drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE);
438 if(arcmsr_hbb_wait_msgint_ready(acb)) {
443 }while(retry_count!=0);
446 ************************************************************************
447 ************************************************************************
/*
** Type C: the message register write must be followed by a doorbell ring
** (DRV2IOP_MESSAGE_CMD_DONE) to make the firmware notice the command.
*/
449 static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb)
451 int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */
453 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
454 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
456 if(arcmsr_hbc_wait_msgint_ready(acb)) {
461 }while(retry_count!=0);
464 ************************************************************************
465 ************************************************************************
/*
** Dispatch the cache flush to the adapter-type-specific implementation.
** Called on shutdown/suspend paths to make the controller commit cached
** writes before power events.
*/
467 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
469 switch (acb->adapter_type) {
470 case ACB_ADAPTER_TYPE_A: {
471 arcmsr_flush_hba_cache(acb);
474 case ACB_ADAPTER_TYPE_B: {
475 arcmsr_flush_hbb_cache(acb);
478 case ACB_ADAPTER_TYPE_C: {
479 arcmsr_flush_hbc_cache(acb);
485 *******************************************************************************
486 *******************************************************************************
/*
** device_suspend method: park the IOP (stops background rebuild and
** flushes the cache via arcmsr_iop_parking) and mask all interrupts so
** nothing fires while the bus is suspended.
*/
488 static int arcmsr_suspend(device_t dev)
490 struct AdapterControlBlock *acb = device_get_softc(dev);
492 /* flush controller */
493 arcmsr_iop_parking(acb);
494 /* disable all outbound interrupt */
495 arcmsr_disable_allintr(acb);
499 *******************************************************************************
500 *******************************************************************************
/*
** device_resume method: re-run full IOP initialization, which restarts
** the background rebuild task and re-enables interrupts.
*/
502 static int arcmsr_resume(device_t dev)
504 struct AdapterControlBlock *acb = device_get_softc(dev);
506 arcmsr_iop_init(acb);
510 *********************************************************************************
511 *********************************************************************************
/* CAM async-event callback; body not visible in this extract (likely a no-op). */
513 static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg)
517 **********************************************************************
518 **********************************************************************
/*
** Finish one SCSI request: cancel its timeout callout, post-sync and
** unload the data DMA map, drop the outstanding count, thaw the SIM queue
** once the backlog falls below ARCMSR_RELEASE_SIMQ_LEVEL, and recycle the
** SRB -- except when it is in TIMEOUT state, where the SRB is kept until
** the hardware actually returns it (freed in arcmsr_drain_donequeue).
** The xpt_done() call and stand_flag handling are on lines missing from
** this extract.
*/
520 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag)
522 struct AdapterControlBlock *acb=srb->acb;
523 union ccb * pccb=srb->pccb;
525 if(srb->srb_flags & SRB_FLAG_TIMER_START)
526 callout_stop(&srb->ccb_callout);
527 if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
/* Pick the post-DMA sync op matching the transfer direction. */
530 if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
531 op = BUS_DMASYNC_POSTREAD;
533 op = BUS_DMASYNC_POSTWRITE;
535 bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
536 bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
539 atomic_subtract_int(&acb->srboutstandingcount, 1);
540 if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && (
541 acb->srboutstandingcount < ARCMSR_RELEASE_SIMQ_LEVEL)) {
542 acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN;
543 pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
546 if(srb->srb_state != ARCMSR_SRB_TIMEOUT)
547 arcmsr_free_srb(srb);
549 acb->pktReturnCount++;
555 **********************************************************************
556 **********************************************************************
/*
** Copy the firmware's sense bytes into the CCB for a CHECK CONDITION
** completion and mark the autosense data valid.  Byte 0 is forced to
** 0xF0 (Valid bit | current-error response code 0x70) because the
** firmware-provided sense may not set the Valid bit.
*/
558 static void arcmsr_report_sense_info(struct CommandControlBlock *srb)
560 union ccb * pccb=srb->pccb;
562 pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
563 pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
564 if(pccb->csio.sense_len) {
565 memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data));
/* Bounded copy: never more than either buffer can hold. */
566 memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData,
567 get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data)));
568 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */
569 pccb->ccb_h.status |= CAM_AUTOSNS_VALID;
573 *********************************************************************
574 *********************************************************************
/*
** Type A: tell the firmware to abort every outstanding command; log if
** the acknowledgement never arrives within the wait routine's timeout.
*/
576 static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
578 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
579 if(!arcmsr_hba_wait_msgint_ready(acb)) {
580 kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
584 *********************************************************************
585 *********************************************************************
/* Type B variant of abort-all: request goes through the drv2iop doorbell. */
587 static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
589 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
590 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
591 kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
595 *********************************************************************
596 *********************************************************************
/* Type C variant: message write plus the mandatory doorbell kick. */
598 static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb)
600 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
601 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
602 if(!arcmsr_hbc_wait_msgint_ready(acb)) {
603 kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
607 *********************************************************************
608 *********************************************************************
/* Dispatch abort-all-commands to the adapter-type-specific routine. */
610 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
612 switch (acb->adapter_type) {
613 case ACB_ADAPTER_TYPE_A: {
614 arcmsr_abort_hba_allcmd(acb);
617 case ACB_ADAPTER_TYPE_B: {
618 arcmsr_abort_hbb_allcmd(acb);
621 case ACB_ADAPTER_TYPE_C: {
622 arcmsr_abort_hbc_allcmd(acb);
628 **************************************************************************
629 **************************************************************************
/*
** Translate a firmware completion into CAM status and per-LUN device
** state, then complete the SRB.  A device that answers after being marked
** GONE is promoted back to GOOD.  Failures map to CAM_DEV_NOT_THERE (not
** CAM_SEL_TIMEOUT) deliberately -- see the 1.20.00.17 changelog entry:
** it stops cam_periph_error from tearing down every LUN of the target.
** NOTE(review): the `if(!error)`-vs-error branch split and the case
** break/closing braces are on lines missing from this extract.
*/
631 static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error)
635 target=srb->pccb->ccb_h.target_id;
636 lun=srb->pccb->ccb_h.target_lun;
/* Success path: device is back -- clear any GONE marking. */
638 if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
639 acb->devstate[target][lun]=ARECA_RAID_GOOD;
641 srb->pccb->ccb_h.status |= CAM_REQ_CMP;
642 arcmsr_srb_complete(srb, 1);
/* Error path: classify by the firmware-reported DeviceStatus. */
644 switch(srb->arcmsr_cdb.DeviceStatus) {
645 case ARCMSR_DEV_SELECT_TIMEOUT: {
646 if(acb->devstate[target][lun]==ARECA_RAID_GOOD) {
647 kprintf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun);
649 acb->devstate[target][lun]=ARECA_RAID_GONE;
650 srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
651 arcmsr_srb_complete(srb, 1);
654 case ARCMSR_DEV_ABORTED:
655 case ARCMSR_DEV_INIT_FAIL: {
656 acb->devstate[target][lun]=ARECA_RAID_GONE;
657 srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
658 arcmsr_srb_complete(srb, 1);
661 case SCSISTAT_CHECK_CONDITION: {
662 acb->devstate[target][lun]=ARECA_RAID_GOOD;
663 arcmsr_report_sense_info(srb);
664 arcmsr_srb_complete(srb, 1);
668 kprintf("arcmsr%d: scsi id=%d lun=%d isr got command error done, but got unknown DeviceStatus=0x%x\n"
669 , acb->pci_unit, target, lun ,srb->arcmsr_cdb.DeviceStatus);
670 acb->devstate[target][lun]=ARECA_RAID_GONE;
671 srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
672 /*unknown error or crc error just for retry*/
673 arcmsr_srb_complete(srb, 1);
679 **************************************************************************
680 **************************************************************************
/*
** Convert one completion-queue token (flag_srb) back into the SRB's
** kernel virtual address and hand it to arcmsr_report_srb_state().
** Type C tokens carry the low physical address with flag bits in the
** bottom 5 bits (mask them); type A/B tokens are the address >> 5
** (shift back).  vir2phy_offset is the constant virt-minus-phys delta of
** the SRB pool.  Stale tokens (wrong acb or not in START state) are the
** late-arriving replies of timed-out commands: those SRBs were kept
** allocated by arcmsr_srb_complete() and are freed here instead.
*/
682 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error)
684 struct CommandControlBlock *srb;
686 /* check if command done with no error*/
687 switch (acb->adapter_type) {
688 case ACB_ADAPTER_TYPE_C:
689 srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
691 case ACB_ADAPTER_TYPE_A:
692 case ACB_ADAPTER_TYPE_B:
694 srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
697 if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
698 if(srb->srb_state == ARCMSR_SRB_TIMEOUT) {
699 arcmsr_free_srb(srb);
700 kprintf("arcmsr%d: srb='%p' return srb has been timeouted\n", acb->pci_unit, srb);
703 kprintf("arcmsr%d: return srb has been completed\n"
704 "srb='%p' srb_state=0x%x outstanding srb count=%d \n",
705 acb->pci_unit, srb, srb->srb_state, acb->srboutstandingcount);
708 arcmsr_report_srb_state(acb, srb, error);
711 **************************************************************************
712 **************************************************************************
/*
** Callout handler fired when a command exceeds its deadline.  Under the
** qbuffer lock, a still-outstanding (START) SRB is moved to TIMEOUT state
** and completed to CAM with CAM_CMD_TIMEOUT; the SRB itself stays
** allocated until the firmware eventually returns it (freed in
** arcmsr_drain_donequeue).  NOTE(review): the acb assignment (from
** srb->acb, presumably) and local declarations sit on lines missing from
** this extract.
*/
714 static void arcmsr_srb_timeout(void* arg)
716 struct CommandControlBlock *srb = (struct CommandControlBlock *)arg;
717 struct AdapterControlBlock *acb;
721 target=srb->pccb->ccb_h.target_id;
722 lun=srb->pccb->ccb_h.target_lun;
724 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
725 if(srb->srb_state == ARCMSR_SRB_START)
727 cmd = srb->pccb->csio.cdb_io.cdb_bytes[0];
728 srb->srb_state = ARCMSR_SRB_TIMEOUT;
729 srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT;
730 arcmsr_srb_complete(srb, 1);
731 kprintf("arcmsr%d: scsi id %d lun %d cmd=0x%x srb='%p' ccb command time out!\n",
732 acb->pci_unit, target, lun, cmd, srb);
734 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
736 arcmsr_dump_data(acb);
741 **********************************************************************
742 **********************************************************************
/*
** Drain every pending completion from the adapter's reply queue during an
** abort/reset, so no token is left behind when the firmware is told to
** abort outstanding commands.  A: pop the outbound queue port until the
** 0xFFFFFFFF empty sentinel.  B: walk the memory-resident done_qbuffer
** ring, zero both rings, reset both indices.  C: pop outbound_queueport_low
** while the POSTQUEUE ISR bit stays set.  All paths are bounded by
** ARCMSR_MAX_OUTSTANDING_CMD to guarantee termination on wedged hardware.
*/
744 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
750 switch (acb->adapter_type) {
751 case ACB_ADAPTER_TYPE_A: {
752 u_int32_t outbound_intstatus;
754 /*clear and abort all outbound posted Q*/
755 outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
756 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/
757 while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
758 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
759 arcmsr_drain_donequeue(acb, flag_srb, error);
763 case ACB_ADAPTER_TYPE_B: {
764 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
766 /*clear all outbound posted Q*/
767 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
768 for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
769 if((flag_srb=phbbmu->done_qbuffer[i])!=0) {
770 phbbmu->done_qbuffer[i]=0;
771 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
772 arcmsr_drain_donequeue(acb, flag_srb, error);
774 phbbmu->post_qbuffer[i]=0;
775 }/*drain reply FIFO*/
776 phbbmu->doneq_index=0;
777 phbbmu->postq_index=0;
780 case ACB_ADAPTER_TYPE_C: {
782 while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
783 flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
784 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
785 arcmsr_drain_donequeue(acb, flag_srb, error);
792 ****************************************************************************
793 ****************************************************************************
/*
** Full outstanding-command recovery: with interrupts masked, drain the
** reply queue, tell the firmware to abort everything, then sweep the SRB
** pool and complete any command still in START state with CAM_REQ_ABORTED.
** Finally restore interrupts and reset all bookkeeping counters/indices.
** NOTE(review): the `if(i)` / loop-variable declarations and closing
** braces are on lines missing from this extract.
*/
795 static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
797 struct CommandControlBlock *srb;
798 u_int32_t intmask_org;
801 if(acb->srboutstandingcount>0) {
802 /* disable all outbound interrupt */
803 intmask_org=arcmsr_disable_allintr(acb);
804 /*clear and abort all outbound posted Q*/
805 arcmsr_done4abort_postqueue(acb);
806 /* talk to iop 331 outstanding command aborted*/
807 arcmsr_abort_allcmd(acb);
808 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
809 srb=acb->psrb_pool[i];
810 if(srb->srb_state==ARCMSR_SRB_START) {
811 srb->srb_state=ARCMSR_SRB_ABORTED;
812 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
813 arcmsr_srb_complete(srb, 1);
814 kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p' aborted\n"
815 , acb->pci_unit, srb->pccb->ccb_h.target_id
816 , srb->pccb->ccb_h.target_lun, srb);
819 /* enable all outbound interrupt */
820 arcmsr_enable_allintr(acb, intmask_org);
822 acb->srboutstandingcount=0;
823 acb->workingsrb_doneindex=0;
824 acb->workingsrb_startindex=0;
826 acb->pktRequestCount = 0;
827 acb->pktReturnCount = 0;
831 **********************************************************************
832 **********************************************************************
/*
** Build the firmware command frame (ARCMSR_CDB) inside the SRB from the
** CAM CCB plus the busdma segment list.  Copies target/LUN/CDB, then
** emits the scatter/gather table: 32-bit SG entries while all segments
** fit below 4 GiB, 64-bit entries (IS_SG64_ADDR tagged) otherwise; a
** segment that crosses a 4 GiB boundary is split into two SG64 entries
** because each entry's 32-bit low address + length must not wrap.
** Frames larger than 256 bytes set ARCMSR_CDB_FLAG_SGL_BSIZE so the
** firmware fetches the big SG list.  The base frame is 0x30 bytes.
** NOTE(review): the `if(nseg)` / SG32-vs-SG64 branch headers, the inner
** while over tmplength, and the cdb_sgcount++ are on lines missing from
** this extract; the visible lines are a fragment of both branches.
*/
834 static void arcmsr_build_srb(struct CommandControlBlock *srb,
835 bus_dma_segment_t *dm_segs, u_int32_t nseg)
837 struct ARCMSR_CDB * arcmsr_cdb= &srb->arcmsr_cdb;
838 u_int8_t * psge=(u_int8_t *)&arcmsr_cdb->u;
839 u_int32_t address_lo, address_hi;
840 union ccb * pccb=srb->pccb;
841 struct ccb_scsiio * pcsio= &pccb->csio;
842 u_int32_t arccdbsize=0x30;
844 memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
846 arcmsr_cdb->TargetID=pccb->ccb_h.target_id;
847 arcmsr_cdb->LUN=pccb->ccb_h.target_lun;
848 arcmsr_cdb->Function=1;
849 arcmsr_cdb->CdbLength=(u_int8_t)pcsio->cdb_len;
850 arcmsr_cdb->Context=0;
851 bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len);
853 struct AdapterControlBlock *acb=srb->acb;
855 u_int32_t length, i, cdb_sgcount=0;
/* Direction decides DMA pre-sync op and the WRITE flag in the frame. */
857 if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
858 op=BUS_DMASYNC_PREREAD;
860 op=BUS_DMASYNC_PREWRITE;
861 arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_WRITE;
862 srb->srb_flags|=SRB_FLAG_WRITE;
864 bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
865 for(i=0;i<nseg;i++) {
866 /* Get the physical address of the current data pointer */
867 length=arcmsr_htole32(dm_segs[i].ds_len);
868 address_lo=arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr));
869 address_hi=arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr));
/* SG32 path: segment entirely below 4 GiB. */
871 struct SG32ENTRY * pdma_sg=(struct SG32ENTRY *)psge;
872 pdma_sg->address=address_lo;
873 pdma_sg->length=length;
874 psge += sizeof(struct SG32ENTRY);
875 arccdbsize += sizeof(struct SG32ENTRY);
/* SG64 path: 64-bit addressing, with 4 GiB boundary splitting. */
877 u_int32_t sg64s_size=0, tmplength=length;
880 u_int64_t span4G, length0;
881 struct SG64ENTRY * pdma_sg=(struct SG64ENTRY *)psge;
883 span4G=(u_int64_t)address_lo + tmplength;
884 pdma_sg->addresshigh=address_hi;
885 pdma_sg->address=address_lo;
886 if(span4G > 0x100000000) {
887 /*see if cross 4G boundary*/
888 length0=0x100000000-address_lo;
889 pdma_sg->length=(u_int32_t)length0|IS_SG64_ADDR;
/* Remainder continues in the next 4 GiB window. */
890 address_hi=address_hi+1;
892 tmplength=tmplength-(u_int32_t)length0;
893 sg64s_size += sizeof(struct SG64ENTRY);
894 psge += sizeof(struct SG64ENTRY);
897 pdma_sg->length=tmplength|IS_SG64_ADDR;
898 sg64s_size += sizeof(struct SG64ENTRY);
899 psge += sizeof(struct SG64ENTRY);
903 arccdbsize += sg64s_size;
907 arcmsr_cdb->sgcount=(u_int8_t)cdb_sgcount;
908 arcmsr_cdb->DataLength=pcsio->dxfer_len;
909 if( arccdbsize > 256) {
910 arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_SGL_BSIZE;
/* No-data command (nseg==0 branch; header line missing from extract). */
913 arcmsr_cdb->DataLength = 0;
915 srb->arc_cdb_size=arccdbsize;
918 **************************************************************************
919 **************************************************************************
/*
** arcmsr_post_srb: hand one SRB (SCSI request block) to the IOP for
** execution.  Dispatch differs per controller generation (type A/B/C
** message units).
** NOTE(review): this excerpt is gapped (brace/else/break lines of the
** original file are missing); comments describe only visible statements.
*/
921 static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb)
923 u_int32_t cdb_shifted_phyaddr=(u_int32_t) srb->cdb_shifted_phyaddr;
924 struct ARCMSR_CDB * arcmsr_cdb=(struct ARCMSR_CDB *)&srb->arcmsr_cdb;
/* NOTE(review): POSTWRITE/POSTREAD on a buffer about to be handed to the
** device looks inverted (PRE* would be expected before a post) — confirm
** against the rest of the driver. */
926 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD);
927 atomic_add_int(&acb->srboutstandingcount, 1);
928 srb->srb_state=ARCMSR_SRB_START;
930 switch (acb->adapter_type) {
931 case ACB_ADAPTER_TYPE_A: {
/* Type A: post the shifted CDB physical address to the inbound queueport;
** a flag bit marks CDBs whose SGL exceeds the basic size. */
932 if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
933 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE);
935 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr);
939 case ACB_ADAPTER_TYPE_B: {
/* Type B: enqueue into the in-memory circular post queue, zeroing the
** next slot as an end marker, then ring the drv2iop doorbell. */
940 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
941 int ending_index, index;
943 index=phbbmu->postq_index;
944 ending_index=((index+1)%ARCMSR_MAX_HBB_POSTQUEUE);
945 phbbmu->post_qbuffer[ending_index]=0;
946 if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
947 phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE;
949 phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr;
952 index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */
953 phbbmu->postq_index=index;
954 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED);
957 case ACB_ADAPTER_TYPE_C:
/* Type C: encode the CDB size (capped at 0x300 bytes, expressed in
** 64-byte units minus one) into the low bits of the posted address. */
959 u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32;
961 arc_cdb_size=(srb->arc_cdb_size>0x300)?0x300:srb->arc_cdb_size;
962 ccb_post_stamp=(cdb_shifted_phyaddr | ((arc_cdb_size-1) >> 6) | 1);
963 cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
/* When the SRB pool sits above 4G the high dword must be written first;
** the duplicated low write below presumably belongs to the else branch
** (missing from this excerpt) — TODO confirm. */
966 CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_high, cdb_phyaddr_hi32);
967 CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
971 CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
978 ************************************************************************
979 ************************************************************************
/*
** arcmsr_get_iop_rqbuffer: return the IOP's read (IOP-to-driver) message
** QBUFFER for the detected adapter generation.  Type B reaches it through
** the separate hbb_rwbuffer mapping; A and C embed it in the message unit.
** (Excerpt is gapped: break/return lines are not shown here.)
*/
981 static struct QBUFFER * arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb)
983 struct QBUFFER *qbuffer=NULL;
985 switch (acb->adapter_type) {
986 case ACB_ADAPTER_TYPE_A: {
987 struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu;
989 qbuffer=(struct QBUFFER *)&phbamu->message_rbuffer;
992 case ACB_ADAPTER_TYPE_B: {
993 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
995 qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
998 case ACB_ADAPTER_TYPE_C: {
999 struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu;
1001 qbuffer=(struct QBUFFER *)&phbcmu->message_rbuffer;
1008 ************************************************************************
1009 ************************************************************************
/*
** arcmsr_get_iop_wqbuffer: return the IOP's write (driver-to-IOP) message
** QBUFFER for the detected adapter generation — mirror image of
** arcmsr_get_iop_rqbuffer.  (Excerpt is gapped: break/return lines are
** not shown here.)
*/
1011 static struct QBUFFER * arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb)
1013 struct QBUFFER *qbuffer=NULL;
1015 switch (acb->adapter_type) {
1016 case ACB_ADAPTER_TYPE_A: {
1017 struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu;
1019 qbuffer=(struct QBUFFER *)&phbamu->message_wbuffer;
1022 case ACB_ADAPTER_TYPE_B: {
1023 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1025 qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
1028 case ACB_ADAPTER_TYPE_C: {
1029 struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu;
1031 qbuffer=(struct QBUFFER *)&phbcmu->message_wbuffer;
1038 **************************************************************************
1039 **************************************************************************
/*
** arcmsr_iop_message_read: acknowledge to the IOP that the driver has
** consumed the data it posted, by ringing the generation-specific
** "data read OK" doorbell bit.
*/
1041 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1043 switch (acb->adapter_type) {
1044 case ACB_ADAPTER_TYPE_A: {
1045 /* let IOP know data has been read */
1046 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
1049 case ACB_ADAPTER_TYPE_B: {
1050 /* let IOP know data has been read */
1051 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
1054 case ACB_ADAPTER_TYPE_C: {
1055 /* let IOP know data has been read */
1056 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
1061 **************************************************************************
1062 **************************************************************************
/*
** arcmsr_iop_message_wrote: tell the IOP that the driver has placed fresh
** data in the write QBUFFER.  The IOP's acknowledgement arrives later via
** a hardware interrupt, which gates posting of the next QBUFFER.
*/
1064 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1066 switch (acb->adapter_type) {
1067 case ACB_ADAPTER_TYPE_A: {
1069 ** push inbound doorbell tell iop, driver data write ok
1070 ** and wait reply on next hwinterrupt for next Qbuffer post
1072 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
1075 case ACB_ADAPTER_TYPE_B: {
1077 ** push inbound doorbell tell iop, driver data write ok
1078 ** and wait reply on next hwinterrupt for next Qbuffer post
1080 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK);
1083 case ACB_ADAPTER_TYPE_C: {
1085 ** push inbound doorbell tell iop, driver data write ok
1086 ** and wait reply on next hwinterrupt for next Qbuffer post
1088 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
1094 **********************************************************************
1095 **********************************************************************
/*
** arcmsr_post_ioctldata2iop: drain pending ioctl bytes from the driver's
** circular wqbuffer into the IOP's write QBUFFER (one byte per iteration,
** at most 124 bytes per posting), then notify the IOP.
** Only runs when the IOP has acknowledged reading the previous buffer
** (ACB_F_MESSAGE_WQBUFFER_READ set).
** NOTE(review): excerpt is gapped — the iop_data/allxfer_len increments
** presumably sit on the missing lines; confirm in the full source.
*/
1097 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
1100 struct QBUFFER *pwbuffer;
1101 u_int8_t * iop_data;
1102 int32_t allxfer_len=0;
1104 pwbuffer=arcmsr_get_iop_wqbuffer(acb);
1105 iop_data=(u_int8_t *)pwbuffer->data;
1106 if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
1107 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1108 while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex)
1109 && (allxfer_len<124)) {
1110 pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex];
1111 memcpy(iop_data, pQbuffer, 1);
1112 acb->wqbuf_firstindex++;
1113 acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */
1117 pwbuffer->data_len=allxfer_len;
1119 ** push inbound doorbell and wait reply at hwinterrupt routine for next Qbuffer post
1121 arcmsr_iop_message_wrote(acb);
1125 ************************************************************************
1126 ************************************************************************
/*
** arcmsr_stop_hba_bgrb: (type A) ask the firmware to stop background
** rebuild via message register 0, then poll for the message-int ack;
** log a timeout if the ack never arrives.
*/
1128 static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
1130 acb->acb_flags &=~ACB_F_MSG_START_BGRB;
1131 CHIP_REG_WRITE32(HBA_MessageUnit,
1132 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1133 if(!arcmsr_hba_wait_msgint_ready(acb)) {
1134 kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1140 ************************************************************************
1141 ************************************************************************
/*
** arcmsr_stop_hbb_bgrb: (type B) stop background rebuild via the
** drv2iop doorbell, then poll for the message-int ack; log on timeout.
*/
1143 static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
1145 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1146 CHIP_REG_WRITE32(HBB_DOORBELL,
1147 0, drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
1148 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
1149 kprintf( "arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1154 ************************************************************************
1155 ************************************************************************
/*
** arcmsr_stop_hbc_bgrb: (type C) stop background rebuild; type C needs
** both the message register write AND an explicit doorbell kick, then
** polls for the ack; logs on timeout.
*/
1157 static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb)
1159 acb->acb_flags &=~ACB_F_MSG_START_BGRB;
1160 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1161 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1162 if(!arcmsr_hbc_wait_msgint_ready(acb)) {
1163 kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
1167 ************************************************************************
1168 ************************************************************************
/*
** arcmsr_stop_adapter_bgrb: dispatch "stop background rebuild" to the
** generation-specific implementation.
*/
1170 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1172 switch (acb->adapter_type) {
1173 case ACB_ADAPTER_TYPE_A: {
1174 arcmsr_stop_hba_bgrb(acb);
1177 case ACB_ADAPTER_TYPE_B: {
1178 arcmsr_stop_hbb_bgrb(acb);
1181 case ACB_ADAPTER_TYPE_C: {
1182 arcmsr_stop_hbc_bgrb(acb);
1188 ************************************************************************
1189 ************************************************************************
/*
** arcmsr_poll: CAM SIM poll entry point — service the adapter's interrupt
** sources synchronously (used when interrupts are unavailable, e.g. dump).
** NOTE(review): the lockstatus() result is captured in 'mutex' but no use
** is visible in this excerpt — presumably the missing lines conditionally
** skip the lock acquire when already held; confirm in the full source.
*/
1191 static void arcmsr_poll(struct cam_sim * psim)
1193 struct AdapterControlBlock *acb;
1196 acb = (struct AdapterControlBlock *)cam_sim_softc(psim);
1197 mutex = lockstatus(&acb->qbuffer_lock, curthread);
1199 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1200 arcmsr_interrupt(acb);
1202 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1205 **************************************************************************
1206 **************************************************************************
/*
** arcmsr_iop2drv_data_wrote_handle: IOP posted message data to the driver.
** If the driver's circular rqbuffer has room for the whole payload, copy
** it in byte-by-byte and ack the IOP; otherwise mark overflow so the data
** is picked up later by the ioctl read path.
** The empty-space computation uses the power-of-two mask
** (ARCMSR_MAX_QBUFFER-1), which assumes ARCMSR_MAX_QBUFFER is 2^n.
*/
1208 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
1210 struct QBUFFER *prbuffer;
1213 int my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
1215 /*check this iop data if overflow my rqbuffer*/
1216 rqbuf_lastindex=acb->rqbuf_lastindex;
1217 rqbuf_firstindex=acb->rqbuf_firstindex;
1218 prbuffer=arcmsr_get_iop_rqbuffer(acb);
1219 iop_data=(u_int8_t *)prbuffer->data;
1220 iop_len=prbuffer->data_len;
1221 my_empty_len=(rqbuf_firstindex-rqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
1222 if(my_empty_len>=iop_len) {
1223 while(iop_len > 0) {
1224 pQbuffer=&acb->rqbuffer[rqbuf_lastindex];
1225 memcpy(pQbuffer, iop_data, 1);
/* (gapped excerpt: index/iop_data/iop_len increments sit on missing lines) */
1227 rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;/*if last index number set it to 0 */
1231 acb->rqbuf_lastindex=rqbuf_lastindex;
1232 arcmsr_iop_message_read(acb);
1233 /*signature, let IOP know data has been read */
1235 acb->acb_flags|=ACB_F_IOPDATA_OVERFLOW;
1239 **************************************************************************
1240 **************************************************************************
/*
** arcmsr_iop2drv_data_read_handle: IOP acknowledged reading the driver's
** previous write QBUFFER.  Mark the channel writable again; if more ioctl
** bytes are queued in wqbuffer, immediately post the next batch (up to
** 124 bytes, copied one byte per iteration); when the queue drains, flag
** the wqbuffer as cleared.
*/
1242 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
1244 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
1246 *****************************************************************
1247 ** check if there are any mail packages from user space program
1248 ** in my post bag, now is the time to send them into Areca's firmware
1249 *****************************************************************
1251 if(acb->wqbuf_firstindex!=acb->wqbuf_lastindex) {
1253 struct QBUFFER *pwbuffer;
1257 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1258 pwbuffer=arcmsr_get_iop_wqbuffer(acb);
1259 iop_data=(u_int8_t *)pwbuffer->data;
1260 while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex)
1261 && (allxfer_len<124)) {
1262 pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex];
1263 memcpy(iop_data, pQbuffer, 1);
1264 acb->wqbuf_firstindex++;
1265 acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */
1269 pwbuffer->data_len=allxfer_len;
1271 ** push inbound doorbell tell iop driver data write ok
1272 ** and wait reply on next hwinterrupt for next Qbuffer post
1274 arcmsr_iop_message_wrote(acb);
1276 if(acb->wqbuf_firstindex==acb->wqbuf_lastindex) {
1277 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
/*
** arcmsr_rescanLun_cb: CAM completion callback for the XPT_SCAN_LUN ccb
** issued by arcmsr_rescan_lun — log outcome and free the path.
*/
1281 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb)
1284 if (ccb->ccb_h.status != CAM_REQ_CMP)
1285 kprintf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x, failure status=%x\n",ccb->ccb_h.target_id,ccb->ccb_h.target_lun,ccb->ccb_h.status);
1287 kprintf("arcmsr_rescanLun_cb: Rescan lun successfully!\n");
1289 xpt_free_path(ccb->ccb_h.path);
/*
** arcmsr_rescan_lun: ask CAM to (re)scan one target/lun after a hot
** plug/unplug event.  Builds an XPT_SCAN_LUN ccb at priority 5 with
** arcmsr_rescanLun_cb as completion handler (which frees the path).
** NOTE(review): the xpt_action() call that submits the ccb is on a line
** missing from this excerpt — confirm in the full source.
*/
1292 static void arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun)
1294 struct cam_path *path;
1297 if (xpt_create_path(&path, xpt_periph, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP)
1299 /* kprintf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */
1300 bzero(&ccb, sizeof(union ccb));
1301 xpt_setup_ccb(&ccb.ccb_h, path, 5);
1302 ccb.ccb_h.func_code = XPT_SCAN_LUN;
1303 ccb.ccb_h.cbfcnp = arcmsr_rescanLun_cb;
1304 ccb.crcn.flags = CAM_FLAG_NONE;
/*
** arcmsr_abort_dr_ccbs: abort every in-flight SRB addressed to a device
** that just departed (dynamic-reconfiguration path).  Runs with all
** outbound interrupts masked and the qbuffer lock held so the SRB pool
** can be walked safely.
*/
1309 static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun)
1311 struct CommandControlBlock *srb;
1312 u_int32_t intmask_org;
1315 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1316 /* disable all outbound interrupts */
1317 intmask_org = arcmsr_disable_allintr(acb);
1318 for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++)
1320 srb = acb->psrb_pool[i];
1321 if (srb->srb_state == ARCMSR_SRB_START)
1323 if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun))
1325 srb->srb_state = ARCMSR_SRB_ABORTED;
1326 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
1327 arcmsr_srb_complete(srb, 1);
1328 kprintf("arcmsr%d: abort scsi id %d lun %d srb=%p \n", acb->pci_unit, target, lun, srb);
1332 /* enable outbound Post Queue, outbound doorbell Interrupt */
1333 arcmsr_enable_allintr(acb, intmask_org);
1334 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1339 **************************************************************************
1340 **************************************************************************
/*
** arcmsr_dr_handle: process a device-map change reported by the firmware
** (hot plug/unplug).  Reads the 4-dword (16-byte) device bitmap from the
** generation-specific msgcode_rwbuffer window, XORs it against the cached
** map, and for each changed (target,lun) bit either aborts outstanding
** SRBs and rescans (departed) or just rescans (arrived).
** NOTE(review): 'devicemap' holds a register OFFSET, not map contents;
** its per-iteration advance (+4) is presumably on lines missing from
** this excerpt — confirm in the full source.
*/
1342 static void arcmsr_dr_handle(struct AdapterControlBlock *acb) {
1343 u_int32_t devicemap;
1344 u_int32_t target, lun;
1345 u_int32_t deviceMapCurrent[4]={0};
1348 switch (acb->adapter_type) {
1349 case ACB_ADAPTER_TYPE_A:
1350 devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1351 for (target= 0; target < 4; target++)
1353 deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1358 case ACB_ADAPTER_TYPE_B:
/* type B reads through the second bus-space window (btag/bhandle[1]) */
1359 devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1360 for (target= 0; target < 4; target++)
1362 deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap);
1367 case ACB_ADAPTER_TYPE_C:
1368 devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1369 for (target= 0; target < 4; target++)
1371 deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1377 if(acb->acb_flags & ACB_F_BUS_HANG_ON)
1379 acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
1382 ** adapter posted CONFIG message
1383 ** copy the new map, note if there are differences with the current map
1385 pDevMap = (u_int8_t *)&deviceMapCurrent[0];
1386 for (target= 0; target < ARCMSR_MAX_TARGETID - 1; target++)
1388 if (*pDevMap != acb->device_map[target])
1390 u_int8_t difference, bit_check;
1392 difference= *pDevMap ^ acb->device_map[target];
1393 for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++)
1395 bit_check=(1 << lun); /*check bit from 0....31*/
1396 if(difference & bit_check)
1398 if(acb->device_map[target] & bit_check)
1399 {/* unit departed */
1400 kprintf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n",target,lun);
1401 arcmsr_abort_dr_ccbs(acb, target, lun);
1402 arcmsr_rescan_lun(acb, target, lun);
1403 acb->devstate[target][lun] = ARECA_RAID_GONE;
/* (gapped excerpt: the arrived/else branch header is on missing lines) */
1407 kprintf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n",target,lun);
1408 arcmsr_rescan_lun(acb, target, lun);
1409 acb->devstate[target][lun] = ARECA_RAID_GOOD;
1413 /* kprintf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */
1414 acb->device_map[target]= *pDevMap;
1420 **************************************************************************
1421 **************************************************************************
/*
** arcmsr_hba_message_isr: (type A) message-0 interrupt — clear the int
** status bit, read the firmware's message code, and run the device-map
** handler when it signals GET_CONFIG completion.
*/
1423 static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) {
1424 u_int32_t outbound_message;
1426 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1427 outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]);
1428 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1429 arcmsr_dr_handle( acb );
1432 **************************************************************************
1433 **************************************************************************
/*
** arcmsr_hbb_message_isr: (type B) same as the type-A message ISR, but
** the ack goes through the iop2drv doorbell and the message code lives
** in the separate HBB_RWBUFFER window (bus-space index 1).
*/
1435 static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) {
1436 u_int32_t outbound_message;
1438 /* clear interrupts */
1439 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1440 outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]);
1441 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1442 arcmsr_dr_handle( acb );
1445 **************************************************************************
1446 **************************************************************************
/*
** arcmsr_hbc_message_isr: (type C) message-done interrupt — clear via the
** doorbell-clear register, then check for GET_CONFIG completion and run
** the device-map handler.
*/
1448 static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) {
1449 u_int32_t outbound_message;
1451 CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
1452 outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]);
1453 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1454 arcmsr_dr_handle( acb );
1457 **************************************************************************
1458 **************************************************************************
/*
** arcmsr_hba_doorbell_isr: (type A) doorbell interrupt — read-and-clear
** the outbound doorbell, then dispatch the IOP's "I wrote data" and
** "I read your data" events to the QBUFFER handlers.
*/
1460 static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
1462 u_int32_t outbound_doorbell;
1465 *******************************************************************
1466 ** Maybe here we need to check wrqbuffer_lock is lock or not
1467 ** DOORBELL: din! don!
1468 ** check if there are any mail need to pack from firmware
1469 *******************************************************************
1471 outbound_doorbell=CHIP_REG_READ32(HBA_MessageUnit,
1472 0, outbound_doorbell);
1473 CHIP_REG_WRITE32(HBA_MessageUnit,
1474 0, outbound_doorbell, outbound_doorbell); /* clear doorbell interrupt */
1475 if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
1476 arcmsr_iop2drv_data_wrote_handle(acb);
1478 if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
1479 arcmsr_iop2drv_data_read_handle(acb);
1483 **************************************************************************
1484 **************************************************************************
/*
** arcmsr_hbc_doorbell_isr: (type C) doorbell interrupt — like the type-A
** version but clears through the dedicated doorbell-clear register and
** additionally forwards message-command-done events to the message ISR.
*/
1486 static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb)
1488 u_int32_t outbound_doorbell;
1491 *******************************************************************
1492 ** Maybe here we need to check wrqbuffer_lock is lock or not
1493 ** DOORBELL: din! don!
1494 ** check if there are any mail need to pack from firmware
1495 *******************************************************************
1497 outbound_doorbell=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
1498 CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /* clear doorbell interrupt */
1499 if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
1500 arcmsr_iop2drv_data_wrote_handle(acb);
1502 if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
1503 arcmsr_iop2drv_data_read_handle(acb);
1505 if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
1506 arcmsr_hbc_message_isr(acb); /* messenger of "driver to iop commands" */
1510 **************************************************************************
1511 **************************************************************************
/*
** arcmsr_hba_postqueue_isr: (type A) drain the completion FIFO — each
** outbound_queueport read yields a finished SRB token (0xFFFFFFFF means
** empty); mode-0 error bit selects the error completion path.
*/
1513 static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
1519 *****************************************************************************
1520 ** areca cdb command done
1521 *****************************************************************************
1523 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1524 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1525 while((flag_srb=CHIP_REG_READ32(HBA_MessageUnit,
1526 0, outbound_queueport)) != 0xFFFFFFFF) {
1527 /* check if command done with no error*/
1528 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
1529 arcmsr_drain_donequeue(acb, flag_srb, error);
1530 } /*drain reply FIFO*/
1533 **************************************************************************
1534 **************************************************************************
/*
** arcmsr_hbb_postqueue_isr: (type B) drain completions from the in-memory
** circular done queue — a zero entry terminates the scan; each consumed
** slot is zeroed and the ring index advanced modulo the queue size.
*/
1536 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1538 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1544 *****************************************************************************
1545 ** areca cdb command done
1546 *****************************************************************************
1548 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1549 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1550 index=phbbmu->doneq_index;
1551 while((flag_srb=phbbmu->done_qbuffer[index]) != 0) {
1552 phbbmu->done_qbuffer[index]=0;
1554 index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */
1555 phbbmu->doneq_index=index;
1556 /* check if command done with no error*/
1557 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
1558 arcmsr_drain_donequeue(acb, flag_srb, error);
1559 } /*drain reply FIFO*/
1562 **************************************************************************
1563 **************************************************************************
/*
** arcmsr_hbc_postqueue_isr: (type C) drain completions while the host
** interrupt status still shows postqueue work; uses mode-1 error flag.
** Every ARCMSR_HBC_ISR_THROTTLING_LEVEL completions the driver pokes the
** throttling doorbell so firmware keeps feeding the queue.
** (Gapped excerpt: the throttling counter reset/increment is on missing
** lines.)
*/
1565 static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
1567 u_int32_t flag_srb,throttling=0;
1571 *****************************************************************************
1572 ** areca cdb command done
1573 *****************************************************************************
1575 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1577 while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1579 flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
1580 /* check if command done with no error*/
1581 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
1582 arcmsr_drain_donequeue(acb, flag_srb, error);
1583 if(throttling==ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
1584 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
1588 } /*drain reply FIFO*/
1591 **********************************************************************
1592 **********************************************************************
/*
** arcmsr_handle_hba_isr: (type A) top-level interrupt service — mask the
** read status with the enabled-interrupt set (shared-IRQ filtering),
** acknowledge, then fan out to doorbell / postqueue / message handlers.
*/
1594 static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb)
1596 u_int32_t outbound_intStatus;
1598 *********************************************
1599 ** check outbound intstatus
1600 *********************************************
1602 outbound_intStatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
1603 if(!outbound_intStatus) {
1604 /*it must be share irq*/
1607 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intStatus);/*clear interrupt*/
1608 /* MU doorbell interrupts*/
1609 if(outbound_intStatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
1610 arcmsr_hba_doorbell_isr(acb);
1612 /* MU post queue interrupts*/
1613 if(outbound_intStatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
1614 arcmsr_hba_postqueue_isr(acb);
1616 if(outbound_intStatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
1617 arcmsr_hba_message_isr(acb);
1621 **********************************************************************
1622 **********************************************************************
/*
** arcmsr_handle_hbb_isr: (type B) top-level interrupt service — the
** iop2drv doorbell doubles as the status register.  Clear by writing the
** complement, read back to flush the posted write, signal end-of-
** interrupt, then fan out to the data/postqueue/message handlers.
*/
1624 static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb)
1626 u_int32_t outbound_doorbell;
1628 *********************************************
1629 ** check outbound intstatus
1630 *********************************************
1632 outbound_doorbell=CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & acb->outbound_int_enable;
1633 if(!outbound_doorbell) {
1634 /*it must be share irq*/
1637 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */
1638 CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell);
1639 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
1640 /* MU ioctl transfer doorbell interrupts*/
1641 if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
1642 arcmsr_iop2drv_data_wrote_handle(acb);
1644 if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
1645 arcmsr_iop2drv_data_read_handle(acb);
1647 /* MU post queue interrupts*/
1648 if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
1649 arcmsr_hbb_postqueue_isr(acb);
1651 if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
1652 arcmsr_hbb_message_isr(acb);
1656 **********************************************************************
1657 **********************************************************************
/*
** arcmsr_handle_hbc_isr: (type C) top-level interrupt service — check the
** host interrupt status (zero means a shared-IRQ false alarm) and fan out
** to the doorbell and postqueue handlers, which do their own acking.
*/
1659 static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb)
1661 u_int32_t host_interrupt_status;
1663 *********************************************
1664 ** check outbound intstatus
1665 *********************************************
1667 host_interrupt_status=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status);
1668 if(!host_interrupt_status) {
1669 /*it must be share irq*/
1672 /* MU doorbell interrupts*/
1673 if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
1674 arcmsr_hbc_doorbell_isr(acb);
1676 /* MU post queue interrupts*/
1677 if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1678 arcmsr_hbc_postqueue_isr(acb);
1682 ******************************************************************************
1683 ******************************************************************************
/*
** arcmsr_interrupt: dispatch interrupt servicing to the generation-
** specific handler; unknown adapter types are only logged.
*/
1685 static void arcmsr_interrupt(struct AdapterControlBlock *acb)
1687 switch (acb->adapter_type) {
1688 case ACB_ADAPTER_TYPE_A:
1689 arcmsr_handle_hba_isr(acb);
1691 case ACB_ADAPTER_TYPE_B:
1692 arcmsr_handle_hbb_isr(acb);
1694 case ACB_ADAPTER_TYPE_C:
1695 arcmsr_handle_hbc_isr(acb);
1698 kprintf("arcmsr%d: interrupt service,"
1699 " unknown adapter type =%d\n", acb->pci_unit, acb->adapter_type);
1704 **********************************************************************
1705 **********************************************************************
/*
** arcmsr_intr_handler: registered interrupt entry point — serialize on
** the qbuffer lock and run the common interrupt dispatcher.
*/
1707 static void arcmsr_intr_handler(void *arg)
1709 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)arg;
1711 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1712 arcmsr_interrupt(acb);
1713 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1716 ******************************************************************************
1717 ******************************************************************************
/*
** arcmsr_polling_devmap: periodic callout (every 5 s) that asks the
** firmware for its current device map via a GET_CONFIG message; the
** reply triggers the message ISR, which runs arcmsr_dr_handle.  The
** callout re-arms itself until the adapter is being stopped.
*/
1719 static void arcmsr_polling_devmap(void* arg)
1721 struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
1722 switch (acb->adapter_type) {
1723 case ACB_ADAPTER_TYPE_A:
1724 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1727 case ACB_ADAPTER_TYPE_B:
1728 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
1731 case ACB_ADAPTER_TYPE_C:
/* type C needs a doorbell kick after the message register write */
1732 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1733 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1737 if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)
1739 callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb); /* polling per 5 seconds */
1744 *******************************************************************************
1746 *******************************************************************************
/*
** arcmsr_iop_parking: quiesce the adapter (shutdown/goodbye path) — if
** background rebuild was started, stop it and flush the adapter cache
** with all outbound interrupts masked, then restore the mask.
*/
1748 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
1750 u_int32_t intmask_org;
1753 /* stop adapter background rebuild */
1754 if(acb->acb_flags & ACB_F_MSG_START_BGRB) {
1755 intmask_org = arcmsr_disable_allintr(acb);
1756 arcmsr_stop_adapter_bgrb(acb);
1757 arcmsr_flush_adapter_cache(acb);
1758 arcmsr_enable_allintr(acb, intmask_org);
1763 ***********************************************************************
1765 ************************************************************************
/*
** arcmsr_iop_ioctlcmd: service user-space message ioctls (Areca CLI/HTTP
** management protocol).  Validates the "ARCMSR" signature, then handles
** read/write/clear of the circular message queues plus a handful of
** control messages, all under the qbuffer lock.
** Returns EINVAL on bad signature/unknown command, ARCMSR_MESSAGE_SUCCESS
** on success.
** NOTE(review): this excerpt is gapped (braces/else/increment lines of
** the original file are missing); comments describe visible code only.
*/
1767 u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg)
1769 struct CMD_MESSAGE_FIELD * pcmdmessagefld;
1770 u_int32_t retvalue=EINVAL;
1772 pcmdmessagefld=(struct CMD_MESSAGE_FIELD *) arg;
/* reject requests that do not carry the protocol signature */
1773 if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) {
1776 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1778 case ARCMSR_MESSAGE_READ_RQBUFFER: {
/* copy queued IOP->driver bytes to the user buffer, one byte per
** iteration, up to 1031 bytes */
1779 u_int8_t * pQbuffer;
1780 u_int8_t * ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
1781 u_int32_t allxfer_len=0;
1783 while((acb->rqbuf_firstindex!=acb->rqbuf_lastindex)
1784 && (allxfer_len<1031)) {
1785 /*copy READ QBUFFER to srb*/
1786 pQbuffer= &acb->rqbuffer[acb->rqbuf_firstindex];
1787 memcpy(ptmpQbuffer, pQbuffer, 1);
1788 acb->rqbuf_firstindex++;
1789 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1790 /*if last index number set it to 0 */
/* if an earlier IOP posting overflowed the ring, absorb the held-back
** IOP data now that space was freed, then ack the IOP */
1794 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1795 struct QBUFFER * prbuffer;
1796 u_int8_t * iop_data;
1799 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1800 prbuffer=arcmsr_get_iop_rqbuffer(acb);
1801 iop_data=(u_int8_t *)prbuffer->data;
1802 iop_len=(u_int32_t)prbuffer->data_len;
1803 /*this iop data does no chance to make me overflow again here, so just do it*/
1805 pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
1806 memcpy(pQbuffer, iop_data, 1);
1807 acb->rqbuf_lastindex++;
1808 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1809 /*if last index number set it to 0 */
1813 arcmsr_iop_message_read(acb);
1814 /*signature, let IOP know data has been readed */
1816 pcmdmessagefld->cmdmessage.Length=allxfer_len;
1817 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1818 retvalue=ARCMSR_MESSAGE_SUCCESS;
1821 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
/* queue user bytes toward the IOP; if a previous batch is still
** pending, flush it first and report error to the caller */
1822 u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1823 u_int8_t * pQbuffer;
1824 u_int8_t * ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
1826 user_len=pcmdmessagefld->cmdmessage.Length;
1827 /*check if data xfer length of this request will overflow my array qbuffer */
1828 wqbuf_lastindex=acb->wqbuf_lastindex;
1829 wqbuf_firstindex=acb->wqbuf_firstindex;
1830 if(wqbuf_lastindex!=wqbuf_firstindex) {
1831 arcmsr_post_ioctldata2iop(acb);
1832 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
/* empty-space math assumes ARCMSR_MAX_QBUFFER is a power of two */
1834 my_empty_len=(wqbuf_firstindex-wqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
1835 if(my_empty_len>=user_len) {
1837 /*copy srb data to wqbuffer*/
1838 pQbuffer= &acb->wqbuffer[acb->wqbuf_lastindex];
1839 memcpy(pQbuffer, ptmpuserbuffer, 1);
1840 acb->wqbuf_lastindex++;
1841 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1842 /*if last index number set it to 0 */
1846 /*post fist Qbuffer*/
1847 if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
1848 acb->acb_flags &=~ACB_F_MESSAGE_WQBUFFER_CLEARED;
1849 arcmsr_post_ioctldata2iop(acb);
1851 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1853 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1856 retvalue=ARCMSR_MESSAGE_SUCCESS;
1859 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
/* reset the read ring; first ack any overflowed IOP data */
1860 u_int8_t * pQbuffer=acb->rqbuffer;
1862 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1863 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1864 arcmsr_iop_message_read(acb);
1865 /*signature, let IOP know data has been readed */
1867 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
1868 acb->rqbuf_firstindex=0;
1869 acb->rqbuf_lastindex=0;
1870 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1871 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1872 retvalue=ARCMSR_MESSAGE_SUCCESS;
1875 case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
/* reset the write ring and mark it cleared + readable */
1877 u_int8_t * pQbuffer=acb->wqbuffer;
1879 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1880 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1881 arcmsr_iop_message_read(acb);
1882 /*signature, let IOP know data has been readed */
1884 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
1885 acb->wqbuf_firstindex=0;
1886 acb->wqbuf_lastindex=0;
1887 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1888 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1889 retvalue=ARCMSR_MESSAGE_SUCCESS;
1892 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
/* reset both rings in one shot */
1893 u_int8_t * pQbuffer;
1895 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1896 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1897 arcmsr_iop_message_read(acb);
1898 /*signature, let IOP know data has been readed */
1900 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED
1901 |ACB_F_MESSAGE_RQBUFFER_CLEARED
1902 |ACB_F_MESSAGE_WQBUFFER_READ);
1903 acb->rqbuf_firstindex=0;
1904 acb->rqbuf_lastindex=0;
1905 acb->wqbuf_firstindex=0;
1906 acb->wqbuf_lastindex=0;
1907 pQbuffer=acb->rqbuffer;
1908 memset(pQbuffer, 0, sizeof(struct QBUFFER));
1909 pQbuffer=acb->wqbuffer;
1910 memset(pQbuffer, 0, sizeof(struct QBUFFER));
1911 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1912 retvalue=ARCMSR_MESSAGE_SUCCESS;
1915 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
1916 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_3F;
1917 retvalue=ARCMSR_MESSAGE_SUCCESS;
1920 case ARCMSR_MESSAGE_SAY_HELLO: {
1921 u_int8_t * hello_string="Hello! I am ARCMSR";
1922 u_int8_t * puserbuffer=(u_int8_t *)pcmdmessagefld->messagedatabuffer;
/* BUG(review): memcpy() returns its dest argument, which is never NULL
** here, so this condition is always true and the ERROR path is always
** taken — the OK path below looks unreachable.  Fix belongs with the
** full source (likely meant copyout()/a comparison). */
1924 if(memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string))) {
1925 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1926 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1929 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1930 retvalue=ARCMSR_MESSAGE_SUCCESS;
1933 case ARCMSR_MESSAGE_SAY_GOODBYE: {
1934 arcmsr_iop_parking(acb);
1935 retvalue=ARCMSR_MESSAGE_SUCCESS;
1938 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
1939 arcmsr_flush_adapter_cache(acb);
1940 retvalue=ARCMSR_MESSAGE_SUCCESS;
1944 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1948 **************************************************************************
1949 **************************************************************************
/*
** arcmsr_free_srb: return a completed SRB to the adapter's free pool.
** Marks the SRB done and stores it at the tail (workingsrb_doneindex) of
** the circular srbworkingQ, under the qbuffer lock.
** NOTE(review): this extract is missing lines (opening brace, the
** acb = srb->acb assignment, lock-recursion check) — verify against the
** full source before editing.
*/
1951 static void arcmsr_free_srb(struct CommandControlBlock *srb)
1953 struct AdapterControlBlock *acb;
1957 mutex = lockstatus(&acb->qbuffer_lock, curthread); /* presumably avoids re-acquiring if already held — TODO confirm */
1959 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1960 srb->srb_state=ARCMSR_SRB_DONE;
1962 acb->srbworkingQ[acb->workingsrb_doneindex]=srb; /* push onto free ring */
1963 acb->workingsrb_doneindex++;
1964 acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM; /* wrap ring index */
1966 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1969 **************************************************************************
1970 **************************************************************************
/*
** arcmsr_get_freesrb: pop the next free SRB from the circular srbworkingQ.
** Returns NULL (the initial value) when the ring is empty, i.e. when
** advancing the start index would collide with the done index.
** NOTE(review): extract is missing lines (braces, else branch setting
** srb=NULL on empty) — verify against the full source.
*/
1972 struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb)
1974 struct CommandControlBlock *srb=NULL;
1975 u_int32_t workingsrb_startindex, workingsrb_doneindex;
1978 mutex = lockstatus(&acb->qbuffer_lock, curthread); /* presumably lock-recursion guard — TODO confirm */
1980 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1981 workingsrb_doneindex=acb->workingsrb_doneindex;
1982 workingsrb_startindex=acb->workingsrb_startindex;
1983 srb=acb->srbworkingQ[workingsrb_startindex]; /* candidate SRB at head */
1984 workingsrb_startindex++;
1985 workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM; /* wrap ring index */
1986 if(workingsrb_doneindex!=workingsrb_startindex) {
1987 acb->workingsrb_startindex=workingsrb_startindex; /* commit pop only when ring not empty */
1992 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1996 **************************************************************************
1997 **************************************************************************
/*
** arcmsr_iop_message_xfer: service an Areca vendor-specific message CCB.
** Decodes a 32-bit control code from CDB bytes 5..8 (big-endian), then
** dispatches: read/write the driver<->IOP ring buffers, clear buffers,
** return magic code 0x3F, "hello" probe, park IOP, or flush cache.
** Returns 0 / ARCMSR_MESSAGE_FAIL on bad CCB or unknown control code.
** NOTE(review): this extract is missing lines throughout (else branches,
** break statements, closing braces, some declarations such as 'buffer',
** 'pQbuffer', 'iop_data') — verify all control flow against full source.
*/
1999 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb)
2001 struct CMD_MESSAGE_FIELD * pcmdmessagefld;
2002 int retvalue = 0, transfer_len = 0;
2004 u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 |
2005 (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 |
2006 (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8 |
2007 (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8];
2008 /* 4 bytes: Areca io control code */
2009 if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
2010 buffer = pccb->csio.data_ptr; /* single contiguous buffer only; S/G is rejected */
2011 transfer_len = pccb->csio.dxfer_len;
2013 retvalue = ARCMSR_MESSAGE_FAIL;
2016 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
2017 retvalue = ARCMSR_MESSAGE_FAIL; /* user buffer larger than message frame */
2020 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
2021 switch(controlcode) {
2022 case ARCMSR_MESSAGE_READ_RQBUFFER: {
2024 u_int8_t *ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
2025 int32_t allxfer_len = 0;
 /* drain driver rqbuffer into the user message buffer, byte at a time,
 ** capped at 1031 bytes (message data area size) */
2027 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
2028 && (allxfer_len < 1031)) {
2029 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
2030 memcpy(ptmpQbuffer, pQbuffer, 1);
2031 acb->rqbuf_firstindex++;
2032 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER; /* wrap ring index */
2036 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2037 struct QBUFFER *prbuffer;
 /* IOP had more data than the ring could hold: pull the pending
 ** IOP qbuffer into the driver ring now that space was freed */
2041 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2042 prbuffer=arcmsr_get_iop_rqbuffer(acb);
2043 iop_data = (u_int8_t *)prbuffer->data;
2044 iop_len =(u_int32_t)prbuffer->data_len;
2045 while (iop_len > 0) {
2046 pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
2047 memcpy(pQbuffer, iop_data, 1);
2048 acb->rqbuf_lastindex++;
2049 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2053 arcmsr_iop_message_read(acb); /* ack: tell IOP its buffer was consumed */
2055 pcmdmessagefld->cmdmessage.Length = allxfer_len;
2056 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2057 retvalue=ARCMSR_MESSAGE_SUCCESS;
2060 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2061 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
2063 u_int8_t *ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
2065 user_len = pcmdmessagefld->cmdmessage.Length;
2066 wqbuf_lastindex = acb->wqbuf_lastindex;
2067 wqbuf_firstindex = acb->wqbuf_firstindex;
2068 if (wqbuf_lastindex != wqbuf_firstindex) {
 /* ring not empty: flush pending data to IOP and fail this write
 ** with CHECK-CONDITION style sense data */
2069 arcmsr_post_ioctldata2iop(acb);
2070 /* has error report sensedata */
2071 if(pccb->csio.sense_len) {
2072 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2073 /* Valid,ErrorCode */
2074 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2075 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
2076 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2077 /* AdditionalSenseLength */
2078 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2079 /* AdditionalSenseCode */
2081 retvalue = ARCMSR_MESSAGE_FAIL;
 /* free space in the power-of-two ring (mask keeps result in range) */
2083 my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
2084 &(ARCMSR_MAX_QBUFFER - 1);
2085 if (my_empty_len >= user_len) {
2086 while (user_len > 0) {
2087 pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
2088 memcpy(pQbuffer, ptmpuserbuffer, 1);
2089 acb->wqbuf_lastindex++;
2090 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
2094 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2096 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2097 arcmsr_post_ioctldata2iop(acb); /* kick first transfer to IOP */
2100 /* has error report sensedata */
2101 if(pccb->csio.sense_len) {
2102 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2103 /* Valid,ErrorCode */
2104 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2105 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
2106 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2107 /* AdditionalSenseLength */
2108 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2109 /* AdditionalSenseCode */
2111 retvalue = ARCMSR_MESSAGE_FAIL;
2116 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
2117 u_int8_t *pQbuffer = acb->rqbuffer;
2119 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2120 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2121 arcmsr_iop_message_read(acb); /* ack pending IOP data before discarding */
2123 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2124 acb->rqbuf_firstindex = 0;
2125 acb->rqbuf_lastindex = 0;
2126 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2127 pcmdmessagefld->cmdmessage.ReturnCode =
2128 ARCMSR_MESSAGE_RETURNCODE_OK;
2131 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
2132 u_int8_t *pQbuffer = acb->wqbuffer;
2134 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2135 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2136 arcmsr_iop_message_read(acb);
2139 (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2140 ACB_F_MESSAGE_WQBUFFER_READ);
2141 acb->wqbuf_firstindex = 0;
2142 acb->wqbuf_lastindex = 0;
2143 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2144 pcmdmessagefld->cmdmessage.ReturnCode =
2145 ARCMSR_MESSAGE_RETURNCODE_OK;
2148 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
 /* reset both read and write rings to the pristine state */
2151 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2152 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2153 arcmsr_iop_message_read(acb);
2156 (ACB_F_MESSAGE_WQBUFFER_CLEARED
2157 | ACB_F_MESSAGE_RQBUFFER_CLEARED
2158 | ACB_F_MESSAGE_WQBUFFER_READ);
2159 acb->rqbuf_firstindex = 0;
2160 acb->rqbuf_lastindex = 0;
2161 acb->wqbuf_firstindex = 0;
2162 acb->wqbuf_lastindex = 0;
2163 pQbuffer = acb->rqbuffer;
2164 memset(pQbuffer, 0, sizeof (struct QBUFFER));
2165 pQbuffer = acb->wqbuffer;
2166 memset(pQbuffer, 0, sizeof (struct QBUFFER));
2167 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2170 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
2171 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
2174 case ARCMSR_MESSAGE_SAY_HELLO: {
2175 int8_t * hello_string = "Hello! I am ARCMSR"; /* identification probe reply */
2177 memcpy(pcmdmessagefld->messagedatabuffer, hello_string
2178 , (int16_t)strlen(hello_string));
2179 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2182 case ARCMSR_MESSAGE_SAY_GOODBYE:
2183 arcmsr_iop_parking(acb); /* prepare IOP for shutdown */
2185 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
2186 arcmsr_flush_adapter_cache(acb);
2189 retvalue = ARCMSR_MESSAGE_FAIL; /* default: unknown control code */
2195 *********************************************************************
2196 *********************************************************************
/*
** arcmsr_execute_srb: bus_dmamap_load completion callback. Validates the
** DMA mapping and adapter state, then builds and posts the SRB to the IOP
** and arms the per-command timeout callout.
** Rejection paths (each completes the SRB immediately): map error other
** than EFBIG, too many S/G entries, bus reset in progress, read/write to
** a gone RAID volume, CCB no longer INPROG, or outstanding-command limit
** reached (freezes the SIM queue and requeues).
** NOTE(review): extract is missing lines (pccb declaration, braces,
** return statements between the guard clauses) — verify against full
** source.
*/
2198 static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2200 struct CommandControlBlock *srb=(struct CommandControlBlock *)arg;
2201 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)srb->acb;
2206 target=pccb->ccb_h.target_id;
2207 lun=pccb->ccb_h.target_lun;
2208 #ifdef ARCMSR_DEBUG1
2209 acb->pktRequestCount++;
2212 if(error != EFBIG) {
2213 kprintf("arcmsr%d: unexpected error %x"
2214 " returned from 'bus_dmamap_load' \n"
2215 , acb->pci_unit, error);
2217 if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2218 pccb->ccb_h.status |= CAM_REQ_TOO_BIG; /* EFBIG: transfer exceeds mapping limits */
2220 arcmsr_srb_complete(srb, 0);
2223 if(nseg > ARCMSR_MAX_SG_ENTRIES) {
2224 pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2225 arcmsr_srb_complete(srb, 0);
2228 if(acb->acb_flags & ACB_F_BUS_RESET) {
2229 kprintf("arcmsr%d: bus reset and return busy \n", acb->pci_unit);
2230 pccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
2231 arcmsr_srb_complete(srb, 0);
2234 if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
2235 u_int8_t block_cmd, cmd;
2237 cmd = pccb->csio.cdb_io.cdb_bytes[0];
2238 block_cmd= cmd & 0x0f; /* low nibble: 0x08=READ(x), 0x0a=WRITE(x) opcodes */
2239 if(block_cmd==0x08 || block_cmd==0x0a) {
2240 kprintf("arcmsr%d:block 'read/write' command "
2241 "with gone raid volume Cmd=0x%2x, TargetId=%d, Lun=%d \n"
2242 , acb->pci_unit, cmd, target, lun);
2243 pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
2244 arcmsr_srb_complete(srb, 0);
2248 if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
 /* CCB already finalized elsewhere: release the DMA map and bail */
2250 bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
2252 arcmsr_srb_complete(srb, 0);
2255 if(acb->srboutstandingcount > ARCMSR_MAX_OUTSTANDING_CMD) {
2256 xpt_freeze_simq(acb->psim, 1); /* back-pressure: hold the SIM queue */
2257 pccb->ccb_h.status = CAM_REQUEUE_REQ;
2258 acb->acb_flags |= ACB_F_CAM_DEV_QFRZN;
2259 arcmsr_srb_complete(srb, 0);
2262 pccb->ccb_h.status |= CAM_SIM_QUEUED;
2263 arcmsr_build_srb(srb, dm_segs, nseg); /* fill hardware command frame from S/G list */
2264 arcmsr_post_srb(acb, srb); /* hand the SRB to the IOP */
2265 if (pccb->ccb_h.timeout != CAM_TIME_INFINITY)
 /* timeout is in ms; add ARCMSR_TIMEOUT_DELAY seconds of grace, convert to ticks */
2267 arcmsr_callout_init(&srb->ccb_callout);
2268 callout_reset(&srb->ccb_callout, ((pccb->ccb_h.timeout + (ARCMSR_TIMEOUT_DELAY * 1000)) * hz) / 1000, arcmsr_srb_timeout, srb);
2269 srb->srb_flags |= SRB_FLAG_TIMER_START;
2273 *****************************************************************************************
2274 *****************************************************************************************
/*
** arcmsr_seek_cmd2abort: try to abort one outstanding CCB.
** Scans the SRB pool for a started SRB owning 'abortccb'; if found, marks
** it ABORTED and polls the reply queue with interrupts masked until it
** drains. Returns a u_int8_t status (TRUE/FALSE per the caller in
** arcmsr_action — exact return lines are missing from this extract).
** NOTE(review): missing lines include the declaration of 'i', braces,
** and the return statements — verify against full source.
*/
2276 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb)
2278 struct CommandControlBlock *srb;
2279 struct AdapterControlBlock *acb=(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr;
2280 u_int32_t intmask_org;
2285 ***************************************************************************
2286 ** It is the upper layer do abort command this lock just prior to calling us.
2287 ** First determine if we currently own this command.
2288 ** Start by searching the device queue. If not found
2289 ** at all, and the system wanted us to just abort the
2290 ** command return success.
2291 ***************************************************************************
2293 if(acb->srboutstandingcount!=0) {
2294 /* disable all outbound interrupt */
2295 intmask_org=arcmsr_disable_allintr(acb);
2296 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
2297 srb=acb->psrb_pool[i];
2298 if(srb->srb_state==ARCMSR_SRB_START) {
2299 if(srb->pccb==abortccb) {
2300 srb->srb_state=ARCMSR_SRB_ABORTED; /* flag before polling so completion path sees it */
2301 kprintf("arcmsr%d:scsi id=%d lun=%d abort srb '%p'"
2302 "outstanding command \n"
2303 , acb->pci_unit, abortccb->ccb_h.target_id
2304 , abortccb->ccb_h.target_lun, srb);
2305 arcmsr_polling_srbdone(acb, srb); /* synchronously drain until this SRB completes */
2306 /* enable outbound Post Queue, outbound doorbell Interrupt */
2307 arcmsr_enable_allintr(acb, intmask_org);
2312 /* enable outbound Post Queue, outbound doorbell Interrupt */
2313 arcmsr_enable_allintr(acb, intmask_org);
2318 ****************************************************************************
2319 ****************************************************************************
/*
** arcmsr_bus_reset: soft-reset the virtual SCSI bus. Sets ACB_F_BUS_RESET
** so new commands are rejected, spins calling the interrupt handler until
** all outstanding SRBs complete (bounded at 400 iterations by 'retry'),
** then resets the IOP and clears the flag.
** NOTE(review): 'retry' declaration/initialization and the loop's delay
** lines are missing from this extract — verify against full source.
*/
2321 static void arcmsr_bus_reset(struct AdapterControlBlock *acb)
2326 acb->acb_flags |=ACB_F_BUS_RESET;
2327 while(acb->srboutstandingcount!=0 && retry < 400) {
2328 arcmsr_interrupt(acb); /* reap completions while waiting for drain */
2332 arcmsr_iop_reset(acb);
2333 acb->acb_flags &= ~ACB_F_BUS_RESET;
2336 **************************************************************************
2337 **************************************************************************
/*
** arcmsr_handle_virtual_command: emulate the driver's virtual device
** (the pseudo target used for IOP message transfer). Answers INQUIRY with
** a synthetic T_PROCESSOR device ("Areca RAID controller"), and routes
** vendor-specific CDBs through arcmsr_iop_message_xfer; a message failure
** is reported as SCSI CHECK CONDITION.
** NOTE(review): extract is missing lines (second parameter of the
** signature, case labels, inqdata[3]/[5..7] setup, breaks) — verify
** against full source.
*/
2339 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
2342 pccb->ccb_h.status |= CAM_REQ_CMP;
2343 switch (pccb->csio.cdb_io.cdb_bytes[0]) {
2345 unsigned char inqdata[36]; /* standard 36-byte INQUIRY response */
2346 char *buffer=pccb->csio.data_ptr;
2348 if (pccb->ccb_h.target_lun) {
2349 pccb->ccb_h.status |= CAM_SEL_TIMEOUT; /* only LUN 0 exists on the virtual target */
2353 inqdata[0] = T_PROCESSOR; /* Periph Qualifier & Periph Dev Type */
2354 inqdata[1] = 0; /* rem media bit & Dev Type Modifier */
2355 inqdata[2] = 0; /* ISO, ECMA, & ANSI versions */
2357 inqdata[4] = 31; /* length of additional data */
2361 strncpy(&inqdata[8], "Areca ", 8); /* Vendor Identification */
2362 strncpy(&inqdata[16], "RAID controller ", 16); /* Product Identification */
2363 strncpy(&inqdata[32], "R001", 4); /* Product Revision */
2364 memcpy(buffer, inqdata, sizeof(inqdata));
2370 if (arcmsr_iop_message_xfer(acb, pccb)) {
2371 pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2372 pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
2382 *********************************************************************
2383 *********************************************************************
/*
** arcmsr_action: CAM SIM action entry point — dispatches CCBs from the
** transport layer. Handles XPT_SCSI_IO (virtual target vs. real I/O with
** virtual/physical/S-G buffer mapping via bus_dmamap_load), target-mode
** stubs, XPT_PATH_INQ (controller capabilities; SAS for ARC1880/1680,
** SPI otherwise), abort, device/bus reset, transfer-setting queries, and
** geometry calculation. Unknown function codes get CAM_REQ_INVALID.
** NOTE(review): extract is missing many lines (case labels such as
** XPT_SCSI_IO/XPT_ABORT, xpt_done calls, breaks, else branches, crit
** section enter/exit) — verify all control flow against full source.
*/
2385 static void arcmsr_action(struct cam_sim * psim, union ccb * pccb)
2387 struct AdapterControlBlock * acb;
2389 acb=(struct AdapterControlBlock *) cam_sim_softc(psim);
2391 pccb->ccb_h.status |= CAM_REQ_INVALID; /* presumably the NULL-softc guard path — TODO confirm */
2395 switch (pccb->ccb_h.func_code) {
2397 struct CommandControlBlock *srb;
2398 int target=pccb->ccb_h.target_id;
2401 /* virtual device for iop message transfer */
2402 arcmsr_handle_virtual_command(acb, pccb);
2405 if((srb=arcmsr_get_freesrb(acb)) == NULL) {
2406 pccb->ccb_h.status |= CAM_RESRC_UNAVAIL; /* SRB pool exhausted */
2410 pccb->ccb_h.arcmsr_ccbsrb_ptr=srb;
2411 pccb->ccb_h.arcmsr_ccbacb_ptr=acb;
2413 if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2414 if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) {
2416 if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) {
2417 /* Buffer is virtual */
2421 error = bus_dmamap_load(acb->dm_segs_dmat
2422 , srb->dm_segs_dmamap
2423 , pccb->csio.data_ptr
2424 , pccb->csio.dxfer_len
2425 , arcmsr_execute_srb, srb, /*flags*/0);
2426 if(error == EINPROGRESS) {
 /* mapping deferred: callback fires later; hold the queue meanwhile */
2427 xpt_freeze_simq(acb->psim, 1);
2428 pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2432 else { /* Buffer is physical */
2433 struct bus_dma_segment seg;
2435 seg.ds_addr = (bus_addr_t)pccb->csio.data_ptr;
2436 seg.ds_len = pccb->csio.dxfer_len;
2437 arcmsr_execute_srb(srb, &seg, 1, 0); /* single pre-mapped segment */
2440 /* Scatter/gather list */
2441 struct bus_dma_segment *segs;
2443 if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
2444 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
2445 pccb->ccb_h.status |= CAM_PROVIDE_FAIL; /* only physical S/G lists supported */
2447 kfree(srb, M_DEVBUF);
2450 segs=(struct bus_dma_segment *)pccb->csio.data_ptr;
2451 arcmsr_execute_srb(srb, segs, pccb->csio.sglist_cnt, 0);
2454 arcmsr_execute_srb(srb, NULL, 0, 0); /* CAM_DIR_NONE: no data phase */
2458 case XPT_TARGET_IO: {
2459 /* target mode not yet support vendor specific commands. */
2460 pccb->ccb_h.status |= CAM_REQ_CMP;
2464 case XPT_PATH_INQ: {
2465 struct ccb_pathinq *cpi= &pccb->cpi;
2468 cpi->hba_inquiry=PI_SDTR_ABLE | PI_TAG_ABLE;
2472 cpi->max_target=ARCMSR_MAX_TARGETID; /* 0-16 */
2473 cpi->max_lun=ARCMSR_MAX_TARGETLUN; /* 0-7 */
2474 cpi->initiator_id=ARCMSR_SCSI_INITIATOR_ID; /* 255 */
2475 cpi->bus_id=cam_sim_bus(psim);
2476 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2477 strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN);
2478 strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
2479 cpi->unit_number=cam_sim_unit(psim);
2480 if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
2481 cpi->base_transfer_speed = 600000; /* 6 Gb/s, reported in KB/s */
2483 cpi->base_transfer_speed = 300000; /* 3 Gb/s */
2484 if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
2485 (acb->vendor_device_id == PCIDevVenIDARC1680))
2487 cpi->transport = XPORT_SAS; /* 1880/1680 are SAS adapters */
2488 cpi->transport_version = 0;
2489 cpi->protocol_version = SCSI_REV_SPC2;
2493 cpi->transport = XPORT_SPI; /* all other models report parallel SCSI */
2494 cpi->transport_version = 2;
2495 cpi->protocol_version = SCSI_REV_2;
2497 cpi->protocol = PROTO_SCSI;
2498 cpi->ccb_h.status |= CAM_REQ_CMP;
2503 union ccb *pabort_ccb;
2505 pabort_ccb=pccb->cab.abort_ccb;
2506 switch (pabort_ccb->ccb_h.func_code) {
2507 case XPT_ACCEPT_TARGET_IO:
2508 case XPT_IMMED_NOTIFY:
2509 case XPT_CONT_TARGET_IO:
2510 if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) {
2511 pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED;
2512 xpt_done(pabort_ccb);
2513 pccb->ccb_h.status |= CAM_REQ_CMP;
2515 xpt_print_path(pabort_ccb->ccb_h.path);
2516 kprintf("Not found\n");
2517 pccb->ccb_h.status |= CAM_PATH_INVALID;
2521 pccb->ccb_h.status |= CAM_UA_ABORT;
2524 pccb->ccb_h.status |= CAM_REQ_INVALID;
2531 case XPT_RESET_DEV: {
2534 arcmsr_bus_reset(acb);
2535 for (i=0; i < 500; i++) {
 /* presumably a settle-delay loop after reset — TODO confirm */
2538 pccb->ccb_h.status |= CAM_REQ_CMP;
2543 pccb->ccb_h.status |= CAM_REQ_INVALID;
2547 case XPT_GET_TRAN_SETTINGS: {
2548 struct ccb_trans_settings *cts;
2550 if(pccb->ccb_h.target_id == 16) {
2551 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; /* target 16 is the virtual message device */
2557 struct ccb_trans_settings_scsi *scsi;
2558 struct ccb_trans_settings_spi *spi;
2559 struct ccb_trans_settings_sas *sas;
2561 scsi = &cts->proto_specific.scsi;
2562 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2563 scsi->valid = CTS_SCSI_VALID_TQ;
2564 cts->protocol = PROTO_SCSI;
2566 if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
2567 (acb->vendor_device_id == PCIDevVenIDARC1680))
2569 cts->protocol_version = SCSI_REV_SPC2;
2570 cts->transport_version = 0;
2571 cts->transport = XPORT_SAS;
2572 sas = &cts->xport_specific.sas;
2573 sas->valid = CTS_SAS_VALID_SPEED;
2574 if(acb->vendor_device_id == PCIDevVenIDARC1880)
2575 sas->bitrate = 600000; /* 6 Gb/s SAS */
2576 else if(acb->vendor_device_id == PCIDevVenIDARC1680)
2577 sas->bitrate = 300000; /* 3 Gb/s SAS */
2581 cts->protocol_version = SCSI_REV_2;
2582 cts->transport_version = 2;
2583 cts->transport = XPORT_SPI;
2584 spi = &cts->xport_specific.spi;
2585 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2587 spi->sync_offset=32;
2588 spi->bus_width=MSG_EXT_WDTR_BUS_16_BIT;
2589 spi->valid = CTS_SPI_VALID_DISC
2590 | CTS_SPI_VALID_SYNC_RATE
2591 | CTS_SPI_VALID_SYNC_OFFSET
2592 | CTS_SPI_VALID_BUS_WIDTH;
2595 pccb->ccb_h.status |= CAM_REQ_CMP;
2599 case XPT_SET_TRAN_SETTINGS: {
2600 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL; /* settings are fixed by firmware */
2604 case XPT_CALC_GEOMETRY:
2605 if(pccb->ccb_h.target_id == 16) {
2606 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2610 cam_calc_geometry(&pccb->ccg, 1); /* extended translation */
2614 pccb->ccb_h.status |= CAM_REQ_INVALID;
2620 **********************************************************************
2621 **********************************************************************
/*
** arcmsr_start_hba_bgrb: ask a type-A (HBA) controller to start the
** background rebuild, then wait for the message-complete interrupt;
** logs a warning on timeout.
*/
2623 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
2625 acb->acb_flags |= ACB_F_MSG_START_BGRB;
2626 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2627 if(!arcmsr_hba_wait_msgint_ready(acb)) {
2628 kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
2632 **********************************************************************
2633 **********************************************************************
/*
** arcmsr_start_hbb_bgrb: type-B (HBB) variant — start background rebuild
** via the drv2iop doorbell register and wait for acknowledgement.
*/
2635 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
2637 acb->acb_flags |= ACB_F_MSG_START_BGRB;
2638 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_BGRB);
2639 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
2640 kprintf( "arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
2644 **********************************************************************
2645 **********************************************************************
/*
** arcmsr_start_hbc_bgrb: type-C (HBC) variant — post the start-rebuild
** message and ring the inbound doorbell, then wait for acknowledgement.
*/
2647 static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb)
2649 acb->acb_flags |= ACB_F_MSG_START_BGRB;
2650 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2651 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
2652 if(!arcmsr_hbc_wait_msgint_ready(acb)) {
2653 kprintf("arcmsr%d: wait 'start adapter background rebuild' timeout \n", acb->pci_unit);
2657 **********************************************************************
2658 **********************************************************************
/*
** arcmsr_start_adapter_bgrb: dispatch "start background rebuild" to the
** per-chip handler according to the adapter type (A/B/C).
** NOTE(review): break statements between cases are missing from this
** extract — verify against full source.
*/
2660 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
2662 switch (acb->adapter_type) {
2663 case ACB_ADAPTER_TYPE_A:
2664 arcmsr_start_hba_bgrb(acb);
2666 case ACB_ADAPTER_TYPE_B:
2667 arcmsr_start_hbb_bgrb(acb);
2669 case ACB_ADAPTER_TYPE_C:
2670 arcmsr_start_hbc_bgrb(acb);
2675 **********************************************************************
2677 **********************************************************************
/*
** arcmsr_polling_hba_srbdone: poll the type-A outbound queue port with
** interrupts assumed masked, completing SRBs until the FIFO is empty
** (0xFFFFFFFF sentinel) — or, when waiting for a specific 'poll_srb',
** retrying past 100 empty polls via the polling_ccb_retry label.
** Aborted SRBs are completed with CAM_REQ_ABORTED; SRBs that do not
** belong to this ACB or are not in START state are logged as illegal.
** NOTE(review): extract is missing lines (labels, delays, 'error'
** declaration, break/continue placement) — verify against full source.
*/
2679 static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2681 struct CommandControlBlock *srb;
2682 u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0;
2687 outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
2688 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus); /*clear interrupt*/
2689 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2691 if((flag_srb=CHIP_REG_READ32(HBA_MessageUnit,
2692 0, outbound_queueport))==0xFFFFFFFF) {
2694 break;/*chip FIFO no ccb for completion already*/
2697 if ((poll_count > 100) && (poll_srb != NULL)) {
2700 goto polling_ccb_retry;
2703 /* check if command done with no error*/
2704 srb=(struct CommandControlBlock *)
2705 (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
2706 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
2707 poll_srb_done = (srb==poll_srb) ? 1:0; /* stop condition when polling for one SRB */
2708 if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2709 if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2710 kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
2711 "poll command abort successfully \n"
2713 , srb->pccb->ccb_h.target_id
2714 , srb->pccb->ccb_h.target_lun, srb);
2715 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2716 arcmsr_srb_complete(srb, 1);
2719 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
2720 "srboutstandingcount=%d \n"
2722 , srb, acb->srboutstandingcount);
2725 arcmsr_report_srb_state(acb, srb, error); /* normal completion path */
2726 } /*drain reply FIFO*/
2729 **********************************************************************
2731 **********************************************************************
/*
** arcmsr_polling_hbb_srbdone: type-B variant of the polled completion
** drain. Completions arrive via the in-memory done_qbuffer ring (0 marks
** an empty slot) rather than a hardware FIFO; each consumed entry is
** zeroed and doneq_index advanced modulo ARCMSR_MAX_HBB_POSTQUEUE.
** Abort/illegal-SRB handling mirrors the type-A routine.
** NOTE(review): extract is missing lines (labels, 'index'/'error'
** declarations, index increment, break/continue placement) — verify
** against full source.
*/
2733 static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2735 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
2736 struct CommandControlBlock *srb;
2737 u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
2743 CHIP_REG_WRITE32(HBB_DOORBELL,
2744 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
2745 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2747 index=phbbmu->doneq_index;
2748 if((flag_srb=phbbmu->done_qbuffer[index]) == 0) {
2750 break;/*chip FIFO no ccb for completion already*/
2753 if ((poll_count > 100) && (poll_srb != NULL)) {
2756 goto polling_ccb_retry;
2759 phbbmu->done_qbuffer[index]=0; /* consume the slot */
2761 index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */
2762 phbbmu->doneq_index=index;
2763 /* check if command done with no error*/
2764 srb=(struct CommandControlBlock *)
2765 (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
2766 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
2767 poll_srb_done = (srb==poll_srb) ? 1:0; /* stop condition when polling for one SRB */
2768 if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2769 if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2770 kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
2771 "poll command abort successfully \n"
2773 , srb->pccb->ccb_h.target_id
2774 , srb->pccb->ccb_h.target_lun, srb);
2775 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2776 arcmsr_srb_complete(srb, 1);
2779 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
2780 "srboutstandingcount=%d \n"
2782 , srb, acb->srboutstandingcount);
2785 arcmsr_report_srb_state(acb, srb, error); /* normal completion path */
2786 } /*drain reply FIFO*/
2789 **********************************************************************
2791 **********************************************************************
/*
** arcmsr_polling_hbc_srbdone: type-C variant of the polled completion
** drain. Checks the host interrupt status for the outbound post-queue
** bit, then reads completions from outbound_queueport_low; the SRB
** address is recovered by masking off the low 5 status bits (frames are
** 32-byte aligned) and the MODE1 error bit is decoded. Abort/illegal-SRB
** handling mirrors the type-A routine.
** NOTE(review): extract is missing lines (labels, 'error' declaration,
** break/continue placement) — verify against full source.
*/
2793 static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2795 struct CommandControlBlock *srb;
2796 u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
2801 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2803 if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
2805 break;/*chip FIFO no ccb for completion already*/
2808 if ((poll_count > 100) && (poll_srb != NULL)) {
2811 if (acb->srboutstandingcount == 0) {
2814 goto polling_ccb_retry;
2817 flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
2818 /* check if command done with no error*/
2819 srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
2820 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
2821 if (poll_srb != NULL)
2822 poll_srb_done = (srb==poll_srb) ? 1:0; /* stop condition when polling for one SRB */
2823 if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2824 if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2825 kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'poll command abort successfully \n"
2826 , acb->pci_unit, srb->pccb->ccb_h.target_id, srb->pccb->ccb_h.target_lun, srb);
2827 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2828 arcmsr_srb_complete(srb, 1);
2831 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n"
2832 , acb->pci_unit, srb, acb->srboutstandingcount);
2835 arcmsr_report_srb_state(acb, srb, error); /* normal completion path */
2836 } /*drain reply FIFO*/
2839 **********************************************************************
2840 **********************************************************************
/*
** arcmsr_polling_srbdone: dispatch the polled completion drain to the
** per-chip implementation (A/B/C) based on the adapter type.
** NOTE(review): break statements between cases are missing from this
** extract — verify against full source.
*/
2842 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2844 switch (acb->adapter_type) {
2845 case ACB_ADAPTER_TYPE_A: {
2846 arcmsr_polling_hba_srbdone(acb, poll_srb);
2849 case ACB_ADAPTER_TYPE_B: {
2850 arcmsr_polling_hbb_srbdone(acb, poll_srb);
2853 case ACB_ADAPTER_TYPE_C: {
2854 arcmsr_polling_hbc_srbdone(acb, poll_srb);
2860 **********************************************************************
2861 **********************************************************************
/*
** arcmsr_get_hba_config: fetch firmware identification and limits from a
** type-A controller. Issues GET_CONFIG via inbound_msgaddr0, then reads
** the model (8 bytes), version (16 bytes) and device map byte-by-byte
** from the msgcode_rwbuffer window, followed by the request length,
** queue depth, SDRAM size, channel count and config version words.
** NOTE(review): extract is missing lines (loop headers for the
** byte-copy loops, increments, terminators) — verify against full
** source.
*/
2863 static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
2865 char *acb_firm_model=acb->firm_model;
2866 char *acb_firm_version=acb->firm_version;
2867 char *acb_device_map = acb->device_map;
2868 size_t iop_firm_model=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/
2869 size_t iop_firm_version=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
2870 size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2873 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2874 if(!arcmsr_hba_wait_msgint_ready(acb)) {
2875 kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2879 *acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
2880 /* 8 bytes firm_model, 15, 60-67*/
2886 *acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
2887 /* 16 bytes firm_version, 17, 68-83*/
2893 *acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
2897 kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2898 kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2899 acb->firm_request_len=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/
2900 acb->firm_numbers_queue=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
2901 acb->firm_sdram_size=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/
2902 acb->firm_ide_channels=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/
2903 acb->firm_cfg_version=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */
2906 **********************************************************************
2907 **********************************************************************
/*
** arcmsr_get_hbb_config: type-B variant of the firmware-configuration
** fetch. Requests GET_CONFIG via the drv2iop doorbell and reads the
** model/version/device-map strings and limit words from the HBB_RWBUFFER
** window (bus space region 1 instead of 0).
** NOTE(review): extract is missing lines (byte-copy loop headers,
** increments, terminators) — verify against full source.
*/
2909 static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
2911 char *acb_firm_model=acb->firm_model;
2912 char *acb_firm_version=acb->firm_version;
2913 char *acb_device_map = acb->device_map;
2914 size_t iop_firm_model=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/
2915 size_t iop_firm_version=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
2916 size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2919 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
2920 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
2921 kprintf( "arcmsr%d: wait" "'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2925 *acb_firm_model=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i);
2926 /* 8 bytes firm_model, 15, 60-67*/
2932 *acb_firm_version=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i);
2933 /* 16 bytes firm_version, 17, 68-83*/
2939 *acb_device_map=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i);
2943 kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2944 kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2945 acb->firm_request_len=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/
2946 acb->firm_numbers_queue=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
2947 acb->firm_sdram_size=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/
2948 acb->firm_ide_channels=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/
2949 acb->firm_cfg_version=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */
2952 **********************************************************************
2953 **********************************************************************
/*
** arcmsr_get_hbc_config:
**   Fetch firmware "miscellaneous data" from a type-C (HBC) adapter.
**   Same contract as the HBA/HBB variants, but the GET_CONFIG request needs
**   both the inbound message register and a doorbell kick, and all reads go
**   through bus space index 0 (single BAR).
**   NOTE(review): copy-loop headers and closing braces are elided in this
**   view of the file; comments only added here.
*/
2955 static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb)
2957 char *acb_firm_model=acb->firm_model;
2958 char *acb_firm_version=acb->firm_version;
2959 char *acb_device_map = acb->device_map;
/* Byte offsets of the strings inside the HBC message unit. */
2960 size_t iop_firm_model=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/
2961 size_t iop_firm_version=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
2962 size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
/* HBC needs the message written AND the doorbell rung to deliver it. */
2965 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2966 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
2967 if(!arcmsr_hbc_wait_msgint_ready(acb)) {
2968 kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
/* Byte-wise copy loops (headers elided) from bus space index 0. */
2972 *acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
2973 /* 8 bytes firm_model, 15, 60-67*/
2979 *acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
2980 /* 16 bytes firm_version, 17, 68-83*/
2986 *acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
2990 kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2991 kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
/* Numeric firmware parameters, word index / byte range noted per line. */
2992 acb->firm_request_len =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/
2993 acb->firm_numbers_queue =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
2994 acb->firm_sdram_size =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/
2995 acb->firm_ide_channels =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/
2996 acb->firm_cfg_version =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */
2999 **********************************************************************
3000 **********************************************************************
/*
** arcmsr_get_firmware_spec:
**   Dispatch to the per-chipset firmware-config reader based on the adapter
**   family (A/B/C) detected in arcmsr_initialize().
*/
3002 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
3004 switch (acb->adapter_type) {
3005 case ACB_ADAPTER_TYPE_A: {
3006 arcmsr_get_hba_config(acb);
3009 case ACB_ADAPTER_TYPE_B: {
3010 arcmsr_get_hbb_config(acb);
3013 case ACB_ADAPTER_TYPE_C: {
3014 arcmsr_get_hbc_config(acb);
3020 **********************************************************************
3021 **********************************************************************
/*
** arcmsr_wait_firmware_ready:
**   Busy-poll the per-family "firmware OK" status bit until it is set,
**   sleeping 15 ms per iteration and giving up (with a console message)
**   after 2000 iterations = 30 seconds.  For type B the IOP additionally
**   gets an END_OF_INTERRUPT acknowledgment once ready.
**   NOTE(review): loop braces and break statements are elided in this view.
*/
3023 static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb)
3027 switch (acb->adapter_type) {
3028 case ACB_ADAPTER_TYPE_A: {
3029 while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0)
3031 if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3033 kprintf( "arcmsr%d:timed out waiting for firmware \n", acb->pci_unit);
3036 UDELAY(15000); /* wait 15 milli-seconds */
3040 case ACB_ADAPTER_TYPE_B: {
3041 while ((CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0)
3043 if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3045 kprintf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit);
3048 UDELAY(15000); /* wait 15 milli-seconds */
/* Acknowledge the doorbell so the IOP can proceed past its handshake. */
3050 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3053 case ACB_ADAPTER_TYPE_C: {
3054 while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0)
3056 if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3058 kprintf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit);
3061 UDELAY(15000); /* wait 15 milli-seconds */
3068 **********************************************************************
3069 **********************************************************************
/*
** arcmsr_clear_doorbell_queue_buffer:
**   Drain any pending doorbell state left over from before initialization:
**   acknowledge (clear) the outbound doorbell interrupt and then tell the
**   IOP "driver has read the data" so its Qbuffer is considered empty.
**   Register layout differs per family: A writes the value back to the same
**   register, C writes it to a dedicated *_clear register, B clears via the
**   iop2drv doorbell pattern.
*/
3071 static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb)
3073 u_int32_t outbound_doorbell;
3075 switch (acb->adapter_type) {
3076 case ACB_ADAPTER_TYPE_A: {
3077 /* empty doorbell Qbuffer if door bell ringed */
3078 outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
3079 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */
3080 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
3084 case ACB_ADAPTER_TYPE_B: {
3085 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/
3086 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
3087 /* let IOP know data has been read */
3090 case ACB_ADAPTER_TYPE_C: {
3091 /* empty doorbell Qbuffer if door bell ringed */
3092 outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
3093 CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /*clear doorbell interrupt */
3094 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
3101 ************************************************************************
3102 ************************************************************************
/*
** arcmsr_iop_confirm:
**   Tell the IOP where the driver's SRB pool lives.
**   - Types A and C only need the high 32 bits of the SRB physical base,
**     and only when it is nonzero (i.e. pool above 4 GB).
**   - Type B additionally programs the post/done command-queue window
**     (each queue is (256+8)*4 = 1056 bytes) and switches the IOP into
**     driver mode.
**   Returns a status word (success/failure); the return statements are
**   elided in this view of the file.
*/
3104 static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3106 unsigned long srb_phyaddr;
3107 u_int32_t srb_phyaddr_hi32;
3110 ********************************************************************
3111 ** here we need to tell iop 331 our freesrb.HighPart
3112 ** if freesrb.HighPart is not zero
3113 ********************************************************************
3115 srb_phyaddr= (unsigned long) acb->srb_phyaddr.phyaddr;
3116 // srb_phyaddr_hi32=(u_int32_t) ((srb_phyaddr>>16)>>16);
3117 srb_phyaddr_hi32=acb->srb_phyaddr.B.phyadd_high;
3118 switch (acb->adapter_type) {
3119 case ACB_ADAPTER_TYPE_A: {
3120 if(srb_phyaddr_hi32!=0) {
3121 CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3122 CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3123 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3124 if(!arcmsr_hba_wait_msgint_ready(acb)) {
3125 kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3132 ***********************************************************************
3133 ** if adapter type B, set window of "post command Q"
3134 ***********************************************************************
3136 case ACB_ADAPTER_TYPE_B: {
3137 u_int32_t post_queue_phyaddr;
3138 struct HBB_MessageUnit *phbbmu;
3140 phbbmu=(struct HBB_MessageUnit *)acb->pmu;
/* Reset software queue indices before handing the window to the IOP. */
3141 phbbmu->postq_index=0;
3142 phbbmu->doneq_index=0;
3143 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW);
3144 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3145 kprintf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit);
/* The post queue sits in the same DMA allocation, just past the SRB pool. */
3148 post_queue_phyaddr = srb_phyaddr + ARCMSR_SRBS_POOL_SIZE
3149 + offsetof(struct HBB_MessageUnit, post_qbuffer);
3150 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */
3151 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normal should be zero */
3152 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ size (256+8)*4 */
3153 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ size (256+8)*4 */
3154 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* srb maxQ size must be --> [(256+8)*4] */
3155 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG);
3156 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3157 kprintf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit);
3160 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE);
3161 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
/* NOTE(review): message text typo — "diver" should read "driver". */
3162 kprintf( "arcmsr%d: 'start diver mode' timeout \n", acb->pci_unit);
3167 case ACB_ADAPTER_TYPE_C: {
3168 if(srb_phyaddr_hi32!=0) {
3169 CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3170 CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3171 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3172 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3173 if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3174 kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3184 ************************************************************************
3185 ************************************************************************
/*
** arcmsr_enable_eoi_mode:
**   Switch the IOP into active end-of-interrupt mode.  Only type-B
**   adapters need this; types A and C fall through with no action.
*/
3187 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
3189 switch (acb->adapter_type)
3191 case ACB_ADAPTER_TYPE_A:
3192 case ACB_ADAPTER_TYPE_C:
3194 case ACB_ADAPTER_TYPE_B: {
3195 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
3196 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3197 kprintf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit);
3206 **********************************************************************
3207 **********************************************************************
/*
** arcmsr_iop_init:
**   Bring the IOP to an operational state, in order: mask all outbound
**   interrupts, wait for firmware-ready, hand the IOP the SRB pool /
**   command-queue addresses, read back the firmware spec, start background
**   rebuild, drain stale doorbell state, enable EOI mode (type B), restore
**   the interrupt mask, and finally mark the ACB as IOP-initialized.
*/
3209 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
3211 u_int32_t intmask_org;
3213 /* disable all outbound interrupt */
3214 intmask_org=arcmsr_disable_allintr(acb);
3215 arcmsr_wait_firmware_ready(acb);
3216 arcmsr_iop_confirm(acb);
3217 arcmsr_get_firmware_spec(acb);
3218 /*start background rebuild*/
3219 arcmsr_start_adapter_bgrb(acb);
3220 /* empty doorbell Qbuffer if door bell ringed */
3221 arcmsr_clear_doorbell_queue_buffer(acb);
3222 arcmsr_enable_eoi_mode(acb);
3223 /* enable outbound Post Queue, outbound doorbell Interrupt */
3224 arcmsr_enable_allintr(acb, intmask_org);
3225 acb->acb_flags |=ACB_F_IOP_INITED;
3228 **********************************************************************
3229 **********************************************************************
/*
** arcmsr_map_free_srb:
**   bus_dmamap_load() callback: carves the coherent DMA region into
**   ARCMSR_MAX_FREESRB_NUM fixed-size SRBs, creating a per-SRB dmamap and
**   recording each SRB's (possibly shifted) physical address.  On any
**   dmamap_create failure it sets ACB_F_MAPFREESRB_FAILD, which
**   arcmsr_initialize() checks afterwards.  Also derives vir2phy_offset,
**   used elsewhere to translate physical completions back to virtual SRBs.
**   NOTE(review): assumes nseg == 1 (only segs->ds_addr is used) —
**   consistent with a coherent single-segment allocation, but elided
**   lines prevent confirming an explicit check.
*/
3231 static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3233 struct AdapterControlBlock *acb=arg;
3234 struct CommandControlBlock *srb_tmp;
3235 u_int8_t * dma_memptr;
3237 unsigned long srb_phyaddr=(unsigned long)segs->ds_addr;
3239 dma_memptr=acb->uncacheptr;
3240 acb->srb_phyaddr.phyaddr=srb_phyaddr;
3241 srb_tmp=(struct CommandControlBlock *)dma_memptr;
3242 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
3243 if(bus_dmamap_create(acb->dm_segs_dmat,
3244 /*flags*/0, &srb_tmp->dm_segs_dmamap)!=0) {
3245 acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
3247 " srb dmamap bus_dmamap_create error\n", acb->pci_unit);
/* Type C posts the full address; older types post addr >> 5 (32-byte units). */
3250 srb_tmp->cdb_shifted_phyaddr=(acb->adapter_type==ACB_ADAPTER_TYPE_C)?srb_phyaddr:(srb_phyaddr >> 5);
3252 acb->srbworkingQ[i]=acb->psrb_pool[i]=srb_tmp;
3253 srb_phyaddr=srb_phyaddr+SRB_SIZE;
3254 srb_tmp = (struct CommandControlBlock *)((unsigned long)srb_tmp+SRB_SIZE);
/* Both pointers advanced in lockstep, so (virtual - physical) is constant. */
3256 acb->vir2phy_offset=(unsigned long)srb_tmp-srb_phyaddr;
3259 ************************************************************************
3262 ************************************************************************
/*
** arcmsr_free_resource:
**   Tear down the control device node (if created) and release the DMA
**   map/tags in reverse order of creation: srb map+tag, segment tag,
**   parent tag.  Safe to call from partially-failed attach paths because
**   the ioctl_dev NULL check guards the only optional resource.
*/
3264 static void arcmsr_free_resource(struct AdapterControlBlock *acb)
3266 /* remove the control device */
3267 if(acb->ioctl_dev != NULL) {
3268 destroy_dev(acb->ioctl_dev);
3270 bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap);
3271 bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap);
3272 bus_dma_tag_destroy(acb->srb_dmat);
3273 bus_dma_tag_destroy(acb->dm_segs_dmat);
3274 bus_dma_tag_destroy(acb->parent_dmat);
3277 ************************************************************************
3278 ************************************************************************
/*
** arcmsr_initialize:
**   Hardware bring-up called from arcmsr_attach().  Steps:
**     1. classify the adapter family (A/B/C) from the PCI device ID and
**        size the coherent allocation (type B embeds its message unit
**        after the SRB pool);
**     2. create the parent / s-g / SRB bus_dma tags, allocate and
**        permanently load the SRB pool (arcmsr_map_free_srb callback);
**     3. enable bus mastering, parity response, MWI and memory decoding
**        in PCI command;
**     4. map the family-specific BAR(s) and point acb->pmu at the
**        message unit;
**     5. init the per-target/LUN RAID state table and run
**        arcmsr_iop_init().
**   Returns 0 on success, nonzero on failure (return statements elided
**   in this view).
*/
3280 static u_int32_t arcmsr_initialize(device_t dev)
3282 struct AdapterControlBlock *acb=device_get_softc(dev);
3283 u_int16_t pci_command;
3284 int i, j,max_coherent_size;
3285 u_int32_t vendor_dev_id;
3287 vendor_dev_id = pci_get_devid(dev);
3288 acb->vendor_device_id = vendor_dev_id;
/* Family classification: 188x/1213/1223 = type C (6G SAS); 1200/1201 =
** type B (extra HBB_MessageUnit appended to the DMA pool); the rest of
** the 3G list = type A. */
3289 switch (vendor_dev_id) {
3290 case PCIDevVenIDARC1880:
3291 case PCIDevVenIDARC1882:
3292 case PCIDevVenIDARC1213:
3293 case PCIDevVenIDARC1223: {
3294 acb->adapter_type=ACB_ADAPTER_TYPE_C;
3295 acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
3296 max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
3299 case PCIDevVenIDARC1200:
3300 case PCIDevVenIDARC1201: {
3301 acb->adapter_type=ACB_ADAPTER_TYPE_B;
3302 acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
3303 max_coherent_size=ARCMSR_SRBS_POOL_SIZE+(sizeof(struct HBB_MessageUnit));
3306 case PCIDevVenIDARC1110:
3307 case PCIDevVenIDARC1120:
3308 case PCIDevVenIDARC1130:
3309 case PCIDevVenIDARC1160:
3310 case PCIDevVenIDARC1170:
3311 case PCIDevVenIDARC1210:
3312 case PCIDevVenIDARC1220:
3313 case PCIDevVenIDARC1230:
3314 case PCIDevVenIDARC1231:
3315 case PCIDevVenIDARC1260:
3316 case PCIDevVenIDARC1261:
3317 case PCIDevVenIDARC1270:
3318 case PCIDevVenIDARC1280:
3319 case PCIDevVenIDARC1212:
3320 case PCIDevVenIDARC1222:
3321 case PCIDevVenIDARC1380:
3322 case PCIDevVenIDARC1381:
3323 case PCIDevVenIDARC1680:
3324 case PCIDevVenIDARC1681: {
3325 acb->adapter_type=ACB_ADAPTER_TYPE_A;
3326 acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
3327 max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
3332 " unknown RAID adapter type \n", device_get_unit(dev));
/* Parent DMA tag: unconstrained; children inherit from it. */
3336 if(bus_dma_tag_create( /*parent*/ NULL,
3339 /*lowaddr*/ BUS_SPACE_MAXADDR,
3340 /*highaddr*/ BUS_SPACE_MAXADDR,
3343 /*maxsize*/ BUS_SPACE_MAXSIZE_32BIT,
3344 /*nsegments*/ BUS_SPACE_UNRESTRICTED,
3345 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
3347 &acb->parent_dmat) != 0)
3349 kprintf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3352 /* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */
3353 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
3356 /*lowaddr*/ BUS_SPACE_MAXADDR,
3357 /*highaddr*/ BUS_SPACE_MAXADDR,
3360 /*maxsize*/ ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM,
3361 /*nsegments*/ ARCMSR_MAX_SG_ENTRIES,
3362 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
3364 &acb->dm_segs_dmat) != 0)
3366 bus_dma_tag_destroy(acb->parent_dmat);
3367 kprintf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3370 /* DMA tag for our srb structures.... Allocate the freesrb memory */
/* lowaddr capped at 32 bits so the SRB pool stays below 4 GB where possible. */
3371 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
3374 /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT,
3375 /*highaddr*/ BUS_SPACE_MAXADDR,
3378 /*maxsize*/ max_coherent_size,
3380 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
3382 &acb->srb_dmat) != 0)
3384 bus_dma_tag_destroy(acb->dm_segs_dmat);
3385 bus_dma_tag_destroy(acb->parent_dmat);
3386 kprintf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3389 /* Allocation for our srbs */
3390 if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) {
3391 bus_dma_tag_destroy(acb->srb_dmat);
3392 bus_dma_tag_destroy(acb->dm_segs_dmat);
3393 bus_dma_tag_destroy(acb->parent_dmat);
3394 kprintf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev));
3397 /* And permanently map them */
3398 if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) {
3399 bus_dma_tag_destroy(acb->srb_dmat);
3400 bus_dma_tag_destroy(acb->dm_segs_dmat);
3401 bus_dma_tag_destroy(acb->parent_dmat);
3402 kprintf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev));
3405 pci_command=pci_read_config(dev, PCIR_COMMAND, 2);
3406 pci_command |= PCIM_CMD_BUSMASTEREN;
3407 pci_command |= PCIM_CMD_PERRESPEN;
3408 pci_command |= PCIM_CMD_MWRICEN;
3409 /* Enable Busmaster/Mem */
3410 pci_command |= PCIM_CMD_MEMEN;
3411 pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
/* Map the register BAR(s); layout differs per adapter family. */
3412 switch(acb->adapter_type) {
3413 case ACB_ADAPTER_TYPE_A: {
3414 u_int32_t rid0=PCIR_BAR(0);
3415 vm_offset_t mem_base0;
3417 acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, 0x1000, RF_ACTIVE);
3418 if(acb->sys_res_arcmsr[0] == NULL) {
3419 arcmsr_free_resource(acb);
3420 kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
3423 if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
3424 arcmsr_free_resource(acb);
3425 kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
3428 mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
3430 arcmsr_free_resource(acb);
3431 kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
3434 acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
3435 acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
3436 acb->pmu=(struct MessageUnit_UNION *)mem_base0;
3439 case ACB_ADAPTER_TYPE_B: {
/* Type B uses two BARs: BAR(0) = doorbell registers, BAR(2) = RW buffer. */
3440 struct HBB_MessageUnit *phbbmu;
3441 struct CommandControlBlock *freesrb;
3442 u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) };
3443 vm_offset_t mem_base[]={0,0};
3444 for(i=0; i<2; i++) {
3446 acb->sys_res_arcmsr[i]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid[i],
3447 0ul, ~0ul, sizeof(struct HBB_DOORBELL), RF_ACTIVE);
3449 acb->sys_res_arcmsr[i]=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i],
3450 0ul, ~0ul, sizeof(struct HBB_RWBUFFER), RF_ACTIVE);
3452 if(acb->sys_res_arcmsr[i] == NULL) {
3453 arcmsr_free_resource(acb);
3454 kprintf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i);
3457 if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) {
3458 arcmsr_free_resource(acb);
3459 kprintf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i);
3462 mem_base[i]=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]);
3463 if(mem_base[i]==0) {
3464 arcmsr_free_resource(acb);
3465 kprintf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i);
3468 acb->btag[i]=rman_get_bustag(acb->sys_res_arcmsr[i]);
3469 acb->bhandle[i]=rman_get_bushandle(acb->sys_res_arcmsr[i]);
/* The HBB message unit lives in host memory, right after the SRB pool. */
3471 freesrb=(struct CommandControlBlock *)acb->uncacheptr;
3472 // acb->pmu=(struct MessageUnit_UNION *)&freesrb[ARCMSR_MAX_FREESRB_NUM];
3473 acb->pmu=(struct MessageUnit_UNION *)((unsigned long)freesrb+ARCMSR_SRBS_POOL_SIZE);
3474 phbbmu=(struct HBB_MessageUnit *)acb->pmu;
3475 phbbmu->hbb_doorbell=(struct HBB_DOORBELL *)mem_base[0];
3476 phbbmu->hbb_rwbuffer=(struct HBB_RWBUFFER *)mem_base[1];
3479 case ACB_ADAPTER_TYPE_C: {
/* Type C registers are behind BAR(1), not BAR(0). */
3480 u_int32_t rid0=PCIR_BAR(1);
3481 vm_offset_t mem_base0;
3483 acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBC_MessageUnit), RF_ACTIVE);
3484 if(acb->sys_res_arcmsr[0] == NULL) {
3485 arcmsr_free_resource(acb);
3486 kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
3489 if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
3490 arcmsr_free_resource(acb);
3491 kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
3494 mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
3496 arcmsr_free_resource(acb);
3497 kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
3500 acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
3501 acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
3502 acb->pmu=(struct MessageUnit_UNION *)mem_base0;
/* Flag set asynchronously by the arcmsr_map_free_srb load callback. */
3506 if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) {
3507 arcmsr_free_resource(acb);
3508 kprintf("arcmsr%d: map free srb failure!\n", device_get_unit(dev));
3511 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
3512 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
3514 ********************************************************************
3515 ** init raid volume state
3516 ********************************************************************
3518 for(i=0;i<ARCMSR_MAX_TARGETID;i++) {
3519 for(j=0;j<ARCMSR_MAX_TARGETLUN;j++) {
/* Every device starts as "gone" until the firmware device map reports it. */
3520 acb->devstate[i][j]=ARECA_RAID_GONE;
3523 arcmsr_iop_init(acb);
3527 ************************************************************************
3528 ************************************************************************
/*
** arcmsr_attach:
**   Device attach entry point.  Order of operations:
**     1. init the qbuffer lock and run arcmsr_initialize() (hardware setup);
**     2. allocate and hook the interrupt (MSI or legacy via pci_alloc_1intr);
**     3. build the CAM plumbing: simq -> sim -> xpt bus -> wildcard path,
**        unwinding everything allocated so far on each failure;
**     4. register an async callback for device arrival/departure;
**     5. create /dev/arcmsr%d (+ arc%d alias) and start the 60-second
**        device-map polling callout.
**   Returns 0 on success; error returns are elided in this view.
*/
3530 static int arcmsr_attach(device_t dev)
3532 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3533 u_int32_t unit=device_get_unit(dev);
3534 struct ccb_setasync csa;
3535 struct cam_devq *devq; /* Device Queue to use for this SIM */
3536 struct resource *irqres;
3541 kprintf("arcmsr%d: cannot allocate softc\n", unit);
3544 ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr Q buffer lock");
3545 if(arcmsr_initialize(dev)) {
3546 kprintf("arcmsr%d: initialize failure!\n", unit);
3547 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3550 /* After setting up the adapter, map our interrupt */
3552 acb->irq_type = pci_alloc_1intr(dev, arcmsr_msi_enable, &rid,
3554 irqres=bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1,
3556 if(irqres == NULL ||
3557 bus_setup_intr(dev, irqres, INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih, NULL)) {
3558 arcmsr_free_resource(acb);
3559 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3560 kprintf("arcmsr%d: unable to register interrupt handler!\n", unit);
3567 * Now let the CAM generic SCSI layer find the SCSI devices on
3568 * the bus * start queue to reset to the idle loop. *
3569 * Create device queue of SIM(s) * (MAX_START_JOB - 1) :
3570 * max_sim_transactions
3572 devq=cam_simq_alloc(ARCMSR_MAX_START_JOB);
3574 arcmsr_free_resource(acb);
3575 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3576 if (acb->irq_type == PCI_INTR_TYPE_MSI)
3577 pci_release_msi(dev);
3578 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3579 kprintf("arcmsr%d: cam_simq_alloc failure!\n", unit);
3582 acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->qbuffer_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq);
/* The sim holds its own devq reference; drop ours unconditionally. */
3583 cam_simq_release(devq);
3584 if(acb->psim == NULL) {
3585 arcmsr_free_resource(acb);
3586 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3587 if (acb->irq_type == PCI_INTR_TYPE_MSI)
3588 pci_release_msi(dev);
3589 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3590 kprintf("arcmsr%d: cam_sim_alloc failure!\n", unit);
3593 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3594 if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) {
3595 arcmsr_free_resource(acb);
3596 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3597 if (acb->irq_type == PCI_INTR_TYPE_MSI)
3598 pci_release_msi(dev);
3599 cam_sim_free(acb->psim);
3600 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3601 kprintf("arcmsr%d: xpt_bus_register failure!\n", unit);
3604 if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
3605 arcmsr_free_resource(acb);
3606 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3607 if (acb->irq_type == PCI_INTR_TYPE_MSI)
3608 pci_release_msi(dev);
3609 xpt_bus_deregister(cam_sim_path(acb->psim));
3610 cam_sim_free(acb->psim);
3611 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3612 kprintf("arcmsr%d: xpt_create_path failure!\n", unit);
3616 ****************************************************
/* Register for device found/lost async notifications on the wildcard path. */
3618 xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5);
3619 csa.ccb_h.func_code=XPT_SASYNC_CB;
3620 csa.event_enable=AC_FOUND_DEVICE|AC_LOST_DEVICE;
3621 csa.callback=arcmsr_async;
3622 csa.callback_arg=acb->psim;
3623 xpt_action((union ccb *)&csa);
3624 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3625 /* Create the control device. */
3626 acb->ioctl_dev=make_dev(&arcmsr_ops, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit);
3628 acb->ioctl_dev->si_drv1=acb;
3629 (void)make_dev_alias(acb->ioctl_dev, "arc%d", unit);
/* Periodic (60 s) poll of the firmware device map for hot add/remove. */
3630 arcmsr_callout_init(&acb->devmap_callout);
3631 callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb);
3636 ************************************************************************
3637 ************************************************************************
/*
** arcmsr_probe:
**   PCI probe: reject non-Areca vendor IDs, then classify the device ID to
**   build a human-readable description (and, for 188x-family controllers,
**   disable MSI).  Returns BUS_PROBE_DEFAULT on a match.
**   NOTE(review): `buf` is a function-local `static` — not reentrant if two
**   probes ever ran concurrently; acceptable for probe but worth knowing.
*/
3639 static int arcmsr_probe(device_t dev)
3642 static char buf[256];
3643 char x_type[]={"X-TYPE"};
3647 if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) {
3650 switch(id=pci_get_devid(dev)) {
3651 case PCIDevVenIDARC1110:
3652 case PCIDevVenIDARC1200:
3653 case PCIDevVenIDARC1201:
3654 case PCIDevVenIDARC1210:
3657 case PCIDevVenIDARC1120:
3658 case PCIDevVenIDARC1130:
3659 case PCIDevVenIDARC1160:
3660 case PCIDevVenIDARC1170:
3661 case PCIDevVenIDARC1220:
3662 case PCIDevVenIDARC1230:
3663 case PCIDevVenIDARC1231:
3664 case PCIDevVenIDARC1260:
3665 case PCIDevVenIDARC1261:
3666 case PCIDevVenIDARC1270:
3667 case PCIDevVenIDARC1280:
3670 case PCIDevVenIDARC1212:
3671 case PCIDevVenIDARC1222:
3672 case PCIDevVenIDARC1380:
3673 case PCIDevVenIDARC1381:
3674 case PCIDevVenIDARC1680:
3675 case PCIDevVenIDARC1681:
3678 case PCIDevVenIDARC1880:
3679 case PCIDevVenIDARC1882:
3680 case PCIDevVenIDARC1213:
3681 case PCIDevVenIDARC1223:
/* 6G (type C) family: MSI disabled for these controllers. */
3683 arcmsr_msi_enable = 0;
3691 ksprintf(buf, "Areca %s Host Adapter RAID Controller%s", type, raid6 ? " (RAID6 capable)" : "");
3692 device_set_desc_copy(dev, buf);
3693 return (BUS_PROBE_DEFAULT);
3696 ************************************************************************
3697 ************************************************************************
/*
** arcmsr_shutdown:
**   Quiesce the adapter for system shutdown (also reused by detach):
**   under the qbuffer lock, mask interrupts, stop background rebuild,
**   flush the adapter cache, mark the adapter stopped, and abort any
**   outstanding SRBs (completing them back to CAM with CAM_REQ_ABORTED)
**   before resetting the working-queue indices.
*/
3699 static int arcmsr_shutdown(device_t dev)
3702 struct CommandControlBlock *srb;
3703 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3705 /* stop adapter background rebuild */
3706 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3707 /* disable all outbound interrupt */
3708 arcmsr_disable_allintr(acb);
3709 arcmsr_stop_adapter_bgrb(acb);
3710 arcmsr_flush_adapter_cache(acb);
3711 /* abort all outstanding command */
3712 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
3713 acb->acb_flags &= ~ACB_F_IOP_INITED;
3714 if(acb->srboutstandingcount!=0) {
3715 /*clear and abort all outbound posted Q*/
3716 arcmsr_done4abort_postqueue(acb);
3717 /* talk to iop 331 outstanding command aborted*/
3718 arcmsr_abort_allcmd(acb);
3719 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
3720 srb=acb->psrb_pool[i];
3721 if(srb->srb_state==ARCMSR_SRB_START) {
3722 srb->srb_state=ARCMSR_SRB_ABORTED;
3723 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3724 arcmsr_srb_complete(srb, 1);
3728 acb->srboutstandingcount=0;
3729 acb->workingsrb_doneindex=0;
3730 acb->workingsrb_startindex=0;
3731 #ifdef ARCMSR_DEBUG1
3732 acb->pktRequestCount = 0;
3733 acb->pktReturnCount = 0;
3735 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3739 ************************************************************************
3740 ************************************************************************
/*
** arcmsr_detach:
**   Device detach: stop the devmap callout, tear down the interrupt,
**   quiesce the hardware via arcmsr_shutdown(), free driver resources,
**   release memory/IRQ bus resources (+MSI), then deregister from CAM
**   and destroy the lock.
*/
3742 static int arcmsr_detach(device_t dev)
3744 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3747 callout_stop(&acb->devmap_callout);
3748 bus_teardown_intr(dev, acb->irqres, acb->ih);
3749 arcmsr_shutdown(dev);
3750 arcmsr_free_resource(acb);
/* NOTE(review): two suspected bugs on this loop —
** 1) `sys_res_arcmsr[i]` is tested BEFORE `i<2`, so when both slots are
**    non-NULL the condition reads sys_res_arcmsr[2] out of bounds; the
**    checks should be ordered `(i<2) && (acb->sys_res_arcmsr[i]!=NULL)`.
** 2) rid `PCIR_BAR(i)` does not match type-B adapters, which allocate
**    with rids PCIR_BAR(0) and PCIR_BAR(2) in arcmsr_initialize(); slot 1
**    would be released with the wrong rid.  Confirm against the allocator. */
3751 for(i=0; (acb->sys_res_arcmsr[i]!=NULL) && (i<2); i++) {
3752 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]);
3754 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3755 if (acb->irq_type == PCI_INTR_TYPE_MSI)
3756 pci_release_msi(dev);
3757 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
/* Notify CAM the bus is going away before freeing the path/sim. */
3758 xpt_async(AC_LOST_DEVICE, acb->ppath, NULL);
3759 xpt_free_path(acb->ppath);
3760 xpt_bus_deregister(cam_sim_path(acb->psim));
3761 cam_sim_free(acb->psim);
3762 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3763 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3767 #ifdef ARCMSR_DEBUG1
3768 static void arcmsr_dump_data(struct AdapterControlBlock *acb)
3770 if((acb->pktRequestCount - acb->pktReturnCount) == 0)
3772 printf("Command Request Count =0x%x\n",acb->pktRequestCount);
3773 printf("Command Return Count =0x%x\n",acb->pktReturnCount);
3774 printf("Command (Req-Rtn) Count =0x%x\n",(acb->pktRequestCount - acb->pktReturnCount));
3775 printf("Queued Command Count =0x%x\n",acb->srboutstandingcount);