2 *****************************************************************************************
4 ** FILE NAME : arcmsr.c
5 ** BY : Erich Chen, Ching Huang
6 ** Description: SCSI RAID Device Driver for
7 ** ARECA (ARC11XX/ARC12XX/ARC13XX/ARC16XX/ARC188x) SATA/SAS RAID HOST Adapter
8 ** ARCMSR RAID Host adapter
9 ** [RAID controller:INTEL 331(PCI-X) 341(PCI-EXPRESS) chip set]
10 ******************************************************************************************
11 ************************************************************************
13 ** Copyright (c) 2004-2010 ARECA Co. Ltd.
14 ** Erich Chen, Taipei Taiwan All rights reserved.
16 ** Redistribution and use in source and binary forms, with or without
17 ** modification, are permitted provided that the following conditions
19 ** 1. Redistributions of source code must retain the above copyright
20 ** notice, this list of conditions and the following disclaimer.
21 ** 2. Redistributions in binary form must reproduce the above copyright
22 ** notice, this list of conditions and the following disclaimer in the
23 ** documentation and/or other materials provided with the distribution.
24 ** 3. The name of the author may not be used to endorse or promote products
25 ** derived from this software without specific prior written permission.
27 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
28 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
29 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
30 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
31 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT
32 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
33 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
34 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
35 **(INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
36 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 **************************************************************************
40 ** REV# DATE NAME DESCRIPTION
41 ** 1.00.00.00 03/31/2004 Erich Chen First release
42 ** 1.20.00.02 11/29/2004 Erich Chen bug fix with arcmsr_bus_reset when PHY error
43 ** 1.20.00.03 04/19/2005 Erich Chen add SATA 24 Ports adapter type support
44 ** clean unused function
45 ** 1.20.00.12 09/12/2005 Erich Chen bug fix with abort command handling,
46 ** firmware version check
47 ** and firmware update notify for hardware bug fix
48 ** handling if none zero high part physical address
50 ** 1.20.00.13 08/18/2006 Erich Chen remove pending srb and report busy
51 ** add iop message xfer
52 ** with scsi pass-through command
53 ** add new device id of sas raid adapters
54 ** code fit for SPARC64 & PPC
55 ** 1.20.00.14 02/05/2007 Erich Chen bug fix for incorrect ccb_h.status report
56 ** and cause g_vfs_done() read write error
57 ** 1.20.00.15 10/10/2007 Erich Chen support new RAID adapter type ARC120x
58 ** 1.20.00.16 10/10/2009 Erich Chen Bug fix for RAID adapter type ARC120x
59 ** bus_dmamem_alloc() with BUS_DMA_ZERO
60 ** 1.20.00.17 07/15/2010 Ching Huang Added support ARC1880
61 ** report CAM_DEV_NOT_THERE instead of CAM_SEL_TIMEOUT when device failed,
62 ** prevent cam_periph_error removing all LUN devices of one Target id
63 ** for any one LUN device failed
64 ** 1.20.00.18 10/14/2010 Ching Huang Fixed "inquiry data fails comparison at DV1 step"
65 ** 10/25/2010 Ching Huang Fixed bad range input in bus_alloc_resource for ADAPTER_TYPE_B
66 ** 1.20.00.19 11/11/2010 Ching Huang Fixed arcmsr driver prevent arcsas support for Areca SAS HBA ARC13x0
67 ** 1.20.00.20 12/08/2010 Ching Huang Avoid calling atomic_set_int function
68 ** 1.20.00.21 02/08/2011 Ching Huang Implement I/O request timeout
69 ** 02/14/2011 Ching Huang Modified pktRequestCount
70 ** 1.20.00.21 03/03/2011 Ching Huang if a command timeout, then wait its ccb back before free it
71 ** 1.20.00.22 07/04/2011 Ching Huang Fixed multiple MTX panic
72 ** 1.20.00.23 10/28/2011 Ching Huang Added TIMEOUT_DELAY in case of too many HDDs need to start
73 ** 1.20.00.23 11/08/2011 Ching Huang Added report device transfer speed
74 ** 1.20.00.23 01/30/2012 Ching Huang Fixed Request requeued and Retrying command
75 ** 1.20.00.24 06/11/2012 Ching Huang Fixed return sense data condition
76 ** 1.20.00.25 08/17/2012 Ching Huang Fixed hotplug device no function on type A adapter
77 ******************************************************************************************
78 * $FreeBSD: src/sys/dev/arcmsr/arcmsr.c,v 1.43 2012/09/04 05:15:54 delphij Exp $
81 #define ARCMSR_DEBUG1 1
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/malloc.h>
86 #include <sys/kernel.h>
88 #include <sys/queue.h>
90 #include <sys/kthread.h>
91 #include <sys/module.h>
94 #include <sys/sysctl.h>
95 #include <sys/thread2.h>
97 #include <sys/device.h>
99 #include <vm/vm_param.h>
102 #include <machine/atomic.h>
103 #include <sys/conf.h>
104 #include <sys/rman.h>
106 #include <bus/cam/cam.h>
107 #include <bus/cam/cam_ccb.h>
108 #include <bus/cam/cam_sim.h>
109 #include <bus/cam/cam_periph.h>
110 #include <bus/cam/cam_xpt_periph.h>
111 #include <bus/cam/cam_xpt_sim.h>
112 #include <bus/cam/cam_debug.h>
113 #include <bus/cam/scsi/scsi_all.h>
114 #include <bus/cam/scsi/scsi_message.h>
116 **************************************************************************
117 **************************************************************************
119 #include <sys/endian.h>
120 #include <bus/pci/pcivar.h>
121 #include <bus/pci/pcireg.h>
122 #define ARCMSR_LOCK_INIT(l, s) lockinit(l, s, 0, LK_CANRECURSE)
123 #define ARCMSR_LOCK_DESTROY(l) lockuninit(l)
124 #define ARCMSR_LOCK_ACQUIRE(l) lockmgr(l, LK_EXCLUSIVE)
125 #define ARCMSR_LOCK_RELEASE(l) lockmgr(l, LK_RELEASE)
126 #define ARCMSR_LOCK_TRY(l) lockmgr(&l, LK_EXCLUSIVE|LK_NOWAIT);
127 #define arcmsr_htole32(x) htole32(x)
128 typedef struct lock arcmsr_lock_t;
130 #if !defined(CAM_NEW_TRAN_CODE)
131 #define CAM_NEW_TRAN_CODE 1
134 #define arcmsr_callout_init(a) callout_init_mp(a);
136 #define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.25 2012-08-17"
137 #include <dev/raid/arcmsr/arcmsr.h>
138 #define SRB_SIZE ((sizeof(struct CommandControlBlock)+0x1f) & 0xffe0)
139 #define ARCMSR_SRBS_POOL_SIZE (SRB_SIZE * ARCMSR_MAX_FREESRB_NUM)
141 **************************************************************************
142 **************************************************************************
144 #define CHIP_REG_READ32(s, b, r) bus_space_read_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r))
145 #define CHIP_REG_WRITE32(s, b, r, d) bus_space_write_4(acb->btag[b], acb->bhandle[b], offsetof(struct s, r), d)
147 **************************************************************************
148 **************************************************************************
150 static void arcmsr_free_srb(struct CommandControlBlock *srb);
151 static struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb);
152 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb);
153 static int arcmsr_probe(device_t dev);
154 static int arcmsr_attach(device_t dev);
155 static int arcmsr_detach(device_t dev);
156 static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg);
157 static void arcmsr_iop_parking(struct AdapterControlBlock *acb);
158 static int arcmsr_shutdown(device_t dev);
159 static void arcmsr_interrupt(struct AdapterControlBlock *acb);
160 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb);
161 static void arcmsr_free_resource(struct AdapterControlBlock *acb);
162 static void arcmsr_bus_reset(struct AdapterControlBlock *acb);
163 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
164 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
165 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
166 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
167 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb);
168 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb);
169 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag);
170 static void arcmsr_iop_reset(struct AdapterControlBlock *acb);
171 static void arcmsr_report_sense_info(struct CommandControlBlock *srb);
172 static void arcmsr_build_srb(struct CommandControlBlock *srb, bus_dma_segment_t * dm_segs, u_int32_t nseg);
173 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb);
174 static int arcmsr_resume(device_t dev);
175 static int arcmsr_suspend(device_t dev);
176 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb);
177 static void arcmsr_polling_devmap(void* arg);
178 static void arcmsr_srb_timeout(void* arg);
180 static void arcmsr_dump_data(struct AdapterControlBlock *acb);
183 **************************************************************************
184 **************************************************************************
186 static void UDELAY(u_int32_t us) { DELAY(us); }
188 **************************************************************************
189 **************************************************************************
191 static bus_dmamap_callback_t arcmsr_map_free_srb;
192 static bus_dmamap_callback_t arcmsr_execute_srb;
194 **************************************************************************
195 **************************************************************************
197 static d_open_t arcmsr_open;
198 static d_close_t arcmsr_close;
199 static d_ioctl_t arcmsr_ioctl;
/*
 * Newbus device-method dispatch table: wires the standard device life-cycle
 * entry points (probe/attach/detach/shutdown/suspend/resume) to the arcmsr
 * implementations and delegates bus methods to the generic handlers.
 * NOTE(review): the table terminator is not visible in this extraction --
 * confirm DEVMETHOD_END (or {0,0}) is present in the full source.
 */
201 static device_method_t arcmsr_methods[]={
202 DEVMETHOD(device_probe, arcmsr_probe),
203 DEVMETHOD(device_attach, arcmsr_attach),
204 DEVMETHOD(device_detach, arcmsr_detach),
205 DEVMETHOD(device_shutdown, arcmsr_shutdown),
206 DEVMETHOD(device_suspend, arcmsr_suspend),
207 DEVMETHOD(device_resume, arcmsr_resume),
208 DEVMETHOD(bus_print_child, bus_generic_print_child),
209 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/*
 * Driver descriptor: name, method table, and softc size (one
 * AdapterControlBlock is allocated per attached controller).
 */
213 static driver_t arcmsr_driver={
214 "arcmsr", arcmsr_methods, sizeof(struct AdapterControlBlock)
/*
 * Module registration: attach the driver to the PCI bus, declare the module
 * version, and record load-order dependencies on the pci and cam subsystems.
 */
217 static devclass_t arcmsr_devclass;
218 DRIVER_MODULE(arcmsr, pci, arcmsr_driver, arcmsr_devclass, NULL, NULL);
219 MODULE_VERSION(arcmsr, 1);
220 MODULE_DEPEND(arcmsr, pci, 1, 1, 1);
221 MODULE_DEPEND(arcmsr, cam, 1, 1, 1);
/* Fallback definition for platforms whose bus_dma headers lack this flag. */
222 #ifndef BUS_DMA_COHERENT
223 #define BUS_DMA_COHERENT 0x04 /* hint: map memory in a coherent way */
/*
 * Character-device switch for the /dev control node (D_MPSAFE: handlers
 * perform their own locking). Only open/close/ioctl are provided; ioctl is
 * the management path used by Areca's userland CLI/HTTP tools.
 */
226 static struct dev_ops arcmsr_ops = {
227 { "arcmsr", 0, D_MPSAFE },
228 .d_open = arcmsr_open, /* open */
229 .d_close = arcmsr_close, /* close */
230 .d_ioctl = arcmsr_ioctl, /* ioctl */
/* Loader tunable hw.arcmsr.msi.enable: nonzero (default 1) allows MSI use. */
233 static int arcmsr_msi_enable = 1;
234 TUNABLE_INT("hw.arcmsr.msi.enable", &arcmsr_msi_enable);
238 **************************************************************************
239 **************************************************************************
/*
 * Open entry point for the control device: resolves the per-adapter softc
 * stashed in si_drv1 at device creation. Remainder of the body (validation
 * and return) is not visible in this extraction.
 */
243 arcmsr_open(struct dev_open_args *ap)
245 cdev_t dev = ap->a_head.a_dev;
246 struct AdapterControlBlock *acb=dev->si_drv1;
255 **************************************************************************
256 **************************************************************************
/*
 * Close entry point for the control device; mirrors arcmsr_open. Remainder
 * of the body is not visible in this extraction.
 */
260 arcmsr_close(struct dev_close_args *ap)
262 cdev_t dev = ap->a_head.a_dev;
263 struct AdapterControlBlock *acb=dev->si_drv1;
272 **************************************************************************
273 **************************************************************************
/*
 * ioctl entry point for the control device: unpacks the command and argument
 * from the dev_ioctl_args and forwards them to arcmsr_iop_ioctlcmd(), which
 * implements the message-passing protocol with the IOP firmware.
 */
277 arcmsr_ioctl(struct dev_ioctl_args *ap)
279 cdev_t dev = ap->a_head.a_dev;
280 u_long ioctl_cmd = ap->a_cmd;
281 caddr_t arg = ap->a_data;
282 struct AdapterControlBlock *acb=dev->si_drv1;
287 return (arcmsr_iop_ioctlcmd(acb, ioctl_cmd, arg));
291 **********************************************************************
292 **********************************************************************
/*
 * Mask all outbound (controller-to-host) interrupts, dispatching on the
 * adapter generation (type A/B/C register layouts differ). Returns the
 * interrupt-mask register value read *before* masking so the caller can
 * later restore it via arcmsr_enable_allintr().
 */
294 static u_int32_t arcmsr_disable_allintr( struct AdapterControlBlock *acb)
296 u_int32_t intmask_org=0;
298 switch (acb->adapter_type) {
299 case ACB_ADAPTER_TYPE_A: {
300 /* disable all outbound interrupt */
301 intmask_org=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intmask); /* disable outbound message0 int */
302 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
305 case ACB_ADAPTER_TYPE_B: {
306 /* disable all outbound interrupt */
307 intmask_org=CHIP_REG_READ32(HBB_DOORBELL,
308 0, iop2drv_doorbell_mask) & (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE); /* disable outbound message0 int */
309 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, 0); /* disable all interrupt */
312 case ACB_ADAPTER_TYPE_C: {
313 /* disable all outbound interrupt */
314 intmask_org=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_mask) ; /* disable outbound message0 int */
315 CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
319 return (intmask_org);
322 **********************************************************************
323 **********************************************************************
/*
 * Re-enable the interrupt sources the driver services (post queue, doorbell,
 * message0), combining the saved mask (intmask_org, as returned by
 * arcmsr_disable_allintr) with the per-type enable bits, and cache the
 * resulting enabled-source set in acb->outbound_int_enable for the ISR.
 * Note the polarity difference: type A/C registers are mask-to-disable
 * (bits are ANDed out), type B is mask-to-enable (bits are ORed in).
 */
325 static void arcmsr_enable_allintr( struct AdapterControlBlock *acb, u_int32_t intmask_org)
329 switch (acb->adapter_type) {
330 case ACB_ADAPTER_TYPE_A: {
331 /* enable outbound Post Queue, outbound doorbell Interrupt */
332 mask=~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE|ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
333 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intmask, intmask_org & mask);
334 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
337 case ACB_ADAPTER_TYPE_B: {
338 /* enable ARCMSR_IOP2DRV_MESSAGE_CMD_DONE */
339 mask=(ARCMSR_IOP2DRV_DATA_WRITE_OK|ARCMSR_IOP2DRV_DATA_READ_OK|ARCMSR_IOP2DRV_CDB_DONE|ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
340 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell_mask, intmask_org | mask); /*1=interrupt enable, 0=interrupt disable*/
341 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
344 case ACB_ADAPTER_TYPE_C: {
345 /* enable outbound Post Queue, outbound doorbell Interrupt */
346 mask=~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
347 CHIP_REG_WRITE32(HBC_MessageUnit, 0, host_int_mask, intmask_org & mask);
348 acb->outbound_int_enable= ~(intmask_org & mask) & 0x0000000f;
354 **********************************************************************
355 **********************************************************************
/*
 * Poll the type-A outbound interrupt status for MESSAGE0 completion,
 * acknowledging (write-1-to-clear) when seen. The outer retry loop bounds
 * the total wait at roughly 20 seconds; returns success/failure to caller.
 */
357 static u_int8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
360 u_int8_t Retries=0x00;
363 for(Index=0; Index < 100; Index++) {
364 if(CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
365 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);/*clear interrupt*/
370 }while(Retries++ < 20);/*max 20 sec*/
374 **********************************************************************
375 **********************************************************************
/*
 * Type-B variant of the message-complete poll: watches the iop2drv doorbell
 * for MESSAGE_CMD_DONE, clears it, and acknowledges back to the IOP with an
 * END_OF_INTERRUPT doorbell write. Bounded at roughly 20 seconds.
 */
377 static u_int8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
380 u_int8_t Retries=0x00;
383 for(Index=0; Index < 100; Index++) {
384 if(CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
385 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt*/
386 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
391 }while(Retries++ < 20);/*max 20 sec*/
395 **********************************************************************
396 **********************************************************************
/*
 * Type-C variant of the message-complete poll: watches the outbound doorbell
 * for MESSAGE_CMD_DONE and clears it through the dedicated clear register.
 * Bounded at roughly 20 seconds.
 */
398 static u_int8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *acb)
401 u_int8_t Retries=0x00;
404 for(Index=0; Index < 100; Index++) {
405 if(CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
406 CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);/*clear interrupt*/
411 }while(Retries++ < 20);/*max 20 sec*/
415 ************************************************************************
416 ************************************************************************
/*
 * Ask a type-A controller to flush its write cache (FLUSH_CACHE message via
 * inbound_msgaddr0), retrying until acknowledged or retry_count exhausts.
 */
418 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
420 int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */
422 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
424 if(arcmsr_hba_wait_msgint_ready(acb)) {
429 }while(retry_count!=0);
432 ************************************************************************
433 ************************************************************************
/*
 * Type-B write-cache flush: the FLUSH_CACHE message is delivered through the
 * drv2iop doorbell instead of a message register; retried until acknowledged.
 */
435 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
437 int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */
439 CHIP_REG_WRITE32(HBB_DOORBELL,
440 0, drv2iop_doorbell, ARCMSR_MESSAGE_FLUSH_CACHE);
442 if(arcmsr_hbb_wait_msgint_ready(acb)) {
447 }while(retry_count!=0);
450 ************************************************************************
451 ************************************************************************
/*
 * Type-C write-cache flush: posts the FLUSH_CACHE message and then rings the
 * inbound doorbell to tell the IOP a message is pending; retried until acked.
 */
453 static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *acb)
455 int retry_count=30;/* enlarge wait flush adapter cache time: 10 minute */
457 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
458 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
460 if(arcmsr_hbc_wait_msgint_ready(acb)) {
465 }while(retry_count!=0);
468 ************************************************************************
469 ************************************************************************
/*
 * Generation dispatcher for the cache flush: routes to the type-specific
 * flush routine based on acb->adapter_type.
 */
471 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
473 switch (acb->adapter_type) {
474 case ACB_ADAPTER_TYPE_A: {
475 arcmsr_flush_hba_cache(acb);
478 case ACB_ADAPTER_TYPE_B: {
479 arcmsr_flush_hbb_cache(acb);
482 case ACB_ADAPTER_TYPE_C: {
483 arcmsr_flush_hbc_cache(acb);
489 *******************************************************************************
490 *******************************************************************************
/*
 * Power-management suspend hook: parks the IOP (flushes controller state)
 * and masks all outbound interrupts before the system sleeps.
 */
492 static int arcmsr_suspend(device_t dev)
494 struct AdapterControlBlock *acb = device_get_softc(dev);
496 /* flush controller */
497 arcmsr_iop_parking(acb);
498 /* disable all outbound interrupt */
499 arcmsr_disable_allintr(acb);
503 *******************************************************************************
504 *******************************************************************************
/*
 * Power-management resume hook: re-initializes the IOP, undoing the
 * parked/masked state left by arcmsr_suspend().
 */
506 static int arcmsr_resume(device_t dev)
508 struct AdapterControlBlock *acb = device_get_softc(dev);
510 arcmsr_iop_init(acb);
514 *********************************************************************************
515 *********************************************************************************
/* CAM asynchronous-event callback; body not visible in this extraction. */
517 static void arcmsr_async(void *cb_arg, u_int32_t code, struct cam_path *path, void *arg)
521 **********************************************************************
522 **********************************************************************
/*
 * Finish an SRB and hand its CCB back to CAM: stop any pending per-command
 * timeout callout, post-sync and unload the data DMA map (direction chosen
 * from the CCB flags), decrement the outstanding count, release the frozen
 * SIM queue once outstanding drops below the watermark, and free the SRB
 * unless it already timed out (timed-out SRBs are reclaimed by the
 * completion path that observes ARCMSR_SRB_TIMEOUT).
 */
524 static void arcmsr_srb_complete(struct CommandControlBlock *srb, int stand_flag)
526 struct AdapterControlBlock *acb=srb->acb;
527 union ccb * pccb=srb->pccb;
529 if(srb->srb_flags & SRB_FLAG_TIMER_START)
530 callout_stop(&srb->ccb_callout);
531 if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
534 if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
535 op = BUS_DMASYNC_POSTREAD;
537 op = BUS_DMASYNC_POSTWRITE;
539 bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
540 bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
543 atomic_subtract_int(&acb->srboutstandingcount, 1);
544 if((acb->acb_flags & ACB_F_CAM_DEV_QFRZN) && (
545 acb->srboutstandingcount < ARCMSR_RELEASE_SIMQ_LEVEL)) {
546 acb->acb_flags &= ~ACB_F_CAM_DEV_QFRZN;
547 pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
550 if(srb->srb_state != ARCMSR_SRB_TIMEOUT)
551 arcmsr_free_srb(srb);
553 acb->pktReturnCount++;
559 **********************************************************************
560 **********************************************************************
/*
 * Report a CHECK CONDITION to CAM: mark the CCB with SCSI-status error, copy
 * the firmware-supplied sense bytes into the CCB (bounded by the smaller of
 * the two buffers), force byte 0 to Valid|0x70 (current fixed-format sense),
 * and flag the autosense data as valid.
 */
562 static void arcmsr_report_sense_info(struct CommandControlBlock *srb)
564 union ccb * pccb=srb->pccb;
566 pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
567 pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
568 if(pccb->csio.sense_len) {
569 memset(&pccb->csio.sense_data, 0, sizeof(pccb->csio.sense_data));
570 memcpy(&pccb->csio.sense_data, srb->arcmsr_cdb.SenseData,
571 get_min(sizeof(struct SENSE_DATA), sizeof(pccb->csio.sense_data)));
572 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70); /* Valid,ErrorCode */
573 pccb->ccb_h.status |= CAM_AUTOSNS_VALID;
577 *********************************************************************
578 *********************************************************************
/*
 * Tell a type-A controller to abort every outstanding command (ABORT_CMD
 * message); log if the firmware fails to acknowledge within the poll window.
 */
580 static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
582 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
583 if(!arcmsr_hba_wait_msgint_ready(acb)) {
584 kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
588 *********************************************************************
589 *********************************************************************
/*
 * Type-B abort-all: the ABORT_CMD message goes through the drv2iop doorbell;
 * log if the firmware fails to acknowledge within the poll window.
 */
591 static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
593 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
594 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
595 kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
599 *********************************************************************
600 *********************************************************************
/*
 * Type-C abort-all: post the ABORT_CMD message then ring the inbound
 * doorbell; log if the firmware fails to acknowledge within the poll window.
 */
602 static void arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *acb)
604 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_ABORT_CMD);
605 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
606 if(!arcmsr_hbc_wait_msgint_ready(acb)) {
607 kprintf("arcmsr%d: wait 'abort all outstanding command' timeout \n", acb->pci_unit);
611 *********************************************************************
612 *********************************************************************
/*
 * Generation dispatcher for abort-all: routes to the type-specific abort
 * routine based on acb->adapter_type.
 */
614 static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
616 switch (acb->adapter_type) {
617 case ACB_ADAPTER_TYPE_A: {
618 arcmsr_abort_hba_allcmd(acb);
621 case ACB_ADAPTER_TYPE_B: {
622 arcmsr_abort_hbb_allcmd(acb);
625 case ACB_ADAPTER_TYPE_C: {
626 arcmsr_abort_hbc_allcmd(acb);
632 **************************************************************************
633 **************************************************************************
/*
 * Translate the firmware's per-command DeviceStatus into a CAM completion
 * status, maintaining the per-target/LUN devstate table (GOOD/GONE) as a
 * side effect, then complete the SRB. Select-timeout, aborted and init-fail
 * map to CAM_DEV_NOT_THERE (deliberately not CAM_SEL_TIMEOUT -- see the
 * changelog note about cam_periph_error tearing down sibling LUNs);
 * CHECK CONDITION attaches sense data; anything else is treated as a
 * retryable parity-style error.
 */
635 static void arcmsr_report_srb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *srb, u_int16_t error)
639 target=srb->pccb->ccb_h.target_id;
640 lun=srb->pccb->ccb_h.target_lun;
642 if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
643 acb->devstate[target][lun]=ARECA_RAID_GOOD;
645 srb->pccb->ccb_h.status |= CAM_REQ_CMP;
646 arcmsr_srb_complete(srb, 1);
648 switch(srb->arcmsr_cdb.DeviceStatus) {
649 case ARCMSR_DEV_SELECT_TIMEOUT: {
650 if(acb->devstate[target][lun]==ARECA_RAID_GOOD) {
651 kprintf( "arcmsr%d: Target=%x, Lun=%x, selection timeout, raid volume was lost\n", acb->pci_unit, target, lun);
653 acb->devstate[target][lun]=ARECA_RAID_GONE;
654 srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
655 arcmsr_srb_complete(srb, 1);
658 case ARCMSR_DEV_ABORTED:
659 case ARCMSR_DEV_INIT_FAIL: {
660 acb->devstate[target][lun]=ARECA_RAID_GONE;
661 srb->pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
662 arcmsr_srb_complete(srb, 1);
665 case SCSISTAT_CHECK_CONDITION: {
666 acb->devstate[target][lun]=ARECA_RAID_GOOD;
667 arcmsr_report_sense_info(srb);
668 arcmsr_srb_complete(srb, 1);
672 kprintf("arcmsr%d: scsi id=%d lun=%d isr got command error done, but got unknown DeviceStatus=0x%x\n"
673 , acb->pci_unit, target, lun ,srb->arcmsr_cdb.DeviceStatus);
674 acb->devstate[target][lun]=ARECA_RAID_GONE;
675 srb->pccb->ccb_h.status |= CAM_UNCOR_PARITY;
676 /*unknown error or crc error just for retry*/
677 arcmsr_srb_complete(srb, 1);
683 **************************************************************************
684 **************************************************************************
/*
 * Convert a completion token pulled from the done queue back into its SRB
 * and report its state. Type C tokens carry the low physical address with
 * the bottom 5 bits as flags (masked off); type A/B tokens are the address
 * right-shifted by 5 (shifted back). Both rely on vir2phy_offset to map the
 * physical frame back to its kernel-virtual SRB. Stale completions (wrong
 * acb, or SRB no longer in START state) are logged; a timed-out SRB is
 * finally freed here, completing the deferred free from arcmsr_srb_complete.
 */
686 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t flag_srb, u_int16_t error)
688 struct CommandControlBlock *srb;
690 /* check if command done with no error*/
691 switch (acb->adapter_type) {
692 case ACB_ADAPTER_TYPE_C:
693 srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
695 case ACB_ADAPTER_TYPE_A:
696 case ACB_ADAPTER_TYPE_B:
698 srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
701 if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
702 if(srb->srb_state == ARCMSR_SRB_TIMEOUT) {
703 arcmsr_free_srb(srb);
704 kprintf("arcmsr%d: srb='%p' return srb has been timeouted\n", acb->pci_unit, srb);
707 kprintf("arcmsr%d: return srb has been completed\n"
708 "srb='%p' srb_state=0x%x outstanding srb count=%d \n",
709 acb->pci_unit, srb, srb->srb_state, acb->srboutstandingcount);
712 arcmsr_report_srb_state(acb, srb, error);
715 **************************************************************************
716 **************************************************************************
/*
 * Per-command timeout callout handler: under the queue-buffer lock, if the
 * SRB is still in flight (ARCMSR_SRB_START) mark it ARCMSR_SRB_TIMEOUT,
 * complete its CCB with CAM_CMD_TIMEOUT, and log the offending command.
 * The SRB itself is not freed here -- see the TIMEOUT handling in
 * arcmsr_srb_complete / arcmsr_drain_donequeue. Dumps adapter state after.
 */
718 static void arcmsr_srb_timeout(void* arg)
720 struct CommandControlBlock *srb = (struct CommandControlBlock *)arg;
721 struct AdapterControlBlock *acb;
725 target=srb->pccb->ccb_h.target_id;
726 lun=srb->pccb->ccb_h.target_lun;
728 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
729 if(srb->srb_state == ARCMSR_SRB_START)
731 cmd = srb->pccb->csio.cdb_io.cdb_bytes[0];
732 srb->srb_state = ARCMSR_SRB_TIMEOUT;
733 srb->pccb->ccb_h.status |= CAM_CMD_TIMEOUT;
734 arcmsr_srb_complete(srb, 1);
735 kprintf("arcmsr%d: scsi id %d lun %d cmd=0x%x srb='%p' ccb command time out!\n",
736 acb->pci_unit, target, lun, cmd, srb);
738 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
740 arcmsr_dump_data(acb);
745 **********************************************************************
746 **********************************************************************
/*
 * Drain every completion still sitting in the controller's done queue so
 * the queues are empty before an abort/reset. Per generation:
 *  A: ack pending interrupt status, then pop the outbound queue port until
 *     it returns the 0xFFFFFFFF empty sentinel (bounded by max outstanding);
 *  B: clear the doorbell and sweep the memory-resident done_qbuffer ring,
 *     zeroing both done and post rings and resetting both indices;
 *  C: pop outbound_queueport_low while the post-queue ISR bit stays set.
 * Each popped token is routed through arcmsr_drain_donequeue with its
 * per-generation error-flag bit decoded.
 */
748 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
754 switch (acb->adapter_type) {
755 case ACB_ADAPTER_TYPE_A: {
756 u_int32_t outbound_intstatus;
758 /*clear and abort all outbound posted Q*/
759 outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
760 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus);/*clear interrupt*/
761 while(((flag_srb=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
762 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
763 arcmsr_drain_donequeue(acb, flag_srb, error);
767 case ACB_ADAPTER_TYPE_B: {
768 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
770 /*clear all outbound posted Q*/
771 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
772 for(i=0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
773 if((flag_srb=phbbmu->done_qbuffer[i])!=0) {
774 phbbmu->done_qbuffer[i]=0;
775 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
776 arcmsr_drain_donequeue(acb, flag_srb, error);
778 phbbmu->post_qbuffer[i]=0;
779 }/*drain reply FIFO*/
780 phbbmu->doneq_index=0;
781 phbbmu->postq_index=0;
784 case ACB_ADAPTER_TYPE_C: {
786 while((CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
787 flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
788 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
789 arcmsr_drain_donequeue(acb, flag_srb, error);
796 ****************************************************************************
797 ****************************************************************************
/*
 * Full IOP-level reset of the command pipeline. If commands are still
 * outstanding: mask interrupts, drain the done queues, send abort-all to
 * the firmware, then walk the entire SRB pool completing any SRB still in
 * START state with CAM_REQ_ABORTED, and restore the interrupt mask.
 * Finally zero the outstanding count, the working-SRB ring indices, and the
 * request/return packet counters.
 */
799 static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
801 struct CommandControlBlock *srb;
802 u_int32_t intmask_org;
805 if(acb->srboutstandingcount>0) {
806 /* disable all outbound interrupt */
807 intmask_org=arcmsr_disable_allintr(acb);
808 /*clear and abort all outbound posted Q*/
809 arcmsr_done4abort_postqueue(acb);
810 /* talk to iop 331 outstanding command aborted*/
811 arcmsr_abort_allcmd(acb);
812 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
813 srb=acb->psrb_pool[i];
814 if(srb->srb_state==ARCMSR_SRB_START) {
815 srb->srb_state=ARCMSR_SRB_ABORTED;
816 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
817 arcmsr_srb_complete(srb, 1);
818 kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p' aborted\n"
819 , acb->pci_unit, srb->pccb->ccb_h.target_id
820 , srb->pccb->ccb_h.target_lun, srb);
823 /* enable all outbound interrupt */
824 arcmsr_enable_allintr(acb, intmask_org);
826 acb->srboutstandingcount=0;
827 acb->workingsrb_doneindex=0;
828 acb->workingsrb_startindex=0;
830 acb->pktRequestCount = 0;
831 acb->pktReturnCount = 0;
835 **********************************************************************
836 **********************************************************************
/*
 * Build the firmware CDB frame inside the SRB from the CAM SCSI I/O request
 * and the bus_dma segment list. Copies target/LUN/CDB, then emits one
 * scatter-gather entry per DMA segment: a 32-bit SG32ENTRY when the segment
 * sits below 4GB, otherwise a SG64ENTRY whose length carries the
 * IS_SG64_ADDR marker. Segments that straddle a 4GB boundary are split so
 * no single entry crosses it (the high address word is bumped for the
 * remainder). The accumulated frame size (base 0x30 bytes + SG area) is
 * recorded in srb->arc_cdb_size, and frames larger than 256 bytes are
 * flagged ARCMSR_CDB_FLAG_SGL_BSIZE so the post path tells the IOP to fetch
 * the bigger frame. Also pre-syncs the data buffer for the DMA direction
 * implied by the CCB flags.
 */
838 static void arcmsr_build_srb(struct CommandControlBlock *srb,
839 bus_dma_segment_t *dm_segs, u_int32_t nseg)
841 struct ARCMSR_CDB * arcmsr_cdb= &srb->arcmsr_cdb;
842 u_int8_t * psge=(u_int8_t *)&arcmsr_cdb->u;
843 u_int32_t address_lo, address_hi;
844 union ccb * pccb=srb->pccb;
845 struct ccb_scsiio * pcsio= &pccb->csio;
846 u_int32_t arccdbsize=0x30;
848 memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
850 arcmsr_cdb->TargetID=pccb->ccb_h.target_id;
851 arcmsr_cdb->LUN=pccb->ccb_h.target_lun;
852 arcmsr_cdb->Function=1;
853 arcmsr_cdb->CdbLength=(u_int8_t)pcsio->cdb_len;
854 arcmsr_cdb->Context=0;
855 bcopy(pcsio->cdb_io.cdb_bytes, arcmsr_cdb->Cdb, pcsio->cdb_len);
857 struct AdapterControlBlock *acb=srb->acb;
859 u_int32_t length, i, cdb_sgcount=0;
861 if((pccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
862 op=BUS_DMASYNC_PREREAD;
864 op=BUS_DMASYNC_PREWRITE;
865 arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_WRITE;
866 srb->srb_flags|=SRB_FLAG_WRITE;
868 bus_dmamap_sync(acb->dm_segs_dmat, srb->dm_segs_dmamap, op);
869 for(i=0;i<nseg;i++) {
870 /* Get the physical address of the current data pointer */
871 length=arcmsr_htole32(dm_segs[i].ds_len);
872 address_lo=arcmsr_htole32(dma_addr_lo32(dm_segs[i].ds_addr));
873 address_hi=arcmsr_htole32(dma_addr_hi32(dm_segs[i].ds_addr));
875 struct SG32ENTRY * pdma_sg=(struct SG32ENTRY *)psge;
876 pdma_sg->address=address_lo;
877 pdma_sg->length=length;
878 psge += sizeof(struct SG32ENTRY);
879 arccdbsize += sizeof(struct SG32ENTRY);
881 u_int32_t sg64s_size=0, tmplength=length;
884 u_int64_t span4G, length0;
885 struct SG64ENTRY * pdma_sg=(struct SG64ENTRY *)psge;
887 span4G=(u_int64_t)address_lo + tmplength;
888 pdma_sg->addresshigh=address_hi;
889 pdma_sg->address=address_lo;
890 if(span4G > 0x100000000) {
891 /*see if cross 4G boundary*/
892 length0=0x100000000-address_lo;
893 pdma_sg->length=(u_int32_t)length0|IS_SG64_ADDR;
894 address_hi=address_hi+1;
896 tmplength=tmplength-(u_int32_t)length0;
897 sg64s_size += sizeof(struct SG64ENTRY);
898 psge += sizeof(struct SG64ENTRY);
901 pdma_sg->length=tmplength|IS_SG64_ADDR;
902 sg64s_size += sizeof(struct SG64ENTRY);
903 psge += sizeof(struct SG64ENTRY);
907 arccdbsize += sg64s_size;
911 arcmsr_cdb->sgcount=(u_int8_t)cdb_sgcount;
912 arcmsr_cdb->DataLength=pcsio->dxfer_len;
913 if( arccdbsize > 256) {
914 arcmsr_cdb->Flags|=ARCMSR_CDB_FLAG_SGL_BSIZE;
917 arcmsr_cdb->DataLength = 0;
919 srb->arc_cdb_size=arccdbsize;
922 **************************************************************************
923 **************************************************************************
/*
** Hand one built SRB to the adapter firmware. The 32-bit shifted CDB
** physical address is posted through the chip-specific inbound queue:
** type A via inbound_queueport, type B via the shared post_qbuffer ring
** plus a drv2iop doorbell, type C via the split high/low queueports.
*/
925 static void arcmsr_post_srb(struct AdapterControlBlock *acb, struct CommandControlBlock *srb)
927 u_int32_t cdb_shifted_phyaddr=(u_int32_t) srb->cdb_shifted_phyaddr;
928 struct ARCMSR_CDB * arcmsr_cdb=(struct ARCMSR_CDB *)&srb->arcmsr_cdb;
/* flush CPU view of the SRB before the IOP reads it via DMA */
930 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, (srb->srb_flags & SRB_FLAG_WRITE) ? BUS_DMASYNC_POSTWRITE:BUS_DMASYNC_POSTREAD);
931 atomic_add_int(&acb->srboutstandingcount, 1);
932 srb->srb_state=ARCMSR_SRB_START;
934 switch (acb->adapter_type) {
935 case ACB_ADAPTER_TYPE_A: {
/* SGL_BSIZE flag is echoed into the posted address so firmware knows the CDB size class */
936 if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
937 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE);
939 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_queueport, cdb_shifted_phyaddr);
943 case ACB_ADAPTER_TYPE_B: {
944 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
945 int ending_index, index;
947 index=phbbmu->postq_index;
/* pre-clear the next slot so the consumer sees a 0 terminator */
948 ending_index=((index+1)%ARCMSR_MAX_HBB_POSTQUEUE);
949 phbbmu->post_qbuffer[ending_index]=0;
950 if(arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
951 phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr|ARCMSR_SRBPOST_FLAG_SGL_BSIZE;
953 phbbmu->post_qbuffer[index]= cdb_shifted_phyaddr;
956 index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */
957 phbbmu->postq_index=index;
/* ring the doorbell to tell the IOP a CDB was queued */
958 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_CDB_POSTED);
961 case ACB_ADAPTER_TYPE_C:
963 u_int32_t ccb_post_stamp, arc_cdb_size, cdb_phyaddr_hi32;
/* firmware caps the encoded CDB size at 0x300 bytes */
965 arc_cdb_size=(srb->arc_cdb_size>0x300)?0x300:srb->arc_cdb_size;
/* low bits encode ((size-1)>>6)|1 alongside the shifted address */
966 ccb_post_stamp=(cdb_shifted_phyaddr | ((arc_cdb_size-1) >> 6) | 1);
967 cdb_phyaddr_hi32 = acb->srb_phyaddr.B.phyadd_high;
/* write high dword first (when non-zero), then the low/stamp dword */
970 CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_high, cdb_phyaddr_hi32);
971 CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
975 CHIP_REG_WRITE32(HBC_MessageUnit,0,inbound_queueport_low, ccb_post_stamp);
982 ************************************************************************
983 ************************************************************************
/*
** Return a pointer to the IOP->driver (read) message QBUFFER for the
** current adapter type. Types A and C map it directly in the message
** unit; type B reaches it through the hbb_rwbuffer indirection.
*/
985 static struct QBUFFER * arcmsr_get_iop_rqbuffer( struct AdapterControlBlock *acb)
987 struct QBUFFER *qbuffer=NULL;
989 switch (acb->adapter_type) {
990 case ACB_ADAPTER_TYPE_A: {
991 struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu;
993 qbuffer=(struct QBUFFER *)&phbamu->message_rbuffer;
996 case ACB_ADAPTER_TYPE_B: {
997 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
999 qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
1002 case ACB_ADAPTER_TYPE_C: {
1003 struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu;
1005 qbuffer=(struct QBUFFER *)&phbcmu->message_rbuffer;
1012 ************************************************************************
1013 ************************************************************************
/*
** Return a pointer to the driver->IOP (write) message QBUFFER for the
** current adapter type; mirror of arcmsr_get_iop_rqbuffer().
*/
1015 static struct QBUFFER * arcmsr_get_iop_wqbuffer( struct AdapterControlBlock *acb)
1017 struct QBUFFER *qbuffer=NULL;
1019 switch (acb->adapter_type) {
1020 case ACB_ADAPTER_TYPE_A: {
1021 struct HBA_MessageUnit *phbamu=(struct HBA_MessageUnit *)acb->pmu;
1023 qbuffer=(struct QBUFFER *)&phbamu->message_wbuffer;
1026 case ACB_ADAPTER_TYPE_B: {
1027 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1029 qbuffer=(struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_wbuffer;
1032 case ACB_ADAPTER_TYPE_C: {
1033 struct HBC_MessageUnit *phbcmu=(struct HBC_MessageUnit *)acb->pmu;
1035 qbuffer=(struct QBUFFER *)&phbcmu->message_wbuffer;
1042 **************************************************************************
1043 **************************************************************************
/*
** Acknowledge to the IOP that the driver has consumed its message data,
** using the adapter-type-specific doorbell register and bit.
*/
1045 static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1047 switch (acb->adapter_type) {
1048 case ACB_ADAPTER_TYPE_A: {
1049 /* let IOP know data has been read */
1050 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
1053 case ACB_ADAPTER_TYPE_B: {
1054 /* let IOP know data has been read */
1055 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
1058 case ACB_ADAPTER_TYPE_C: {
1059 /* let IOP know data has been read */
1060 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
1065 **************************************************************************
1066 **************************************************************************
/*
** Notify the IOP that the driver has placed fresh data in the write
** QBUFFER; the IOP replies on a later interrupt, gating the next post.
*/
1068 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1070 switch (acb->adapter_type) {
1071 case ACB_ADAPTER_TYPE_A: {
1073 ** push inbound doorbell tell iop, driver data write ok
1074 ** and wait reply on next hwinterrupt for next Qbuffer post
1076 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
1079 case ACB_ADAPTER_TYPE_B: {
1081 ** push inbound doorbell tell iop, driver data write ok
1082 ** and wait reply on next hwinterrupt for next Qbuffer post
1084 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_WRITE_OK);
1087 case ACB_ADAPTER_TYPE_C: {
1089 ** push inbound doorbell tell iop, driver data write ok
1090 ** and wait reply on next hwinterrupt for next Qbuffer post
1092 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
1098 **********************************************************************
1099 **********************************************************************
/*
** Drain pending ioctl bytes from the driver's wqbuffer ring into the
** IOP's write QBUFFER (at most 124 bytes per post), then ring the
** doorbell. Only runs when the IOP has read the previous buffer
** (ACB_F_MESSAGE_WQBUFFER_READ set); the flag is cleared before posting.
*/
1101 static void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
1104 struct QBUFFER *pwbuffer;
1105 u_int8_t * iop_data;
1106 int32_t allxfer_len=0;
1108 pwbuffer=arcmsr_get_iop_wqbuffer(acb);
1109 iop_data=(u_int8_t *)pwbuffer->data;
1110 if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
1111 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
/* copy one byte per iteration from the circular wqbuffer, up to 124 bytes */
1112 while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex)
1113 && (allxfer_len<124)) {
1114 pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex];
1115 memcpy(iop_data, pQbuffer, 1);
1116 acb->wqbuf_firstindex++;
1117 acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */
1121 pwbuffer->data_len=allxfer_len;
1123 ** push inbound doorbell and wait reply at hwinterrupt routine for next Qbuffer post
1125 arcmsr_iop_message_wrote(acb);
1129 ************************************************************************
1130 ************************************************************************
/*
** Stop the type-A adapter's background rebuild: clear the BGRB-started
** flag, send STOP_BGRB through inbound_msgaddr0, and log if the
** firmware fails to acknowledge within the wait window.
** (Fix: log message said "rebulid" instead of "rebuild".)
*/
1132 static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
1134 acb->acb_flags &=~ACB_F_MSG_START_BGRB;
1135 CHIP_REG_WRITE32(HBA_MessageUnit,
1136 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1137 if(!arcmsr_hba_wait_msgint_ready(acb)) {
1138 kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1144 ************************************************************************
1145 ************************************************************************
/*
** Stop the type-B adapter's background rebuild via the drv2iop
** doorbell; log a timeout if the message-interrupt handshake fails.
** (Fix: log message said "rebulid" instead of "rebuild".)
*/
1147 static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
1149 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1150 CHIP_REG_WRITE32(HBB_DOORBELL,
1151 0, drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
1152 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
1153 kprintf( "arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
1158 ************************************************************************
1159 ************************************************************************
/*
** Stop the type-C adapter's background rebuild: post STOP_BGRB through
** inbound_msgaddr0 and ring the MESSAGE_CMD_DONE doorbell, then log a
** timeout if the firmware does not acknowledge.
** (Fix: log message said "rebulid" instead of "rebuild".)
*/
1161 static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *acb)
1163 acb->acb_flags &=~ACB_F_MSG_START_BGRB;
1164 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
1165 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1166 if(!arcmsr_hbc_wait_msgint_ready(acb)) {
1167 kprintf("arcmsr%d: wait 'stop adapter background rebuild' timeout \n", acb->pci_unit);
1171 ************************************************************************
1172 ************************************************************************
/*
** Dispatch "stop background rebuild" to the chip-specific helper
** for the adapter type.
*/
1174 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1176 switch (acb->adapter_type) {
1177 case ACB_ADAPTER_TYPE_A: {
1178 arcmsr_stop_hba_bgrb(acb);
1181 case ACB_ADAPTER_TYPE_B: {
1182 arcmsr_stop_hbb_bgrb(acb);
1185 case ACB_ADAPTER_TYPE_C: {
1186 arcmsr_stop_hbc_bgrb(acb);
1192 ************************************************************************
1193 ************************************************************************
/*
** CAM polling entry point: run the interrupt service routine under the
** qbuffer lock. NOTE(review): lockstatus() is queried first, presumably
** to skip re-acquiring when already held by curthread — the conditional
** using 'mutex' is not visible here; confirm against full source.
*/
1195 static void arcmsr_poll(struct cam_sim * psim)
1197 struct AdapterControlBlock *acb;
1200 acb = (struct AdapterControlBlock *)cam_sim_softc(psim);
1201 mutex = lockstatus(&acb->qbuffer_lock, curthread);
1203 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1204 arcmsr_interrupt(acb);
1206 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1209 **************************************************************************
1210 **************************************************************************
/*
** Handle "IOP wrote data" doorbell: copy the IOP's read-QBUFFER payload
** into the driver's circular rqbuffer if there is room, then ack via
** arcmsr_iop_message_read(); otherwise mark ACB_F_IOPDATA_OVERFLOW so
** the ioctl path drains it later.
*/
1212 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
1214 struct QBUFFER *prbuffer;
1217 int my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
1219 /*check this iop data if overflow my rqbuffer*/
1220 rqbuf_lastindex=acb->rqbuf_lastindex;
1221 rqbuf_firstindex=acb->rqbuf_firstindex;
1222 prbuffer=arcmsr_get_iop_rqbuffer(acb);
1223 iop_data=(u_int8_t *)prbuffer->data;
1224 iop_len=prbuffer->data_len;
/* free space in the circular buffer (power-of-two mask arithmetic) */
1225 my_empty_len=(rqbuf_firstindex-rqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
1226 if(my_empty_len>=iop_len) {
1227 while(iop_len > 0) {
1228 pQbuffer=&acb->rqbuffer[rqbuf_lastindex];
1229 memcpy(pQbuffer, iop_data, 1);
1231 rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;/*if last index number set it to 0 */
1235 acb->rqbuf_lastindex=rqbuf_lastindex;
1236 arcmsr_iop_message_read(acb);
1237 /*signature, let IOP know data has been read */
/* no room: remember the overflow; data stays in the IOP buffer */
1239 acb->acb_flags|=ACB_F_IOPDATA_OVERFLOW;
1243 **************************************************************************
1244 **************************************************************************
/*
** Handle "IOP read data" doorbell: the IOP has consumed the previous
** write QBUFFER, so mark it readable again and, if more ioctl bytes are
** queued in wqbuffer, post the next chunk (max 124 bytes) immediately.
** When the ring drains completely, set WQBUFFER_CLEARED.
*/
1246 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
1248 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
1250 *****************************************************************
1251 ** check if there are any mail packages from user space program
1252 ** in my post bag, now is the time to send them into Areca's firmware
1253 *****************************************************************
1255 if(acb->wqbuf_firstindex!=acb->wqbuf_lastindex) {
1257 struct QBUFFER *pwbuffer;
1261 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
1262 pwbuffer=arcmsr_get_iop_wqbuffer(acb);
1263 iop_data=(u_int8_t *)pwbuffer->data;
/* byte-at-a-time copy out of the circular wqbuffer, capped at 124 */
1264 while((acb->wqbuf_firstindex!=acb->wqbuf_lastindex)
1265 && (allxfer_len<124)) {
1266 pQbuffer=&acb->wqbuffer[acb->wqbuf_firstindex];
1267 memcpy(iop_data, pQbuffer, 1);
1268 acb->wqbuf_firstindex++;
1269 acb->wqbuf_firstindex %=ARCMSR_MAX_QBUFFER; /*if last index number set it to 0 */
1273 pwbuffer->data_len=allxfer_len;
1275 ** push inbound doorbell tell iop driver data write ok
1276 ** and wait reply on next hwinterrupt for next Qbuffer post
1278 arcmsr_iop_message_wrote(acb);
1280 if(acb->wqbuf_firstindex==acb->wqbuf_lastindex) {
1281 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
/*
** Completion callback for the XPT_SCAN_LUN CCB issued by
** arcmsr_rescan_lun(): log the outcome and free the path.
*/
1285 static void arcmsr_rescanLun_cb(struct cam_periph *periph, union ccb *ccb)
1288 if (ccb->ccb_h.status != CAM_REQ_CMP)
1289 kprintf("arcmsr_rescanLun_cb: Rescan Target=%x, lun=%x, failure status=%x\n",ccb->ccb_h.target_id,ccb->ccb_h.target_lun,ccb->ccb_h.status);
1291 kprintf("arcmsr_rescanLun_cb: Rescan lun successfully!\n");
1293 xpt_free_path(ccb->ccb_h.path);
/*
** Build and set up an XPT_SCAN_LUN CCB (priority 5) for the given
** target/lun so CAM re-probes it after a hot-plug event; completion is
** handled by arcmsr_rescanLun_cb. NOTE(review): the xpt_action() call
** that actually submits the CCB is not visible in this excerpt.
*/
1296 static void arcmsr_rescan_lun(struct AdapterControlBlock *acb, int target, int lun)
1298 struct cam_path *path;
/* bail out if CAM cannot create a path to this target/lun */
1301 if (xpt_create_path(&path, xpt_periph, cam_sim_path(acb->psim), target, lun) != CAM_REQ_CMP)
1303 /* kprintf("arcmsr_rescan_lun: Rescan Target=%x, Lun=%x\n", target, lun); */
1304 bzero(&ccb, sizeof(union ccb));
1305 xpt_setup_ccb(&ccb.ccb_h, path, 5);
1306 ccb.ccb_h.func_code = XPT_SCAN_LUN;
1307 ccb.ccb_h.cbfcnp = arcmsr_rescanLun_cb;
1308 ccb.crcn.flags = CAM_FLAG_NONE;
/*
** Abort every in-flight SRB addressed to a departed target/lun: with
** all outbound interrupts masked, walk the SRB pool, mark matching
** started SRBs aborted, complete them back to CAM with
** CAM_REQ_ABORTED, then restore the interrupt mask.
*/
1313 static void arcmsr_abort_dr_ccbs(struct AdapterControlBlock *acb, int target, int lun)
1315 struct CommandControlBlock *srb;
1316 u_int32_t intmask_org;
1319 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1320 /* disable all outbound interrupts */
1321 intmask_org = arcmsr_disable_allintr(acb);
1322 for (i = 0; i < ARCMSR_MAX_FREESRB_NUM; i++)
1324 srb = acb->psrb_pool[i];
1325 if (srb->srb_state == ARCMSR_SRB_START)
1327 if((target == srb->pccb->ccb_h.target_id) && (lun == srb->pccb->ccb_h.target_lun))
1329 srb->srb_state = ARCMSR_SRB_ABORTED;
1330 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
1331 arcmsr_srb_complete(srb, 1);
1332 kprintf("arcmsr%d: abort scsi id %d lun %d srb=%p \n", acb->pci_unit, target, lun, srb);
1336 /* enable outbound Post Queue, outbound doorbell Interrupt */
1337 arcmsr_enable_allintr(acb, intmask_org);
1338 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1343 **************************************************************************
1344 **************************************************************************
/*
** Device-removal/arrival handler, run when firmware posts a GET_CONFIG
** message: read the 4-dword device map from the chip-specific
** msgcode_rwbuffer, diff it against the cached acb->device_map, and for
** each changed lun bit either abort outstanding CCBs and rescan (unit
** gone) or just rescan (unit plugged in), updating devstate and the
** cached map.
*/
1346 static void arcmsr_dr_handle(struct AdapterControlBlock *acb) {
1347 u_int32_t devicemap;
1348 u_int32_t target, lun;
1349 u_int32_t deviceMapCurrent[4]={0};
/* fetch the current firmware device map; register offset and bus handle differ per chip */
1352 switch (acb->adapter_type) {
1353 case ACB_ADAPTER_TYPE_A:
1354 devicemap = offsetof(struct HBA_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1355 for (target= 0; target < 4; target++)
1357 deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1362 case ACB_ADAPTER_TYPE_B:
1363 devicemap = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1364 for (target= 0; target < 4; target++)
1366 deviceMapCurrent[target]=bus_space_read_4(acb->btag[1], acb->bhandle[1], devicemap);
1371 case ACB_ADAPTER_TYPE_C:
1372 devicemap = offsetof(struct HBC_MessageUnit, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
1373 for (target= 0; target < 4; target++)
1375 deviceMapCurrent[target]=bus_space_read_4(acb->btag[0], acb->bhandle[0], devicemap);
1381 if(acb->acb_flags & ACB_F_BUS_HANG_ON)
1383 acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
1386 ** adapter posted CONFIG message
1387 ** copy the new map, note if there are differences with the current map
1389 pDevMap = (u_int8_t *)&deviceMapCurrent[0];
/* one byte of the map per target; each bit within it is one lun */
1390 for (target= 0; target < ARCMSR_MAX_TARGETID - 1; target++)
1392 if (*pDevMap != acb->device_map[target])
1394 u_int8_t difference, bit_check;
1396 difference= *pDevMap ^ acb->device_map[target];
1397 for(lun=0; lun < ARCMSR_MAX_TARGETLUN; lun++)
1399 bit_check=(1 << lun); /*check bit from 0....31*/
1400 if(difference & bit_check)
1402 if(acb->device_map[target] & bit_check)
1403 {/* unit departed */
1404 kprintf("arcmsr_dr_handle: Target=%x, lun=%x, GONE!!!\n",target,lun);
1405 arcmsr_abort_dr_ccbs(acb, target, lun);
1406 arcmsr_rescan_lun(acb, target, lun);
1407 acb->devstate[target][lun] = ARECA_RAID_GONE;
1411 kprintf("arcmsr_dr_handle: Target=%x, lun=%x, Plug-IN!!!\n",target,lun);
1412 arcmsr_rescan_lun(acb, target, lun);
1413 acb->devstate[target][lun] = ARECA_RAID_GOOD;
1417 /* kprintf("arcmsr_dr_handle: acb->device_map[%x]=0x%x, deviceMapCurrent[%x]=%x\n",target,acb->device_map[target],target,*pDevMap); */
1418 acb->device_map[target]= *pDevMap;
1424 **************************************************************************
1425 **************************************************************************
/*
** Type-A message interrupt: clear MESSAGE0 in outbound_intstatus, then
** run the device-map handler if the firmware posted GET_CONFIG.
*/
1427 static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb) {
1428 u_int32_t outbound_message;
1430 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
1431 outbound_message = CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[0]);
1432 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1433 arcmsr_dr_handle( acb );
1436 **************************************************************************
1437 **************************************************************************
/*
** Type-B message interrupt: clear the iop2drv doorbell, then run the
** device-map handler if the firmware posted GET_CONFIG.
*/
1439 static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb) {
1440 u_int32_t outbound_message;
1442 /* clear interrupts */
1443 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
1444 outbound_message = CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0]);
1445 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1446 arcmsr_dr_handle( acb );
1449 **************************************************************************
1450 **************************************************************************
/*
** Type-C message interrupt: clear the MESSAGE_CMD_DONE doorbell bit,
** then run the device-map handler if the firmware posted GET_CONFIG.
*/
1452 static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb) {
1453 u_int32_t outbound_message;
1455 CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
1456 outbound_message = CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[0]);
1457 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
1458 arcmsr_dr_handle( acb );
1461 **************************************************************************
1462 **************************************************************************
/*
** Type-A doorbell interrupt: read-and-clear outbound_doorbell, then
** dispatch IOP->driver data-written / data-read events.
*/
1464 static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
1466 u_int32_t outbound_doorbell;
1469 *******************************************************************
1470 ** Maybe here we need to check wrqbuffer_lock is lock or not
1471 ** DOORBELL: din! don!
1472 ** check if there are any mail need to pack from firmware
1473 *******************************************************************
1475 outbound_doorbell=CHIP_REG_READ32(HBA_MessageUnit,
1476 0, outbound_doorbell);
/* write back the same bits to acknowledge them */
1477 CHIP_REG_WRITE32(HBA_MessageUnit,
1478 0, outbound_doorbell, outbound_doorbell); /* clear doorbell interrupt */
1479 if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
1480 arcmsr_iop2drv_data_wrote_handle(acb);
1482 if(outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
1483 arcmsr_iop2drv_data_read_handle(acb);
1487 **************************************************************************
1488 **************************************************************************
/*
** Type-C doorbell interrupt: read outbound_doorbell, clear it via the
** dedicated clear register, then dispatch data-written, data-read and
** message-done events.
*/
1490 static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *acb)
1492 u_int32_t outbound_doorbell;
1495 *******************************************************************
1496 ** Maybe here we need to check wrqbuffer_lock is lock or not
1497 ** DOORBELL: din! don!
1498 ** check if there are any mail need to pack from firmware
1499 *******************************************************************
1501 outbound_doorbell=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
1502 CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /* clear doorbell interrupt */
1503 if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
1504 arcmsr_iop2drv_data_wrote_handle(acb);
1506 if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
1507 arcmsr_iop2drv_data_read_handle(acb);
1509 if(outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
1510 arcmsr_hbc_message_isr(acb); /* messenger of "driver to iop commands" */
1514 **************************************************************************
1515 **************************************************************************
/*
** Type-A post-queue interrupt: drain completed CDBs from the outbound
** queueport (0xFFFFFFFF marks empty) and hand each to
** arcmsr_drain_donequeue with its MODE0 error flag.
*/
1517 static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
1523 *****************************************************************************
1524 ** areca cdb command done
1525 *****************************************************************************
/* make the DMA'd SRB completions visible to the CPU before reading */
1527 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1528 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1529 while((flag_srb=CHIP_REG_READ32(HBA_MessageUnit,
1530 0, outbound_queueport)) != 0xFFFFFFFF) {
1531 /* check if command done with no error*/
1532 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
1533 arcmsr_drain_donequeue(acb, flag_srb, error);
1534 } /*drain reply FIFO*/
1537 **************************************************************************
1538 **************************************************************************
/*
** Type-B post-queue interrupt: walk the shared done_qbuffer ring
** (0 marks empty slots), zero each consumed entry, advance doneq_index
** with wraparound, and complete each CDB via arcmsr_drain_donequeue.
*/
1540 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1542 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
1548 *****************************************************************************
1549 ** areca cdb command done
1550 *****************************************************************************
1552 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap,
1553 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1554 index=phbbmu->doneq_index;
1555 while((flag_srb=phbbmu->done_qbuffer[index]) != 0) {
1556 phbbmu->done_qbuffer[index]=0;
1558 index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */
1559 phbbmu->doneq_index=index;
1560 /* check if command done with no error*/
1561 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
1562 arcmsr_drain_donequeue(acb, flag_srb, error);
1563 } /*drain reply FIFO*/
1566 **************************************************************************
1567 **************************************************************************
/*
** Type-C post-queue interrupt: while host_int_status still flags the
** outbound post queue, pop completions from outbound_queueport_low,
** complete them (MODE1 error flag), and periodically ring the
** throttling doorbell so firmware keeps feeding the queue.
*/
1569 static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
1571 u_int32_t flag_srb,throttling=0;
1575 *****************************************************************************
1576 ** areca cdb command done
1577 *****************************************************************************
1579 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1581 while(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1583 flag_srb=CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
1584 /* check if command done with no error*/
1585 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
1586 arcmsr_drain_donequeue(acb, flag_srb, error);
/* every ARCMSR_HBC_ISR_THROTTLING_LEVEL completions, nudge the firmware */
1587 if(throttling==ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
1588 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
1592 } /*drain reply FIFO*/
1595 **********************************************************************
1596 **********************************************************************
/*
** Top-level ISR for type-A: read outbound_intstatus masked by the
** enabled bits, bail out on 0 (shared IRQ not ours), acknowledge, then
** fan out to doorbell / post-queue / message sub-handlers.
*/
1598 static void arcmsr_handle_hba_isr( struct AdapterControlBlock *acb)
1600 u_int32_t outbound_intStatus;
1602 *********************************************
1603 ** check outbound intstatus
1604 *********************************************
1606 outbound_intStatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
1607 if(!outbound_intStatus) {
1608 /*it must be share irq*/
1611 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intStatus);/*clear interrupt*/
1612 /* MU doorbell interrupts*/
1613 if(outbound_intStatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
1614 arcmsr_hba_doorbell_isr(acb);
1616 /* MU post queue interrupts*/
1617 if(outbound_intStatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
1618 arcmsr_hba_postqueue_isr(acb);
1620 if(outbound_intStatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
1621 arcmsr_hba_message_isr(acb);
1625 **********************************************************************
1626 **********************************************************************
/*
** Top-level ISR for type-B: read the iop2drv doorbell masked by
** enabled bits, bail on 0 (shared IRQ), clear and flush (read-back)
** the doorbell, signal end-of-interrupt, then fan out to data-write,
** data-read, post-queue and message sub-handlers.
*/
1628 static void arcmsr_handle_hbb_isr( struct AdapterControlBlock *acb)
1630 u_int32_t outbound_doorbell;
1632 *********************************************
1633 ** check outbound intstatus
1634 *********************************************
1636 outbound_doorbell=CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & acb->outbound_int_enable;
1637 if(!outbound_doorbell) {
1638 /*it must be share irq*/
1641 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ~outbound_doorbell); /* clear doorbell interrupt */
/* read back to flush the posted write before signalling EOI */
1642 CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell);
1643 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
1644 /* MU ioctl transfer doorbell interrupts*/
1645 if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
1646 arcmsr_iop2drv_data_wrote_handle(acb);
1648 if(outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
1649 arcmsr_iop2drv_data_read_handle(acb);
1651 /* MU post queue interrupts*/
1652 if(outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
1653 arcmsr_hbb_postqueue_isr(acb);
1655 if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
1656 arcmsr_hbb_message_isr(acb);
1660 **********************************************************************
1661 **********************************************************************
/*
** Top-level ISR for type-C: read host_int_status, bail on 0 (shared
** IRQ), then fan out to the doorbell and post-queue sub-handlers.
*/
1663 static void arcmsr_handle_hbc_isr( struct AdapterControlBlock *acb)
1665 u_int32_t host_interrupt_status;
1667 *********************************************
1668 ** check outbound intstatus
1669 *********************************************
1671 host_interrupt_status=CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status);
1672 if(!host_interrupt_status) {
1673 /*it must be share irq*/
1676 /* MU doorbell interrupts*/
1677 if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
1678 arcmsr_hbc_doorbell_isr(acb);
1680 /* MU post queue interrupts*/
1681 if(host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
1682 arcmsr_hbc_postqueue_isr(acb);
1686 ******************************************************************************
1687 ******************************************************************************
/*
** Dispatch interrupt servicing to the chip-specific handler; logs a
** diagnostic for an unrecognized adapter_type.
*/
1689 static void arcmsr_interrupt(struct AdapterControlBlock *acb)
1691 switch (acb->adapter_type) {
1692 case ACB_ADAPTER_TYPE_A:
1693 arcmsr_handle_hba_isr(acb);
1695 case ACB_ADAPTER_TYPE_B:
1696 arcmsr_handle_hbb_isr(acb);
1698 case ACB_ADAPTER_TYPE_C:
1699 arcmsr_handle_hbc_isr(acb);
1702 kprintf("arcmsr%d: interrupt service,"
1703 " unknown adapter type =%d\n", acb->pci_unit, acb->adapter_type);
1708 **********************************************************************
1709 **********************************************************************
/*
** Registered IRQ handler: serialize on the qbuffer lock and run the
** common interrupt dispatcher.
*/
1711 static void arcmsr_intr_handler(void *arg)
1713 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)arg;
1715 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1716 arcmsr_interrupt(acb);
1717 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1720 ******************************************************************************
1721 ******************************************************************************
/*
** Periodic callout: ask the firmware for its device map (GET_CONFIG)
** via the chip-specific message register, then re-arm itself every
** 5 seconds unless the adapter is being stopped.
*/
1723 static void arcmsr_polling_devmap(void* arg)
1725 struct AdapterControlBlock *acb = (struct AdapterControlBlock *)arg;
1726 switch (acb->adapter_type) {
1727 case ACB_ADAPTER_TYPE_A:
1728 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
1731 case ACB_ADAPTER_TYPE_B:
1732 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
1735 case ACB_ADAPTER_TYPE_C:
1736 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
/* type C also needs the doorbell kick for message commands */
1737 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
1741 if((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)
1743 callout_reset(&acb->devmap_callout, 5 * hz, arcmsr_polling_devmap, acb); /* polling per 5 seconds */
1748 *******************************************************************************
1750 *******************************************************************************
/*
** Park the IOP for shutdown/goodbye: if background rebuild was started,
** stop it and flush the adapter cache with all interrupts masked,
** restoring the original mask afterwards.
*/
1752 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
1754 u_int32_t intmask_org;
1757 /* stop adapter background rebuild */
1758 if(acb->acb_flags & ACB_F_MSG_START_BGRB) {
1759 intmask_org = arcmsr_disable_allintr(acb);
1760 arcmsr_stop_adapter_bgrb(acb);
1761 arcmsr_flush_adapter_cache(acb);
1762 arcmsr_enable_allintr(acb, intmask_org);
1767 ***********************************************************************
1769 ************************************************************************
/*
** Ioctl backend for the arcmsr message interface. 'arg' points at a
** CMD_MESSAGE_FIELD whose Signature must be "ARCMSR"; the command code
** selects read/write/clear of the circular rq/wq buffers, a hello
** string, goodbye (park), or cache flush. Runs under qbuffer_lock.
** Returns EINVAL on bad signature/command, else ARCMSR_MESSAGE_SUCCESS
** with the detailed status in cmdmessage.ReturnCode.
*/
1771 u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg)
1773 struct CMD_MESSAGE_FIELD * pcmdmessagefld;
1774 u_int32_t retvalue=EINVAL;
1776 pcmdmessagefld=(struct CMD_MESSAGE_FIELD *) arg;
/* reject requests that do not carry the ARCMSR magic */
1777 if(memcmp(pcmdmessagefld->cmdmessage.Signature, "ARCMSR", 6)!=0) {
1780 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1782 case ARCMSR_MESSAGE_READ_RQBUFFER: {
1783 u_int8_t * pQbuffer;
1784 u_int8_t * ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
1785 u_int32_t allxfer_len=0;
/* drain up to 1031 bytes from the circular rqbuffer into the user area */
1787 while((acb->rqbuf_firstindex!=acb->rqbuf_lastindex)
1788 && (allxfer_len<1031)) {
1789 /*copy READ QBUFFER to srb*/
1790 pQbuffer= &acb->rqbuffer[acb->rqbuf_firstindex];
1791 memcpy(ptmpQbuffer, pQbuffer, 1);
1792 acb->rqbuf_firstindex++;
1793 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1794 /*if last index number set it to 0 */
/* if earlier IOP data overflowed, pull it in now that space was freed */
1798 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1799 struct QBUFFER * prbuffer;
1800 u_int8_t * iop_data;
1803 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1804 prbuffer=arcmsr_get_iop_rqbuffer(acb);
1805 iop_data=(u_int8_t *)prbuffer->data;
1806 iop_len=(u_int32_t)prbuffer->data_len;
1807 /*this iop data does no chance to make me overflow again here, so just do it*/
1809 pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
1810 memcpy(pQbuffer, iop_data, 1);
1811 acb->rqbuf_lastindex++;
1812 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1813 /*if last index number set it to 0 */
1817 arcmsr_iop_message_read(acb);
1818 /*signature, let IOP know data has been readed */
1820 pcmdmessagefld->cmdmessage.Length=allxfer_len;
1821 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1822 retvalue=ARCMSR_MESSAGE_SUCCESS;
1825 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1826 u_int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1827 u_int8_t * pQbuffer;
1828 u_int8_t * ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
1830 user_len=pcmdmessagefld->cmdmessage.Length;
1831 /*check if data xfer length of this request will overflow my array qbuffer */
1832 wqbuf_lastindex=acb->wqbuf_lastindex;
1833 wqbuf_firstindex=acb->wqbuf_firstindex;
/* buffer not empty: flush what is queued first and report ERROR for this write */
1834 if(wqbuf_lastindex!=wqbuf_firstindex) {
1835 arcmsr_post_ioctldata2iop(acb);
1836 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1838 my_empty_len=(wqbuf_firstindex-wqbuf_lastindex-1)&(ARCMSR_MAX_QBUFFER-1);
1839 if(my_empty_len>=user_len) {
1841 /*copy srb data to wqbuffer*/
1842 pQbuffer= &acb->wqbuffer[acb->wqbuf_lastindex];
1843 memcpy(pQbuffer, ptmpuserbuffer, 1);
1844 acb->wqbuf_lastindex++;
1845 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1846 /*if last index number set it to 0 */
1850 /*post fist Qbuffer*/
1851 if(acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
1852 acb->acb_flags &=~ACB_F_MESSAGE_WQBUFFER_CLEARED;
1853 arcmsr_post_ioctldata2iop(acb);
1855 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1857 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1860 retvalue=ARCMSR_MESSAGE_SUCCESS;
1863 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
1864 u_int8_t * pQbuffer=acb->rqbuffer;
/* discard any overflowed IOP data before resetting the ring */
1866 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1867 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1868 arcmsr_iop_message_read(acb);
1869 /*signature, let IOP know data has been readed */
1871 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
1872 acb->rqbuf_firstindex=0;
1873 acb->rqbuf_lastindex=0;
1874 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1875 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1876 retvalue=ARCMSR_MESSAGE_SUCCESS;
1879 case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
1881 u_int8_t * pQbuffer=acb->wqbuffer;
1883 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1884 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1885 arcmsr_iop_message_read(acb);
1886 /*signature, let IOP know data has been readed */
1888 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
1889 acb->wqbuf_firstindex=0;
1890 acb->wqbuf_lastindex=0;
1891 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1892 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1893 retvalue=ARCMSR_MESSAGE_SUCCESS;
1896 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1897 u_int8_t * pQbuffer;
1899 if(acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1900 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1901 arcmsr_iop_message_read(acb);
1902 /*signature, let IOP know data has been readed */
1904 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED
1905 |ACB_F_MESSAGE_RQBUFFER_CLEARED
1906 |ACB_F_MESSAGE_WQBUFFER_READ);
1907 acb->rqbuf_firstindex=0;
1908 acb->rqbuf_lastindex=0;
1909 acb->wqbuf_firstindex=0;
1910 acb->wqbuf_lastindex=0;
1911 pQbuffer=acb->rqbuffer;
1912 memset(pQbuffer, 0, sizeof(struct QBUFFER));
1913 pQbuffer=acb->wqbuffer;
1914 memset(pQbuffer, 0, sizeof(struct QBUFFER));
1915 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1916 retvalue=ARCMSR_MESSAGE_SUCCESS;
1919 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
1920 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_3F;
1921 retvalue=ARCMSR_MESSAGE_SUCCESS;
1924 case ARCMSR_MESSAGE_SAY_HELLO: {
1925 u_int8_t * hello_string="Hello! I am ARCMSR";
1926 u_int8_t * puserbuffer=(u_int8_t *)pcmdmessagefld->messagedatabuffer;
/* NOTE(review): memcpy() returns its dest argument, which is never NULL
** here, so this condition is always true and the ERROR path is always
** taken — looks like a latent bug inherited from upstream; verify. */
1928 if(memcpy(puserbuffer, hello_string, (int16_t)strlen(hello_string))) {
1929 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_ERROR;
1930 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1933 pcmdmessagefld->cmdmessage.ReturnCode=ARCMSR_MESSAGE_RETURNCODE_OK;
1934 retvalue=ARCMSR_MESSAGE_SUCCESS;
1937 case ARCMSR_MESSAGE_SAY_GOODBYE: {
1938 arcmsr_iop_parking(acb);
1939 retvalue=ARCMSR_MESSAGE_SUCCESS;
1942 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
1943 arcmsr_flush_adapter_cache(acb);
1944 retvalue=ARCMSR_MESSAGE_SUCCESS;
1948 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1952 **************************************************************************
1953 **************************************************************************
/*
**************************************************************************
** arcmsr_free_srb: return a completed SRB to the adapter's free pool.
** Marks the SRB ARCMSR_SRB_DONE and stores it into the circular free
** queue srbworkingQ[] at workingsrb_doneindex (wrapping at
** ARCMSR_MAX_FREESRB_NUM), under qbuffer_lock.
** NOTE(review): embedded line numbers jump (1955,1957,1961,...) — the
** assignment of 'acb' (presumably srb->acb), the 'mutex' declaration,
** the conditional around the lock acquire, and closing braces are not
** visible in this extract; confirm against the full source.
**************************************************************************
*/
1955 static void arcmsr_free_srb(struct CommandControlBlock *srb)
1957 struct AdapterControlBlock *acb;
/* query current ownership of qbuffer_lock — presumably used to avoid
** re-acquiring when already held by curthread; TODO confirm */
1961 mutex = lockstatus(&acb->qbuffer_lock, curthread);
1963 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1964 srb->srb_state=ARCMSR_SRB_DONE;
/* push SRB back onto the circular free queue and advance the done index */
1966 acb->srbworkingQ[acb->workingsrb_doneindex]=srb;
1967 acb->workingsrb_doneindex++;
1968 acb->workingsrb_doneindex %= ARCMSR_MAX_FREESRB_NUM;
1970 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
1973 **************************************************************************
1974 **************************************************************************
/*
**************************************************************************
** arcmsr_get_freesrb: pop one SRB from the adapter's circular free pool.
** Reads the SRB at workingsrb_startindex; the start index is only
** committed when advancing it would not catch up with
** workingsrb_doneindex (i.e. the pool is not empty).
** Returns: pointer to a free CommandControlBlock, presumably NULL when
** the pool is exhausted (the 'else' arm resetting srb is in a gap of
** this extract — TODO confirm).
**************************************************************************
*/
1976 struct CommandControlBlock * arcmsr_get_freesrb(struct AdapterControlBlock *acb)
1978 struct CommandControlBlock *srb=NULL;
1979 u_int32_t workingsrb_startindex, workingsrb_doneindex;
/* NOTE(review): lockstatus() result use and surrounding conditional are
** not visible here (embedded numbering jumps 1982 -> 1984) */
1982 mutex = lockstatus(&acb->qbuffer_lock, curthread);
1984 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
1985 workingsrb_doneindex=acb->workingsrb_doneindex;
1986 workingsrb_startindex=acb->workingsrb_startindex;
1987 srb=acb->srbworkingQ[workingsrb_startindex];
1988 workingsrb_startindex++;
1989 workingsrb_startindex %= ARCMSR_MAX_FREESRB_NUM;
/* commit the new start index only if the queue was not empty */
1990 if(workingsrb_doneindex!=workingsrb_startindex) {
1991 acb->workingsrb_startindex=workingsrb_startindex;
1996 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
2000 **************************************************************************
2001 **************************************************************************
/*
**************************************************************************
** arcmsr_iop_message_xfer: handle an Areca vendor-specific message
** embedded in a SCSI CCB.
** The 32-bit control code is assembled from CDB bytes 5..8 (big-endian)
** and dispatched; the CCB data buffer is interpreted as a
** struct CMD_MESSAGE_FIELD used for both input and output.
** Returns 0 / ARCMSR_MESSAGE_SUCCESS on success, ARCMSR_MESSAGE_FAIL on
** bad transfer length, scatter/gather requests, or unknown codes.
** NOTE(review): this extract has gaps throughout (missing braces,
** 'break' statements, loop bodies and 'else' arms) — comments below
** describe only what the visible lines establish.
**************************************************************************
*/
2003 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, union ccb * pccb)
2005 struct CMD_MESSAGE_FIELD * pcmdmessagefld;
2006 int retvalue = 0, transfer_len = 0;
/* Areca io control code lives in CDB bytes 5..8, MSB first */
2008 u_int32_t controlcode = (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[5] << 24 |
2009 (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[6] << 16 |
2010 (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[7] << 8 |
2011 (u_int32_t ) pccb->csio.cdb_io.cdb_bytes[8];
2012 /* 4 bytes: Areca io control code */
/* only flat (non-scatter/gather) buffers are supported for messages */
2013 if((pccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
2014 buffer = pccb->csio.data_ptr;
2015 transfer_len = pccb->csio.dxfer_len;
2017 retvalue = ARCMSR_MESSAGE_FAIL;
/* reject transfers larger than the message structure */
2020 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
2021 retvalue = ARCMSR_MESSAGE_FAIL;
2024 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
2025 switch(controlcode) {
/* drain the driver's rqbuffer ring into the user message buffer, one
** byte per iteration, then refill from the IOP if it had overflowed */
2026 case ARCMSR_MESSAGE_READ_RQBUFFER: {
2028 u_int8_t *ptmpQbuffer=pcmdmessagefld->messagedatabuffer;
2029 int32_t allxfer_len = 0;
2031 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
2032 && (allxfer_len < 1031)) {
2033 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
2034 memcpy(ptmpQbuffer, pQbuffer, 1);
2035 acb->rqbuf_firstindex++;
2036 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
/* IOP-side data overflowed earlier: pull the pending IOP buffer into
** the local ring now that space has been freed */
2040 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2041 struct QBUFFER *prbuffer;
2045 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2046 prbuffer=arcmsr_get_iop_rqbuffer(acb);
2047 iop_data = (u_int8_t *)prbuffer->data;
2048 iop_len =(u_int32_t)prbuffer->data_len;
2049 while (iop_len > 0) {
2050 pQbuffer= &acb->rqbuffer[acb->rqbuf_lastindex];
2051 memcpy(pQbuffer, iop_data, 1);
2052 acb->rqbuf_lastindex++;
2053 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
/* signal the IOP that its buffer has been consumed */
2057 arcmsr_iop_message_read(acb);
2059 pcmdmessagefld->cmdmessage.Length = allxfer_len;
2060 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2061 retvalue=ARCMSR_MESSAGE_SUCCESS;
/* copy user data into the wqbuffer ring if it fits; on failure build
** fixed-format sense data (0x70, sense key 0x05 ILLEGAL REQUEST,
** ASC 0x20) in the CCB */
2064 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2065 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
2067 u_int8_t *ptmpuserbuffer=pcmdmessagefld->messagedatabuffer;
2069 user_len = pcmdmessagefld->cmdmessage.Length;
2070 wqbuf_lastindex = acb->wqbuf_lastindex;
2071 wqbuf_firstindex = acb->wqbuf_firstindex;
/* ring not empty: flush pending data to the IOP and fail this write */
2072 if (wqbuf_lastindex != wqbuf_firstindex) {
2073 arcmsr_post_ioctldata2iop(acb);
2074 /* has error report sensedata */
2075 if(pccb->csio.sense_len) {
2076 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2077 /* Valid,ErrorCode */
2078 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2079 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
2080 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2081 /* AdditionalSenseLength */
2082 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2083 /* AdditionalSenseCode */
2085 retvalue = ARCMSR_MESSAGE_FAIL;
/* free space in the power-of-two ring (mask keeps result in range) */
2087 my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
2088 &(ARCMSR_MAX_QBUFFER - 1);
2089 if (my_empty_len >= user_len) {
2090 while (user_len > 0) {
2091 pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
2092 memcpy(pQbuffer, ptmpuserbuffer, 1);
2093 acb->wqbuf_lastindex++;
2094 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
/* first write after a clear: kick the data over to the IOP */
2098 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
2100 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
2101 arcmsr_post_ioctldata2iop(acb);
2104 /* has error report sensedata */
2105 if(pccb->csio.sense_len) {
2106 ((u_int8_t *)&pccb->csio.sense_data)[0] = (0x1 << 7 | 0x70);
2107 /* Valid,ErrorCode */
2108 ((u_int8_t *)&pccb->csio.sense_data)[2] = 0x05;
2109 /* FileMark,EndOfMedia,IncorrectLength,Reserved,SenseKey */
2110 ((u_int8_t *)&pccb->csio.sense_data)[7] = 0x0A;
2111 /* AdditionalSenseLength */
2112 ((u_int8_t *)&pccb->csio.sense_data)[12] = 0x20;
2113 /* AdditionalSenseCode */
2115 retvalue = ARCMSR_MESSAGE_FAIL;
/* reset the read-queue ring and report success */
2120 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
2121 u_int8_t *pQbuffer = acb->rqbuffer;
2123 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2124 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2125 arcmsr_iop_message_read(acb);
2127 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
2128 acb->rqbuf_firstindex = 0;
2129 acb->rqbuf_lastindex = 0;
2130 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2131 pcmdmessagefld->cmdmessage.ReturnCode =
2132 ARCMSR_MESSAGE_RETURNCODE_OK;
/* reset the write-queue ring and report success */
2135 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
2136 u_int8_t *pQbuffer = acb->wqbuffer;
2138 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2139 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2140 arcmsr_iop_message_read(acb);
2143 (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2144 ACB_F_MESSAGE_WQBUFFER_READ);
2145 acb->wqbuf_firstindex = 0;
2146 acb->wqbuf_lastindex = 0;
2147 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
2148 pcmdmessagefld->cmdmessage.ReturnCode =
2149 ARCMSR_MESSAGE_RETURNCODE_OK;
/* reset both rings at once */
2152 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
2155 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
2156 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
2157 arcmsr_iop_message_read(acb);
2160 (ACB_F_MESSAGE_WQBUFFER_CLEARED
2161 | ACB_F_MESSAGE_RQBUFFER_CLEARED
2162 | ACB_F_MESSAGE_WQBUFFER_READ);
2163 acb->rqbuf_firstindex = 0;
2164 acb->rqbuf_lastindex = 0;
2165 acb->wqbuf_firstindex = 0;
2166 acb->wqbuf_lastindex = 0;
2167 pQbuffer = acb->rqbuffer;
2168 memset(pQbuffer, 0, sizeof (struct QBUFFER));
2169 pQbuffer = acb->wqbuffer;
2170 memset(pQbuffer, 0, sizeof (struct QBUFFER));
2171 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
/* protocol probe: echo the 0x3F return code back to the caller */
2174 case ARCMSR_MESSAGE_REQUEST_RETURNCODE_3F: {
2175 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
/* identification handshake: copy a fixed greeting into the buffer.
** NOTE(review): int8_t* vs string literal triggers a signedness
** warning on some compilers; harmless but worth cleaning up */
2178 case ARCMSR_MESSAGE_SAY_HELLO: {
2179 int8_t * hello_string = "Hello! I am ARCMSR";
2181 memcpy(pcmdmessagefld->messagedatabuffer, hello_string
2182 , (int16_t)strlen(hello_string));
2183 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
2186 case ARCMSR_MESSAGE_SAY_GOODBYE:
2187 arcmsr_iop_parking(acb);
2189 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
2190 arcmsr_flush_adapter_cache(acb);
/* unknown control code */
2193 retvalue = ARCMSR_MESSAGE_FAIL;
2199 *********************************************************************
2200 *********************************************************************
/*
*****************************************************************************************
** arcmsr_execute_srb: bus_dmamap_load() callback — validate the mapped
** request, then build and post the SRB to the adapter.
** Rejection paths (each completes the SRB immediately with status 0):
**   - DMA mapping error (error != 0; only EFBIG is expected),
**   - too many S/G segments, bus reset in progress, read/write command
**     aimed at a gone RAID volume, CCB already not in progress,
**   - outstanding-command limit reached (freezes the SIM queue and
**     requeues the request).
** On success the CCB is marked CAM_SIM_QUEUED, the SRB is posted, and a
** timeout callout is armed unless the CCB timeout is CAM_TIME_INFINITY.
** NOTE(review): extract has gaps — several 'return'/brace lines are
** missing between the visible statements.
*****************************************************************************************
*/
2202 static void arcmsr_execute_srb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
2204 struct CommandControlBlock *srb=(struct CommandControlBlock *)arg;
2205 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)srb->acb;
2210 target=pccb->ccb_h.target_id;
2211 lun=pccb->ccb_h.target_lun;
2212 #ifdef ARCMSR_DEBUG1
2213 acb->pktRequestCount++;
/* nonzero 'error' from bus_dmamap_load: anything but EFBIG is unexpected */
2216 if(error != EFBIG) {
2217 kprintf("arcmsr%d: unexpected error %x"
2218 " returned from 'bus_dmamap_load' \n"
2219 , acb->pci_unit, error);
2221 if((pccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
2222 pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2224 arcmsr_srb_complete(srb, 0);
/* request exceeds the adapter's scatter/gather capability */
2227 if(nseg > ARCMSR_MAX_SG_ENTRIES) {
2228 pccb->ccb_h.status |= CAM_REQ_TOO_BIG;
2229 arcmsr_srb_complete(srb, 0);
/* bus reset in flight: bounce the request */
2232 if(acb->acb_flags & ACB_F_BUS_RESET) {
2233 kprintf("arcmsr%d: bus reset and return busy \n", acb->pci_unit);
2234 pccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
2235 arcmsr_srb_complete(srb, 0);
/* block READ(6)/WRITE(6)-style opcodes (low nibble 0x08/0x0a) aimed at
** a volume the driver has marked gone */
2238 if(acb->devstate[target][lun]==ARECA_RAID_GONE) {
2239 u_int8_t block_cmd, cmd;
2241 cmd = pccb->csio.cdb_io.cdb_bytes[0];
2242 block_cmd= cmd & 0x0f;
2243 if(block_cmd==0x08 || block_cmd==0x0a) {
2244 kprintf("arcmsr%d:block 'read/write' command "
2245 "with gone raid volume Cmd=0x%2x, TargetId=%d, Lun=%d \n"
2246 , acb->pci_unit, cmd, target, lun);
2247 pccb->ccb_h.status |= CAM_DEV_NOT_THERE;
2248 arcmsr_srb_complete(srb, 0);
/* CCB no longer in progress (aborted upstream): unload and complete */
2252 if((pccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2254 bus_dmamap_unload(acb->dm_segs_dmat, srb->dm_segs_dmamap);
2256 arcmsr_srb_complete(srb, 0);
/* throttle: freeze the SIM queue and ask CAM to requeue */
2259 if(acb->srboutstandingcount > ARCMSR_MAX_OUTSTANDING_CMD) {
2260 xpt_freeze_simq(acb->psim, 1);
2261 pccb->ccb_h.status = CAM_REQUEUE_REQ;
2262 acb->acb_flags |= ACB_F_CAM_DEV_QFRZN;
2263 arcmsr_srb_complete(srb, 0);
/* normal path: hand the request to the adapter */
2266 pccb->ccb_h.status |= CAM_SIM_QUEUED;
2267 arcmsr_build_srb(srb, dm_segs, nseg);
2268 arcmsr_post_srb(acb, srb);
/* arm a per-SRB timeout (CCB timeout plus ARCMSR_TIMEOUT_DELAY secs) */
2269 if (pccb->ccb_h.timeout != CAM_TIME_INFINITY)
2271 arcmsr_callout_init(&srb->ccb_callout);
2272 callout_reset(&srb->ccb_callout, ((pccb->ccb_h.timeout + (ARCMSR_TIMEOUT_DELAY * 1000)) * hz) / 1000, arcmsr_srb_timeout, srb);
2273 srb->srb_flags |= SRB_FLAG_TIMER_START;
2277 *****************************************************************************************
2278 *****************************************************************************************
/*
*****************************************************************************************
** arcmsr_seek_cmd2abort: locate and abort the SRB owning 'abortccb'.
** With all outbound interrupts masked, scans the whole SRB pool for a
** started SRB whose pccb matches; if found, marks it ABORTED and polls
** for its completion before re-enabling interrupts.
** Returns: a TRUE/FALSE-style u_int8_t (the return statements fall in
** gaps of this extract — TODO confirm exact values against full source).
*****************************************************************************************
*/
2280 static u_int8_t arcmsr_seek_cmd2abort(union ccb * abortccb)
2282 struct CommandControlBlock *srb;
2283 struct AdapterControlBlock *acb=(struct AdapterControlBlock *) abortccb->ccb_h.arcmsr_ccbacb_ptr;
2284 u_int32_t intmask_org;
2289 ***************************************************************************
2290 ** It is the upper layer do abort command this lock just prior to calling us.
2291 ** First determine if we currently own this command.
2292 ** Start by searching the device queue. If not found
2293 ** at all, and the system wanted us to just abort the
2294 ** command return success.
2295 ***************************************************************************
2297 if(acb->srboutstandingcount!=0) {
2298 /* disable all outbound interrupt */
2299 intmask_org=arcmsr_disable_allintr(acb);
/* linear scan of the whole SRB pool for the matching in-flight command */
2300 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
2301 srb=acb->psrb_pool[i];
2302 if(srb->srb_state==ARCMSR_SRB_START) {
2303 if(srb->pccb==abortccb) {
2304 srb->srb_state=ARCMSR_SRB_ABORTED;
2305 kprintf("arcmsr%d:scsi id=%d lun=%d abort srb '%p'"
2306 "outstanding command \n"
2307 , acb->pci_unit, abortccb->ccb_h.target_id
2308 , abortccb->ccb_h.target_lun, srb);
/* drain completions until the aborted SRB comes back */
2309 arcmsr_polling_srbdone(acb, srb);
2310 /* enable outbound Post Queue, outbound doorbell Interrupt */
2311 arcmsr_enable_allintr(acb, intmask_org);
2316 /* enable outbound Post Queue, outbound doorbell Interrupt */
2317 arcmsr_enable_allintr(acb, intmask_org);
2322 ****************************************************************************
2323 ****************************************************************************
/*
****************************************************************************
** arcmsr_bus_reset: reset the adapter's SCSI bus.
** Sets ACB_F_BUS_RESET, then services the interrupt handler for up to
** 400 retries waiting for outstanding SRBs to drain (the delay between
** iterations falls in a gap of this extract), resets the IOP, and
** clears the flag.
****************************************************************************
*/
2325 static void arcmsr_bus_reset(struct AdapterControlBlock *acb)
2330 acb->acb_flags |=ACB_F_BUS_RESET;
/* drain in-flight commands by polling the interrupt handler */
2331 while(acb->srboutstandingcount!=0 && retry < 400) {
2332 arcmsr_interrupt(acb);
2336 arcmsr_iop_reset(acb);
2337 acb->acb_flags &= ~ACB_F_BUS_RESET;
2340 **************************************************************************
2341 **************************************************************************
/*
**************************************************************************
** arcmsr_handle_virtual_command: service SCSI commands addressed to the
** driver's virtual message-transfer device (no real disk behind it).
** Handles INQUIRY locally (reports a T_PROCESSOR device, vendor
** "Areca", product "RAID controller", revision "R001"); other opcodes
** visible here are forwarded to arcmsr_iop_message_xfer(), with a
** CHECK CONDITION raised on failure.
** NOTE(review): the case labels and inqdata[3,5..7] assignments fall in
** gaps of this extract.
**************************************************************************
*/
2343 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
2346 pccb->ccb_h.status |= CAM_REQ_CMP;
2347 switch (pccb->csio.cdb_io.cdb_bytes[0]) {
2349 unsigned char inqdata[36];
2350 char *buffer=pccb->csio.data_ptr;
/* only LUN 0 of the virtual device answers INQUIRY */
2352 if (pccb->ccb_h.target_lun) {
2353 pccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2357 inqdata[0] = T_PROCESSOR; /* Periph Qualifier & Periph Dev Type */
2358 inqdata[1] = 0; /* rem media bit & Dev Type Modifier */
2359 inqdata[2] = 0; /* ISO, ECMA, & ANSI versions */
2361 inqdata[4] = 31; /* length of additional data */
2365 strncpy(&inqdata[8], "Areca ", 8); /* Vendor Identification */
2366 strncpy(&inqdata[16], "RAID controller ", 16); /* Product Identification */
2367 strncpy(&inqdata[32], "R001", 4); /* Product Revision */
2368 memcpy(buffer, inqdata, sizeof(inqdata));
/* vendor message opcodes: delegate to the message handler */
2374 if (arcmsr_iop_message_xfer(acb, pccb)) {
2375 pccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2376 pccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
2386 *********************************************************************
2387 *********************************************************************
/*
*********************************************************************
** arcmsr_action: CAM SIM action entry point — dispatch a CCB.
** Handles XPT_SCSI_IO (DMA-map and execute, or route to the virtual
** message device), XPT_TARGET_IO, XPT_PATH_INQ, abort requests,
** XPT_RESET_DEV, XPT_GET/SET_TRAN_SETTINGS and XPT_CALC_GEOMETRY;
** everything else is CAM_REQ_INVALID.
** NOTE(review): many case labels, 'break's, xpt_done() calls and
** closing braces fall in gaps of this extract.
*********************************************************************
*/
2389 static void arcmsr_action(struct cam_sim * psim, union ccb * pccb)
2391 struct AdapterControlBlock * acb;
2393 acb=(struct AdapterControlBlock *) cam_sim_softc(psim);
/* softc lookup failed — reject the CCB */
2395 pccb->ccb_h.status |= CAM_REQ_INVALID;
2399 switch (pccb->ccb_h.func_code) {
/* ---- XPT_SCSI_IO: normal I/O path ---- */
2401 struct CommandControlBlock *srb;
2402 int target=pccb->ccb_h.target_id;
2405 /* virtual device for iop message transfer */
2406 arcmsr_handle_virtual_command(acb, pccb);
2409 if((srb=arcmsr_get_freesrb(acb)) == NULL) {
2410 pccb->ccb_h.status |= CAM_RESRC_UNAVAIL;
/* link SRB and ACB into the CCB header for later retrieval */
2414 pccb->ccb_h.arcmsr_ccbsrb_ptr=srb;
2415 pccb->ccb_h.arcmsr_ccbacb_ptr=acb;
2417 if((pccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2418 if(!(pccb->ccb_h.flags & CAM_SCATTER_VALID)) {
2420 if(!(pccb->ccb_h.flags & CAM_DATA_PHYS)) {
2421 /* Buffer is virtual */
/* async mapping: arcmsr_execute_srb runs as the load callback */
2425 error = bus_dmamap_load(acb->dm_segs_dmat
2426 , srb->dm_segs_dmamap
2427 , pccb->csio.data_ptr
2428 , pccb->csio.dxfer_len
2429 , arcmsr_execute_srb, srb, /*flags*/0);
2430 if(error == EINPROGRESS) {
2431 xpt_freeze_simq(acb->psim, 1);
2432 pccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2436 else { /* Buffer is physical */
2437 struct bus_dma_segment seg;
2439 seg.ds_addr = (bus_addr_t)pccb->csio.data_ptr;
2440 seg.ds_len = pccb->csio.dxfer_len;
2441 arcmsr_execute_srb(srb, &seg, 1, 0);
2444 /* Scatter/gather list */
2445 struct bus_dma_segment *segs;
/* only physical S/G lists with virtual addressing are supported */
2447 if((pccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0
2448 || (pccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
2449 pccb->ccb_h.status |= CAM_PROVIDE_FAIL;
2451 kfree(srb, M_DEVBUF);
2454 segs=(struct bus_dma_segment *)pccb->csio.data_ptr;
2455 arcmsr_execute_srb(srb, segs, pccb->csio.sglist_cnt, 0);
/* no data phase */
2458 arcmsr_execute_srb(srb, NULL, 0, 0);
2462 case XPT_TARGET_IO: {
2463 /* target mode not yet support vendor specific commands. */
2464 pccb->ccb_h.status |= CAM_REQ_CMP;
/* ---- XPT_PATH_INQ: describe the HBA to CAM ---- */
2468 case XPT_PATH_INQ: {
2469 struct ccb_pathinq *cpi= &pccb->cpi;
2472 cpi->hba_inquiry=PI_SDTR_ABLE | PI_TAG_ABLE;
2476 cpi->max_target=ARCMSR_MAX_TARGETID; /* 0-16 */
2477 cpi->max_lun=ARCMSR_MAX_TARGETLUN; /* 0-7 */
2478 cpi->initiator_id=ARCMSR_SCSI_INITIATOR_ID; /* 255 */
2479 cpi->bus_id=cam_sim_bus(psim);
2480 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2481 strncpy(cpi->hba_vid, "ARCMSR", HBA_IDLEN);
2482 strncpy(cpi->dev_name, cam_sim_name(psim), DEV_IDLEN);
2483 cpi->unit_number=cam_sim_unit(psim);
2484 #ifdef CAM_NEW_TRAN_CODE
2485 if(acb->adapter_bus_speed == ACB_BUS_SPEED_6G)
2486 cpi->base_transfer_speed = 600000;
2488 cpi->base_transfer_speed = 300000;
/* ARC1880/1680 are SAS controllers; everything else reports SPI */
2489 if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
2490 (acb->vendor_device_id == PCIDevVenIDARC1680))
2492 cpi->transport = XPORT_SAS;
2493 cpi->transport_version = 0;
2494 cpi->protocol_version = SCSI_REV_SPC2;
2498 cpi->transport = XPORT_SPI;
2499 cpi->transport_version = 2;
2500 cpi->protocol_version = SCSI_REV_2;
2502 cpi->protocol = PROTO_SCSI;
2504 cpi->ccb_h.status |= CAM_REQ_CMP;
/* ---- abort request: only target-I/O CCBs can be sought and aborted ---- */
2509 union ccb *pabort_ccb;
2511 pabort_ccb=pccb->cab.abort_ccb;
2512 switch (pabort_ccb->ccb_h.func_code) {
2513 case XPT_ACCEPT_TARGET_IO:
2514 case XPT_IMMED_NOTIFY:
2515 case XPT_CONT_TARGET_IO:
2516 if(arcmsr_seek_cmd2abort(pabort_ccb)==TRUE) {
2517 pabort_ccb->ccb_h.status |= CAM_REQ_ABORTED;
2518 xpt_done(pabort_ccb);
2519 pccb->ccb_h.status |= CAM_REQ_CMP;
2521 xpt_print_path(pabort_ccb->ccb_h.path);
2522 kprintf("Not found\n");
2523 pccb->ccb_h.status |= CAM_PATH_INVALID;
2527 pccb->ccb_h.status |= CAM_UA_ABORT;
2530 pccb->ccb_h.status |= CAM_REQ_INVALID;
/* ---- XPT_RESET_DEV: bus reset then wait (bounded) for quiesce ---- */
2537 case XPT_RESET_DEV: {
2540 arcmsr_bus_reset(acb);
2541 for (i=0; i < 500; i++) {
2544 pccb->ccb_h.status |= CAM_REQ_CMP;
2549 pccb->ccb_h.status |= CAM_REQ_INVALID;
/* ---- XPT_GET_TRAN_SETTINGS: report negotiated transport settings ---- */
2553 case XPT_GET_TRAN_SETTINGS: {
2554 struct ccb_trans_settings *cts;
/* target 16 is the virtual message device — no transport settings */
2556 if(pccb->ccb_h.target_id == 16) {
2557 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2562 #ifdef CAM_NEW_TRAN_CODE
2564 struct ccb_trans_settings_scsi *scsi;
2565 struct ccb_trans_settings_spi *spi;
2566 struct ccb_trans_settings_sas *sas;
2568 scsi = &cts->proto_specific.scsi;
2569 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2570 scsi->valid = CTS_SCSI_VALID_TQ;
2571 cts->protocol = PROTO_SCSI;
2573 if((acb->vendor_device_id == PCIDevVenIDARC1880) ||
2574 (acb->vendor_device_id == PCIDevVenIDARC1680))
2576 cts->protocol_version = SCSI_REV_SPC2;
2577 cts->transport_version = 0;
2578 cts->transport = XPORT_SAS;
2579 sas = &cts->xport_specific.sas;
2580 sas->valid = CTS_SAS_VALID_SPEED;
2581 if(acb->vendor_device_id == PCIDevVenIDARC1880)
2582 sas->bitrate = 600000;
2583 else if(acb->vendor_device_id == PCIDevVenIDARC1680)
2584 sas->bitrate = 300000;
/* SPI branch (non-SAS adapters) */
2588 cts->protocol_version = SCSI_REV_2;
2589 cts->transport_version = 2;
2590 cts->transport = XPORT_SPI;
2591 spi = &cts->xport_specific.spi;
2592 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2594 spi->sync_offset=32;
2595 spi->bus_width=MSG_EXT_WDTR_BUS_16_BIT;
2596 spi->valid = CTS_SPI_VALID_DISC
2597 | CTS_SPI_VALID_SYNC_RATE
2598 | CTS_SPI_VALID_SYNC_OFFSET
2599 | CTS_SPI_VALID_BUS_WIDTH;
/* legacy (pre-CAM_NEW_TRAN_CODE) settings path */
2604 cts->flags=(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB);
2606 cts->sync_offset=32;
2607 cts->bus_width=MSG_EXT_WDTR_BUS_16_BIT;
2608 cts->valid=CCB_TRANS_SYNC_RATE_VALID |
2609 CCB_TRANS_SYNC_OFFSET_VALID |
2610 CCB_TRANS_BUS_WIDTH_VALID |
2611 CCB_TRANS_DISC_VALID |
2615 pccb->ccb_h.status |= CAM_REQ_CMP;
2619 case XPT_SET_TRAN_SETTINGS: {
2620 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2624 case XPT_CALC_GEOMETRY:
2625 if(pccb->ccb_h.target_id == 16) {
2626 pccb->ccb_h.status |= CAM_FUNC_NOTAVAIL;
2630 cam_calc_geometry(&pccb->ccg, 1);
/* unrecognized func_code */
2634 pccb->ccb_h.status |= CAM_REQ_INVALID;
2640 **********************************************************************
2641 **********************************************************************
/*
**********************************************************************
** arcmsr_start_hba_bgrb: ask a type-A (HBA) adapter to start its
** background rebuild, via the inbound message register; logs a warning
** if the IOP does not acknowledge in time.
**********************************************************************
*/
2643 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
2645 acb->acb_flags |= ACB_F_MSG_START_BGRB;
2646 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2647 if(!arcmsr_hba_wait_msgint_ready(acb)) {
2648 kprintf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
2652 **********************************************************************
2653 **********************************************************************
/*
**********************************************************************
** arcmsr_start_hbb_bgrb: ask a type-B (HBB) adapter to start its
** background rebuild, via the driver-to-IOP doorbell; logs a warning
** if the IOP does not acknowledge in time.
**********************************************************************
*/
2655 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
2657 acb->acb_flags |= ACB_F_MSG_START_BGRB;
2658 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_BGRB);
2659 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
2660 kprintf( "arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
2664 **********************************************************************
2665 **********************************************************************
/*
**********************************************************************
** arcmsr_start_hbc_bgrb: ask a type-C (HBC) adapter to start its
** background rebuild. HBC additionally requires ringing the inbound
** doorbell after writing the message register; logs a warning if the
** IOP does not acknowledge in time.
**********************************************************************
*/
2667 static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *acb)
2669 acb->acb_flags |= ACB_F_MSG_START_BGRB;
2670 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
2671 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
2672 if(!arcmsr_hbc_wait_msgint_ready(acb)) {
2673 kprintf("arcmsr%d: wait 'start adapter background rebulid' timeout \n", acb->pci_unit);
2677 **********************************************************************
2678 **********************************************************************
/*
**********************************************************************
** arcmsr_start_adapter_bgrb: dispatch the start-background-rebuild
** request to the chip-specific routine based on adapter type (A/B/C).
** NOTE(review): the 'break' lines between cases fall in gaps of this
** extract (embedded numbering jumps 2684 -> 2686 etc.).
**********************************************************************
*/
2680 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
2682 switch (acb->adapter_type) {
2683 case ACB_ADAPTER_TYPE_A:
2684 arcmsr_start_hba_bgrb(acb);
2686 case ACB_ADAPTER_TYPE_B:
2687 arcmsr_start_hbb_bgrb(acb);
2689 case ACB_ADAPTER_TYPE_C:
2690 arcmsr_start_hbc_bgrb(acb);
2695 **********************************************************************
2697 **********************************************************************
/*
**********************************************************************
** arcmsr_polling_hba_srbdone: drain the type-A adapter's outbound
** completion FIFO by polling (used while interrupts are masked, e.g.
** during abort). Each reply word encodes the SRB's offset (<<5, 32-byte
** aligned frames) plus an error flag bit; completions are reported via
** arcmsr_report_srb_state(). An SRB found in ABORTED state is completed
** with CAM_REQ_ABORTED; any other state mismatch is logged as illegal.
** 0xFFFFFFFF from the queue port means the FIFO is empty.
** NOTE(review): the loop header, delay between retries and the
** 'polling_ccb_retry' label fall in gaps of this extract.
**********************************************************************
*/
2699 static void arcmsr_polling_hba_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2701 struct CommandControlBlock *srb;
2702 u_int32_t flag_srb, outbound_intstatus, poll_srb_done=0, poll_count=0;
2707 outbound_intstatus=CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_intstatus) & acb->outbound_int_enable;
2708 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_intstatus, outbound_intstatus); /*clear interrupt*/
2709 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2711 if((flag_srb=CHIP_REG_READ32(HBA_MessageUnit,
2712 0, outbound_queueport))==0xFFFFFFFF) {
2714 break;/*chip FIFO no ccb for completion already*/
/* still waiting on a specific SRB after 100 polls: retry the drain */
2717 if ((poll_count > 100) && (poll_srb != NULL)) {
2720 goto polling_ccb_retry;
2723 /* check if command done with no error*/
2724 srb=(struct CommandControlBlock *)
2725 (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
2726 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
2727 poll_srb_done = (srb==poll_srb) ? 1:0;
2728 if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2729 if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2730 kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
2731 "poll command abort successfully \n"
2733 , srb->pccb->ccb_h.target_id
2734 , srb->pccb->ccb_h.target_lun, srb);
2735 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2736 arcmsr_srb_complete(srb, 1);
2739 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
2740 "srboutstandingcount=%d \n"
2742 , srb, acb->srboutstandingcount);
2745 arcmsr_report_srb_state(acb, srb, error);
2746 } /*drain reply FIFO*/
2749 **********************************************************************
2751 **********************************************************************
/*
**********************************************************************
** arcmsr_polling_hbb_srbdone: type-B variant of the polled completion
** drain. Completions come from the in-memory done_qbuffer[] ring in the
** HBB message unit rather than a hardware FIFO; each consumed entry is
** zeroed and doneq_index advances modulo ARCMSR_MAX_HBB_POSTQUEUE.
** SRB decoding and abort/illegal-state handling mirror the HBA variant.
** NOTE(review): the loop header, retry delay and 'polling_ccb_retry'
** label fall in gaps of this extract.
**********************************************************************
*/
2753 static void arcmsr_polling_hbb_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2755 struct HBB_MessageUnit *phbbmu=(struct HBB_MessageUnit *)acb->pmu;
2756 struct CommandControlBlock *srb;
2757 u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
2763 CHIP_REG_WRITE32(HBB_DOORBELL,
2764 0, iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN); /* clear doorbell interrupt */
2765 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2767 index=phbbmu->doneq_index;
/* zero entry means the ring has no completed command at this slot */
2768 if((flag_srb=phbbmu->done_qbuffer[index]) == 0) {
2770 break;/*chip FIFO no ccb for completion already*/
/* still waiting on a specific SRB after 100 polls: retry the drain */
2773 if ((poll_count > 100) && (poll_srb != NULL)) {
2776 goto polling_ccb_retry;
/* consume the ring entry and advance the done index */
2779 phbbmu->done_qbuffer[index]=0;
2781 index %= ARCMSR_MAX_HBB_POSTQUEUE; /*if last index number set it to 0 */
2782 phbbmu->doneq_index=index;
2783 /* check if command done with no error*/
2784 srb=(struct CommandControlBlock *)
2785 (acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
2786 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE0)?TRUE:FALSE;
2787 poll_srb_done = (srb==poll_srb) ? 1:0;
2788 if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2789 if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2790 kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'"
2791 "poll command abort successfully \n"
2793 , srb->pccb->ccb_h.target_id
2794 , srb->pccb->ccb_h.target_lun, srb);
2795 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2796 arcmsr_srb_complete(srb, 1);
2799 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'"
2800 "srboutstandingcount=%d \n"
2802 , srb, acb->srboutstandingcount);
2805 arcmsr_report_srb_state(acb, srb, error);
2806 } /*drain reply FIFO*/
2809 **********************************************************************
2811 **********************************************************************
/*
**********************************************************************
** arcmsr_polling_hbc_srbdone: type-C variant of the polled completion
** drain. Presence of completions is gated on the HBC host interrupt
** status (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR); the reply is read from
** outbound_queueport_low and the SRB address recovered by masking the
** low 5 bits (32-byte aligned frames, error flag in MODE1 bit).
** Unlike A/B, this variant also stops retrying when srboutstandingcount
** reaches zero.
** NOTE(review): the loop header, retry delay and 'polling_ccb_retry'
** label fall in gaps of this extract.
**********************************************************************
*/
2813 static void arcmsr_polling_hbc_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2815 struct CommandControlBlock *srb;
2816 u_int32_t flag_srb, poll_srb_done=0, poll_count=0;
2821 bus_dmamap_sync(acb->srb_dmat, acb->srb_dmamap, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2823 if(!(CHIP_REG_READ32(HBC_MessageUnit, 0, host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
2825 break;/*chip FIFO no ccb for completion already*/
2828 if ((poll_count > 100) && (poll_srb != NULL)) {
/* nothing left in flight: no point retrying */
2831 if (acb->srboutstandingcount == 0) {
2834 goto polling_ccb_retry;
2837 flag_srb = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_queueport_low);
2838 /* check if command done with no error*/
2839 srb=(struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0));/*frame must be 32 bytes aligned*/
2840 error=(flag_srb & ARCMSR_SRBREPLY_FLAG_ERROR_MODE1)?TRUE:FALSE;
2841 if (poll_srb != NULL)
2842 poll_srb_done = (srb==poll_srb) ? 1:0;
2843 if((srb->acb!=acb) || (srb->srb_state!=ARCMSR_SRB_START)) {
2844 if(srb->srb_state==ARCMSR_SRB_ABORTED) {
2845 kprintf("arcmsr%d: scsi id=%d lun=%d srb='%p'poll command abort successfully \n"
2846 , acb->pci_unit, srb->pccb->ccb_h.target_id, srb->pccb->ccb_h.target_lun, srb);
2847 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
2848 arcmsr_srb_complete(srb, 1);
2851 kprintf("arcmsr%d: polling get an illegal srb command done srb='%p'srboutstandingcount=%d \n"
2852 , acb->pci_unit, srb, acb->srboutstandingcount);
2855 arcmsr_report_srb_state(acb, srb, error);
2856 } /*drain reply FIFO*/
2859 **********************************************************************
2860 **********************************************************************
/*
**********************************************************************
** arcmsr_polling_srbdone: dispatch the polled completion drain to the
** chip-specific routine based on adapter type (A/B/C). poll_srb, when
** non-NULL, is the specific SRB the caller is waiting on.
** NOTE(review): 'break's/closing braces between cases fall in gaps of
** this extract.
**********************************************************************
*/
2862 static void arcmsr_polling_srbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_srb)
2864 switch (acb->adapter_type) {
2865 case ACB_ADAPTER_TYPE_A: {
2866 arcmsr_polling_hba_srbdone(acb, poll_srb);
2869 case ACB_ADAPTER_TYPE_B: {
2870 arcmsr_polling_hbb_srbdone(acb, poll_srb);
2873 case ACB_ADAPTER_TYPE_C: {
2874 arcmsr_polling_hbc_srbdone(acb, poll_srb);
2880 **********************************************************************
2881 **********************************************************************
/*
**********************************************************************
** arcmsr_get_hba_config: read firmware identity and capability data
** from a type-A adapter. Issues GET_CONFIG via the inbound message
** register, then byte-reads the model (8B), version (16B) and device
** map strings from the message-unit rwbuffer window via bus_space,
** and finally reads numeric parameters (request length, queue depth,
** SDRAM size, IDE channels, config version) from msgcode_rwbuffer.
** NOTE(review): the copy loops' headers/increments fall in gaps of this
** extract — only the loop-body reads are visible.
**********************************************************************
*/
2883 static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
2885 char *acb_firm_model=acb->firm_model;
2886 char *acb_firm_version=acb->firm_version;
2887 char *acb_device_map = acb->device_map;
2888 size_t iop_firm_model=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/
2889 size_t iop_firm_version=offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
2890 size_t iop_device_map = offsetof(struct HBA_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2893 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2894 if(!arcmsr_hba_wait_msgint_ready(acb)) {
2895 kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2899 *acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
2900 /* 8 bytes firm_model, 15, 60-67*/
2906 *acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
2907 /* 16 bytes firm_version, 17, 68-83*/
2913 *acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
2917 kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2918 kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2919 acb->firm_request_len=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/
2920 acb->firm_numbers_queue=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
2921 acb->firm_sdram_size=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/
2922 acb->firm_ide_channels=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/
2923 acb->firm_cfg_version=CHIP_REG_READ32(HBA_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */
2926 **********************************************************************
2927 **********************************************************************
/*
**********************************************************************
** arcmsr_get_hbb_config: type-B counterpart of arcmsr_get_hba_config.
** Issues GET_CONFIG via the driver-to-IOP doorbell and reads the
** firmware model/version/device-map strings from the separate
** HBB_RWBUFFER window (bus_space tag/handle index 1), then the numeric
** firmware parameters from msgcode_rwbuffer.
** NOTE(review): the copy loops' headers/increments fall in gaps of this
** extract — only the loop-body reads are visible.
**********************************************************************
*/
2929 static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
2931 char *acb_firm_model=acb->firm_model;
2932 char *acb_firm_version=acb->firm_version;
2933 char *acb_device_map = acb->device_map;
2934 size_t iop_firm_model=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/
2935 size_t iop_firm_version=offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
2936 size_t iop_device_map = offsetof(struct HBB_RWBUFFER, msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
2939 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
2940 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
2941 kprintf( "arcmsr%d: wait" "'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2945 *acb_firm_model=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_model+i);
2946 /* 8 bytes firm_model, 15, 60-67*/
2952 *acb_firm_version=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_firm_version+i);
2953 /* 16 bytes firm_version, 17, 68-83*/
2959 *acb_device_map=bus_space_read_1(acb->btag[1], acb->bhandle[1], iop_device_map+i);
2963 kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
2964 kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
2965 acb->firm_request_len=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/
2966 acb->firm_numbers_queue=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
2967 acb->firm_sdram_size=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/
2968 acb->firm_ide_channels=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/
2969 acb->firm_cfg_version=CHIP_REG_READ32(HBB_RWBUFFER, 1, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */
2972 **********************************************************************
2973 **********************************************************************
/*
** arcmsr_get_hbc_config:
**	Read firmware identification and configuration data from a
**	type-C (HBC) adapter.  Posts GET_CONFIG via inbound_msgaddr0 and
**	rings the inbound doorbell, then copies the model / version /
**	device-map strings out of the message unit (btag[0]/bhandle[0])
**	and latches the numeric configuration words into the acb.
**	NOTE(review): this listing is incomplete — the loop headers and
**	braces around the byte copies are not visible here.
*/
2975 static void arcmsr_get_hbc_config(struct AdapterControlBlock *acb)
2977 char *acb_firm_model=acb->firm_model;
2978 char *acb_firm_version=acb->firm_version;
2979 char *acb_device_map = acb->device_map;
/* byte offsets of the ASCII strings inside the HBC message unit */
2980 size_t iop_firm_model=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]); /*firm_model,15,60-67*/
2981 size_t iop_firm_version=offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]); /*firm_version,17,68-83*/
2982 size_t iop_device_map = offsetof(struct HBC_MessageUnit,msgcode_rwbuffer[ARCMSR_FW_DEVMAP_OFFSET]);
/* HBC needs both the message register write and a doorbell kick */
2985 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
2986 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
2987 if(!arcmsr_hbc_wait_msgint_ready(acb)) {
2988 kprintf("arcmsr%d: wait 'get adapter firmware miscellaneous data' timeout \n", acb->pci_unit);
2992 *acb_firm_model=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_model+i);
2993 /* 8 bytes firm_model, 15, 60-67*/
2999 *acb_firm_version=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_firm_version+i);
3000 /* 16 bytes firm_version, 17, 68-83*/
3006 *acb_device_map=bus_space_read_1(acb->btag[0], acb->bhandle[0], iop_device_map+i);
3010 kprintf("ARECA RAID ADAPTER%d: %s \n", acb->pci_unit, ARCMSR_DRIVER_VERSION);
3011 kprintf("ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n", acb->pci_unit, acb->firm_version);
/* latch the numeric firmware configuration words */
3012 acb->firm_request_len =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[1]); /*firm_request_len, 1, 04-07*/
3013 acb->firm_numbers_queue =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[2]); /*firm_numbers_queue, 2, 08-11*/
3014 acb->firm_sdram_size =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[3]); /*firm_sdram_size, 3, 12-15*/
3015 acb->firm_ide_channels =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[4]); /*firm_ide_channels, 4, 16-19*/
3016 acb->firm_cfg_version =CHIP_REG_READ32(HBC_MessageUnit, 0, msgcode_rwbuffer[ARCMSR_FW_CFGVER_OFFSET]); /*firm_cfg_version, 25, */
3019 **********************************************************************
3020 **********************************************************************
/*
** arcmsr_get_firmware_spec:
**	Dispatch to the per-family "get firmware config" routine based on
**	acb->adapter_type (A/B/C).  NOTE(review): the break statements and
**	closing braces for each case are not visible in this listing.
*/
3022 static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
3024 switch (acb->adapter_type) {
3025 case ACB_ADAPTER_TYPE_A: {
3026 arcmsr_get_hba_config(acb);
3029 case ACB_ADAPTER_TYPE_B: {
3030 arcmsr_get_hbb_config(acb);
3033 case ACB_ADAPTER_TYPE_C: {
3034 arcmsr_get_hbc_config(acb);
3040 **********************************************************************
3041 **********************************************************************
/*
** arcmsr_wait_firmware_ready:
**	Poll the adapter's outbound message/doorbell register until the
**	family-specific FIRMWARE_OK bit is set, sleeping 15 ms per
**	iteration for up to ~30 s (2000 iterations) before logging a
**	timeout.  Type B additionally acknowledges with an
**	END_OF_INTERRUPT doorbell once the firmware reports ready.
*/
3043 static void arcmsr_wait_firmware_ready( struct AdapterControlBlock *acb)
3047 switch (acb->adapter_type) {
3048 case ACB_ADAPTER_TYPE_A: {
3049 while ((CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0)
3051 if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3053 kprintf( "arcmsr%d:timed out waiting for firmware \n", acb->pci_unit);
3056 UDELAY(15000); /* wait 15 milli-seconds */
3060 case ACB_ADAPTER_TYPE_B: {
3061 while ((CHIP_REG_READ32(HBB_DOORBELL, 0, iop2drv_doorbell) & ARCMSR_MESSAGE_FIRMWARE_OK) == 0)
3063 if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3065 kprintf( "arcmsr%d: timed out waiting for firmware \n", acb->pci_unit);
3068 UDELAY(15000); /* wait 15 milli-seconds */
/* acknowledge: type B must signal end-of-interrupt back to the IOP */
3070 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3073 case ACB_ADAPTER_TYPE_C: {
3074 while ((CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_msgaddr1) & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0)
3076 if (timeout++ > 2000) /* (2000*15)/1000 = 30 sec */
3078 kprintf( "arcmsr%d:timed out waiting for firmware ready\n", acb->pci_unit);
3081 UDELAY(15000); /* wait 15 milli-seconds */
3088 **********************************************************************
3089 **********************************************************************
/*
** arcmsr_clear_doorbell_queue_buffer:
**	Drain/acknowledge any pending doorbell state left over from before
**	driver initialization: clear the outbound doorbell (by writing the
**	read value back, or to the dedicated clear register on type C) and
**	tell the IOP the driver has consumed any queued data.
*/
3091 static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb)
3093 u_int32_t outbound_doorbell;
3095 switch (acb->adapter_type) {
3096 case ACB_ADAPTER_TYPE_A: {
3097 /* empty doorbell Qbuffer if door bell ringed */
3098 outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
3099 CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */
3100 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
3104 case ACB_ADAPTER_TYPE_B: {
3105 CHIP_REG_WRITE32(HBB_DOORBELL, 0, iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/
3106 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
3107 /* let IOP know data has been read */
3110 case ACB_ADAPTER_TYPE_C: {
3111 /* empty doorbell Qbuffer if door bell ringed */
3112 outbound_doorbell = CHIP_REG_READ32(HBC_MessageUnit, 0, outbound_doorbell);
/* type C has a dedicated write-1-to-clear register */
3113 CHIP_REG_WRITE32(HBC_MessageUnit, 0, outbound_doorbell_clear, outbound_doorbell); /*clear doorbell interrupt */
3114 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
3121 ************************************************************************
3122 ************************************************************************
/*
** arcmsr_iop_confirm:
**	Hand the IOP the physical-address layout of the driver's SRB pool.
**	For types A and C this only matters when the pool sits above 4 GB
**	(non-zero high 32 bits).  For type B it also programs the post/done
**	command queue windows, which live in the HBB_MessageUnit placed
**	directly after the SRB pool (see arcmsr_map_free_srb /
**	arcmsr_initialize), and switches the IOP into driver mode.
**	NOTE(review): this listing is incomplete — several braces and
**	return statements are not visible here.
*/
3124 static u_int32_t arcmsr_iop_confirm(struct AdapterControlBlock *acb)
3126 unsigned long srb_phyaddr;
3127 u_int32_t srb_phyaddr_hi32;
3130 ********************************************************************
3131 ** here we need to tell iop 331 our freesrb.HighPart
3132 ** if freesrb.HighPart is not zero
3133 ********************************************************************
3135 srb_phyaddr= (unsigned long) acb->srb_phyaddr.phyaddr;
3136 // srb_phyaddr_hi32=(u_int32_t) ((srb_phyaddr>>16)>>16);
3137 srb_phyaddr_hi32=acb->srb_phyaddr.B.phyadd_high;
3138 switch (acb->adapter_type) {
3139 case ACB_ADAPTER_TYPE_A: {
3140 if(srb_phyaddr_hi32!=0) {
3141 CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3142 CHIP_REG_WRITE32(HBA_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3143 CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3144 if(!arcmsr_hba_wait_msgint_ready(acb)) {
3145 kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3152 ***********************************************************************
3153 ** if adapter type B, set window of "post command Q"
3154 ***********************************************************************
3156 case ACB_ADAPTER_TYPE_B: {
3157 u_int32_t post_queue_phyaddr;
3158 struct HBB_MessageUnit *phbbmu;
3160 phbbmu=(struct HBB_MessageUnit *)acb->pmu;
3161 phbbmu->postq_index=0;
3162 phbbmu->doneq_index=0;
3163 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_POST_WINDOW);
3164 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3165 kprintf( "arcmsr%d: 'set window of post command Q' timeout\n", acb->pci_unit);
/* post queue lives inside the HBB_MessageUnit that follows the SRB pool */
3168 post_queue_phyaddr = srb_phyaddr + ARCMSR_SRBS_POOL_SIZE
3169 + offsetof(struct HBB_MessageUnit, post_qbuffer);
3170 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG); /* driver "set config" signature */
3171 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[1], srb_phyaddr_hi32); /* normal should be zero */
3172 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[2], post_queue_phyaddr); /* postQ base address */
3173 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[3], post_queue_phyaddr+1056); /* doneQ base = postQ base + (256+8)*4 */
3174 CHIP_REG_WRITE32(HBB_RWBUFFER, 1, msgcode_rwbuffer[4], 1056); /* srb maxQ size must be --> [(256+8)*4] */
3175 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_SET_CONFIG);
3176 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3177 kprintf( "arcmsr%d: 'set command Q window' timeout \n", acb->pci_unit);
3180 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell, ARCMSR_MESSAGE_START_DRIVER_MODE);
3181 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3182 kprintf( "arcmsr%d: 'start driver mode' timeout \n", acb->pci_unit);
3187 case ACB_ADAPTER_TYPE_C: {
3188 if(srb_phyaddr_hi32!=0) {
3189 CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[0], ARCMSR_SIGNATURE_SET_CONFIG);
3190 CHIP_REG_WRITE32(HBC_MessageUnit, 0, msgcode_rwbuffer[1], srb_phyaddr_hi32);
3191 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_msgaddr0, ARCMSR_INBOUND_MESG0_SET_CONFIG);
3192 CHIP_REG_WRITE32(HBC_MessageUnit, 0, inbound_doorbell,ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3193 if(!arcmsr_hbc_wait_msgint_ready(acb)) {
3194 kprintf( "arcmsr%d: 'set srb high part physical address' timeout \n", acb->pci_unit);
3204 ************************************************************************
3205 ************************************************************************
/*
** arcmsr_enable_eoi_mode:
**	Enable end-of-interrupt mode on the IOP.  Only type B adapters
**	need this doorbell handshake; types A and C fall through with no
**	action (their case labels share an empty body in this listing).
*/
3207 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
3209 switch (acb->adapter_type)
3211 case ACB_ADAPTER_TYPE_A:
3212 case ACB_ADAPTER_TYPE_C:
3214 case ACB_ADAPTER_TYPE_B: {
3215 CHIP_REG_WRITE32(HBB_DOORBELL, 0, drv2iop_doorbell,ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
3216 if(!arcmsr_hbb_wait_msgint_ready(acb)) {
3217 kprintf( "arcmsr%d: 'iop enable eoi mode' timeout \n", acb->pci_unit);
3226 **********************************************************************
3227 **********************************************************************
/*
** arcmsr_iop_init:
**	Bring the IOP to operational state: with interrupts masked, wait
**	for firmware ready, confirm the SRB pool layout, fetch the
**	firmware configuration, start background rebuild, drain stale
**	doorbell state, enable EOI mode (type B), then re-enable the
**	previously saved interrupt mask and flag the acb as initialized.
*/
3229 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
3231 u_int32_t intmask_org;
3233 /* disable all outbound interrupt */
3234 intmask_org=arcmsr_disable_allintr(acb);
3235 arcmsr_wait_firmware_ready(acb);
3236 arcmsr_iop_confirm(acb);
3237 arcmsr_get_firmware_spec(acb);
3238 /*start background rebuild*/
3239 arcmsr_start_adapter_bgrb(acb);
3240 /* empty doorbell Qbuffer if door bell ringed */
3241 arcmsr_clear_doorbell_queue_buffer(acb);
3242 arcmsr_enable_eoi_mode(acb);
3243 /* enable outbound Post Queue, outbound doorbell Interrupt */
3244 arcmsr_enable_allintr(acb, intmask_org);
3245 acb->acb_flags |=ACB_F_IOP_INITED;
3248 **********************************************************************
3249 **********************************************************************
/*
** arcmsr_map_free_srb:
**	bus_dmamap_load(9) callback for the SRB pool.  Records the pool's
**	bus address, then carves the uncached pool memory into
**	ARCMSR_MAX_FREESRB_NUM fixed-size SRBs: each SRB gets its own DMA
**	map, its CDB physical address (full address on type C, >>5 shifted
**	on A/B), and a slot in the working queue.  Finally computes the
**	virtual-to-physical offset used to translate addresses the IOP
**	hands back on completion.  On dmamap creation failure sets
**	ACB_F_MAPFREESRB_FAILD, which arcmsr_initialize checks afterwards.
*/
3251 static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
3253 struct AdapterControlBlock *acb=arg;
3254 struct CommandControlBlock *srb_tmp;
3255 u_int8_t * dma_memptr;
3257 unsigned long srb_phyaddr=(unsigned long)segs->ds_addr;
3259 dma_memptr=acb->uncacheptr;
3260 acb->srb_phyaddr.phyaddr=srb_phyaddr;
3261 srb_tmp=(struct CommandControlBlock *)dma_memptr;
3262 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
3263 if(bus_dmamap_create(acb->dm_segs_dmat,
3264 /*flags*/0, &srb_tmp->dm_segs_dmamap)!=0) {
3265 acb->acb_flags |= ACB_F_MAPFREESRB_FAILD;
3267 " srb dmamap bus_dmamap_create error\n", acb->pci_unit);
/* type C posts the full address; A/B post a 32-byte-aligned value shifted right 5 */
3270 srb_tmp->cdb_shifted_phyaddr=(acb->adapter_type==ACB_ADAPTER_TYPE_C)?srb_phyaddr:(srb_phyaddr >> 5);
3272 acb->srbworkingQ[i]=acb->psrb_pool[i]=srb_tmp;
3273 srb_phyaddr=srb_phyaddr+SRB_SIZE;
3274 srb_tmp = (struct CommandControlBlock *)((unsigned long)srb_tmp+SRB_SIZE);
/* both pointers are now one-past-the-pool, so the difference is the constant KVA - physical offset */
3276 acb->vir2phy_offset=(unsigned long)srb_tmp-srb_phyaddr;
3279 ************************************************************************
3282 ************************************************************************
/*
** arcmsr_free_resource:
**	Tear down the control device node and the DMA resources created in
**	arcmsr_initialize (unload+destroy the SRB map, then destroy the
**	three DMA tags in reverse order of creation).  Does not release
**	memory/IRQ bus resources; arcmsr_detach handles those.
*/
3284 static void arcmsr_free_resource(struct AdapterControlBlock *acb)
3286 /* remove the control device */
3287 if(acb->ioctl_dev != NULL) {
3288 destroy_dev(acb->ioctl_dev);
3290 bus_dmamap_unload(acb->srb_dmat, acb->srb_dmamap);
3291 bus_dmamap_destroy(acb->srb_dmat, acb->srb_dmamap);
3292 bus_dma_tag_destroy(acb->srb_dmat);
3293 bus_dma_tag_destroy(acb->dm_segs_dmat);
3294 bus_dma_tag_destroy(acb->parent_dmat);
3297 ************************************************************************
3298 ************************************************************************
/*
** arcmsr_initialize:
**	One-time adapter bring-up called from arcmsr_attach:
**	1. classify the adapter family (A/B/C) from the PCI device id and
**	   size the coherent SRB pool (type B appends an HBB_MessageUnit);
**	2. create the parent / s-g / SRB DMA tags, allocate the SRB pool
**	   and load it via the arcmsr_map_free_srb callback;
**	3. enable bus-mastering and memory access in PCI command;
**	4. map the family-specific BAR(s) and point acb->pmu at the
**	   message unit (type B: two windows, doorbell + rwbuffer);
**	5. mark all target/lun state GONE and run arcmsr_iop_init.
**	NOTE(review): this listing is incomplete — braces, error returns
**	and some argument lines of the bus_dma_tag_create calls are not
**	visible here; comments describe only the visible lines.
*/
3300 static u_int32_t arcmsr_initialize(device_t dev)
3302 struct AdapterControlBlock *acb=device_get_softc(dev);
3303 u_int16_t pci_command;
3304 int i, j,max_coherent_size;
3305 u_int32_t vendor_dev_id;
3307 vendor_dev_id = pci_get_devid(dev);
3308 acb->vendor_device_id = vendor_dev_id;
/* classify adapter family and size the coherent allocation */
3309 switch (vendor_dev_id) {
3310 case PCIDevVenIDARC1880:
3311 case PCIDevVenIDARC1882:
3312 case PCIDevVenIDARC1213:
3313 case PCIDevVenIDARC1223: {
3314 acb->adapter_type=ACB_ADAPTER_TYPE_C;
3315 acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
3316 max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
3319 case PCIDevVenIDARC1200:
3320 case PCIDevVenIDARC1201: {
3321 acb->adapter_type=ACB_ADAPTER_TYPE_B;
3322 acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
/* type B keeps its message unit in host memory, right after the SRB pool */
3323 max_coherent_size=ARCMSR_SRBS_POOL_SIZE+(sizeof(struct HBB_MessageUnit));
3326 case PCIDevVenIDARC1110:
3327 case PCIDevVenIDARC1120:
3328 case PCIDevVenIDARC1130:
3329 case PCIDevVenIDARC1160:
3330 case PCIDevVenIDARC1170:
3331 case PCIDevVenIDARC1210:
3332 case PCIDevVenIDARC1220:
3333 case PCIDevVenIDARC1230:
3334 case PCIDevVenIDARC1231:
3335 case PCIDevVenIDARC1260:
3336 case PCIDevVenIDARC1261:
3337 case PCIDevVenIDARC1270:
3338 case PCIDevVenIDARC1280:
3339 case PCIDevVenIDARC1212:
3340 case PCIDevVenIDARC1222:
3341 case PCIDevVenIDARC1380:
3342 case PCIDevVenIDARC1381:
3343 case PCIDevVenIDARC1680:
3344 case PCIDevVenIDARC1681: {
3345 acb->adapter_type=ACB_ADAPTER_TYPE_A;
3346 acb->adapter_bus_speed = ACB_BUS_SPEED_3G;
3347 max_coherent_size=ARCMSR_SRBS_POOL_SIZE;
3352 " unknown RAID adapter type \n", device_get_unit(dev));
/* top-level parent tag: unrestricted, everything else derives from it */
3356 if(bus_dma_tag_create( /*parent*/ NULL,
3359 /*lowaddr*/ BUS_SPACE_MAXADDR,
3360 /*highaddr*/ BUS_SPACE_MAXADDR,
3363 /*maxsize*/ BUS_SPACE_MAXSIZE_32BIT,
3364 /*nsegments*/ BUS_SPACE_UNRESTRICTED,
3365 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
3367 &acb->parent_dmat) != 0)
3369 kprintf("arcmsr%d: parent_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3372 /* Create a single tag describing a region large enough to hold all of the s/g lists we will need. */
3373 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
3376 /*lowaddr*/ BUS_SPACE_MAXADDR,
3377 /*highaddr*/ BUS_SPACE_MAXADDR,
3380 /*maxsize*/ ARCMSR_MAX_SG_ENTRIES * PAGE_SIZE * ARCMSR_MAX_FREESRB_NUM,
3381 /*nsegments*/ ARCMSR_MAX_SG_ENTRIES,
3382 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
3384 &acb->dm_segs_dmat) != 0)
3386 bus_dma_tag_destroy(acb->parent_dmat);
3387 kprintf("arcmsr%d: dm_segs_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3390 /* DMA tag for our srb structures.... Allocate the freesrb memory */
3391 if(bus_dma_tag_create( /*parent_dmat*/ acb->parent_dmat,
/* SRB pool must sit below 4 GB (lowaddr 32-bit) so A/B adapters can address it */
3394 /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT,
3395 /*highaddr*/ BUS_SPACE_MAXADDR,
3398 /*maxsize*/ max_coherent_size,
3400 /*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
3402 &acb->srb_dmat) != 0)
3404 bus_dma_tag_destroy(acb->dm_segs_dmat);
3405 bus_dma_tag_destroy(acb->parent_dmat);
3406 kprintf("arcmsr%d: srb_dmat bus_dma_tag_create failure!\n", device_get_unit(dev));
3409 /* Allocation for our srbs */
3410 if(bus_dmamem_alloc(acb->srb_dmat, (void **)&acb->uncacheptr, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &acb->srb_dmamap) != 0) {
3411 bus_dma_tag_destroy(acb->srb_dmat);
3412 bus_dma_tag_destroy(acb->dm_segs_dmat);
3413 bus_dma_tag_destroy(acb->parent_dmat);
3414 kprintf("arcmsr%d: srb_dmat bus_dmamem_alloc failure!\n", device_get_unit(dev));
3417 /* And permanently map them */
3418 if(bus_dmamap_load(acb->srb_dmat, acb->srb_dmamap, acb->uncacheptr, max_coherent_size, arcmsr_map_free_srb, acb, /*flags*/0)) {
3419 bus_dma_tag_destroy(acb->srb_dmat);
3420 bus_dma_tag_destroy(acb->dm_segs_dmat);
3421 bus_dma_tag_destroy(acb->parent_dmat);
3422 kprintf("arcmsr%d: srb_dmat bus_dmamap_load failure!\n", device_get_unit(dev));
/* enable bus-mastering, parity-error response, memory-write-invalidate, memory space */
3425 pci_command=pci_read_config(dev, PCIR_COMMAND, 2);
3426 pci_command |= PCIM_CMD_BUSMASTEREN;
3427 pci_command |= PCIM_CMD_PERRESPEN;
3428 pci_command |= PCIM_CMD_MWRICEN;
3429 /* Enable Busmaster/Mem */
3430 pci_command |= PCIM_CMD_MEMEN;
3431 pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
/* map the family-specific register window(s) and locate the message unit */
3432 switch(acb->adapter_type) {
3433 case ACB_ADAPTER_TYPE_A: {
3434 u_int32_t rid0=PCIR_BAR(0);
3435 vm_offset_t mem_base0;
3437 acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, 0x1000, RF_ACTIVE);
3438 if(acb->sys_res_arcmsr[0] == NULL) {
3439 arcmsr_free_resource(acb);
3440 kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
3443 if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
3444 arcmsr_free_resource(acb);
3445 kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
3448 mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
3450 arcmsr_free_resource(acb);
3451 kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
3454 acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
3455 acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
3456 acb->pmu=(struct MessageUnit_UNION *)mem_base0;
3459 case ACB_ADAPTER_TYPE_B: {
3460 struct HBB_MessageUnit *phbbmu;
3461 struct CommandControlBlock *freesrb;
/* type B exposes two windows: BAR0 = doorbell registers, BAR2 = rwbuffer */
3462 u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) };
3463 vm_offset_t mem_base[]={0,0};
3464 for(i=0; i<2; i++) {
3466 acb->sys_res_arcmsr[i]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid[i],
3467 0ul, ~0ul, sizeof(struct HBB_DOORBELL), RF_ACTIVE);
3469 acb->sys_res_arcmsr[i]=bus_alloc_resource(dev, SYS_RES_MEMORY, &rid[i],
3470 0ul, ~0ul, sizeof(struct HBB_RWBUFFER), RF_ACTIVE);
3472 if(acb->sys_res_arcmsr[i] == NULL) {
3473 arcmsr_free_resource(acb);
3474 kprintf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i);
3477 if(rman_get_start(acb->sys_res_arcmsr[i]) <= 0) {
3478 arcmsr_free_resource(acb);
3479 kprintf("arcmsr%d: rman_get_start %d failure!\n", device_get_unit(dev), i);
3482 mem_base[i]=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[i]);
3483 if(mem_base[i]==0) {
3484 arcmsr_free_resource(acb);
3485 kprintf("arcmsr%d: rman_get_virtual %d failure!\n", device_get_unit(dev), i);
3488 acb->btag[i]=rman_get_bustag(acb->sys_res_arcmsr[i]);
3489 acb->bhandle[i]=rman_get_bushandle(acb->sys_res_arcmsr[i]);
/* type B message unit lives in host memory, directly after the SRB pool */
3491 freesrb=(struct CommandControlBlock *)acb->uncacheptr;
3492 // acb->pmu=(struct MessageUnit_UNION *)&freesrb[ARCMSR_MAX_FREESRB_NUM];
3493 acb->pmu=(struct MessageUnit_UNION *)((unsigned long)freesrb+ARCMSR_SRBS_POOL_SIZE);
3494 phbbmu=(struct HBB_MessageUnit *)acb->pmu;
3495 phbbmu->hbb_doorbell=(struct HBB_DOORBELL *)mem_base[0];
3496 phbbmu->hbb_rwbuffer=(struct HBB_RWBUFFER *)mem_base[1];
3499 case ACB_ADAPTER_TYPE_C: {
3500 u_int32_t rid0=PCIR_BAR(1);
3501 vm_offset_t mem_base0;
3503 acb->sys_res_arcmsr[0]=bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBC_MessageUnit), RF_ACTIVE);
3504 if(acb->sys_res_arcmsr[0] == NULL) {
3505 arcmsr_free_resource(acb);
3506 kprintf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
3509 if(rman_get_start(acb->sys_res_arcmsr[0]) <= 0) {
3510 arcmsr_free_resource(acb);
3511 kprintf("arcmsr%d: rman_get_start failure!\n", device_get_unit(dev));
3514 mem_base0=(vm_offset_t) rman_get_virtual(acb->sys_res_arcmsr[0]);
3516 arcmsr_free_resource(acb);
3517 kprintf("arcmsr%d: rman_get_virtual failure!\n", device_get_unit(dev));
3520 acb->btag[0]=rman_get_bustag(acb->sys_res_arcmsr[0]);
3521 acb->bhandle[0]=rman_get_bushandle(acb->sys_res_arcmsr[0]);
3522 acb->pmu=(struct MessageUnit_UNION *)mem_base0;
/* arcmsr_map_free_srb reports dmamap creation failure through this flag */
3526 if(acb->acb_flags & ACB_F_MAPFREESRB_FAILD) {
3527 arcmsr_free_resource(acb);
3528 kprintf("arcmsr%d: map free srb failure!\n", device_get_unit(dev));
3531 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED|ACB_F_MESSAGE_RQBUFFER_CLEARED|ACB_F_MESSAGE_WQBUFFER_READ);
3532 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
3534 ********************************************************************
3535 ** init raid volume state
3536 ********************************************************************
3538 for(i=0;i<ARCMSR_MAX_TARGETID;i++) {
3539 for(j=0;j<ARCMSR_MAX_TARGETLUN;j++) {
3540 acb->devstate[i][j]=ARECA_RAID_GONE;
3543 arcmsr_iop_init(acb);
3547 ************************************************************************
3548 ************************************************************************
/*
** arcmsr_attach:
**	Device attach entry point: initialize the adapter (arcmsr_initialize),
**	hook the interrupt (MSI or legacy via pci_alloc_1intr), register a
**	CAM SIM/bus/path for SCSI I/O, subscribe to async device events,
**	create the /dev control node (plus an "arc%d" alias), and start the
**	periodic device-map polling callout (every 60 s).  Each failure path
**	unwinds everything acquired so far.
**	NOTE(review): this listing is incomplete — error returns, some
**	braces and a few argument lines are not visible here.
*/
3550 static int arcmsr_attach(device_t dev)
3552 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3553 u_int32_t unit=device_get_unit(dev);
3554 struct ccb_setasync csa;
3555 struct cam_devq *devq; /* Device Queue to use for this SIM */
3556 struct resource *irqres;
3561 kprintf("arcmsr%d: cannot allocate softc\n", unit);
3564 ARCMSR_LOCK_INIT(&acb->qbuffer_lock, "arcmsr Q buffer lock");
3565 if(arcmsr_initialize(dev)) {
3566 kprintf("arcmsr%d: initialize failure!\n", unit);
3567 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3570 /* After setting up the adapter, map our interrupt */
3572 acb->irq_type = pci_alloc_1intr(dev, arcmsr_msi_enable, &rid,
3574 irqres=bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1,
3576 if(irqres == NULL ||
3577 bus_setup_intr(dev, irqres, INTR_MPSAFE, arcmsr_intr_handler, acb, &acb->ih, NULL)) {
3578 arcmsr_free_resource(acb);
3579 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3580 kprintf("arcmsr%d: unable to register interrupt handler!\n", unit);
3587 * Now let the CAM generic SCSI layer find the SCSI devices on
3588 * the bus * start queue to reset to the idle loop. *
3589 * Create device queue of SIM(s) * (MAX_START_JOB - 1) :
3590 * max_sim_transactions
3592 devq=cam_simq_alloc(ARCMSR_MAX_START_JOB);
3594 arcmsr_free_resource(acb);
3595 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3596 if (acb->irq_type == PCI_INTR_TYPE_MSI)
3597 pci_release_msi(dev);
3598 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3599 kprintf("arcmsr%d: cam_simq_alloc failure!\n", unit);
3602 acb->psim=cam_sim_alloc(arcmsr_action, arcmsr_poll, "arcmsr", acb, unit, &acb->qbuffer_lock, 1, ARCMSR_MAX_OUTSTANDING_CMD, devq);
/* the SIM holds its own devq reference; drop ours */
3603 cam_simq_release(devq);
3604 if(acb->psim == NULL) {
3605 arcmsr_free_resource(acb);
3606 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3607 if (acb->irq_type == PCI_INTR_TYPE_MSI)
3608 pci_release_msi(dev);
3609 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3610 kprintf("arcmsr%d: cam_sim_alloc failure!\n", unit);
3613 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3614 if(xpt_bus_register(acb->psim, 0) != CAM_SUCCESS) {
3615 arcmsr_free_resource(acb);
3616 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3617 if (acb->irq_type == PCI_INTR_TYPE_MSI)
3618 pci_release_msi(dev);
3619 cam_sim_free(acb->psim);
3620 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3621 kprintf("arcmsr%d: xpt_bus_register failure!\n", unit);
3624 if(xpt_create_path(&acb->ppath, /* periph */ NULL, cam_sim_path(acb->psim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
3625 arcmsr_free_resource(acb);
3626 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3627 if (acb->irq_type == PCI_INTR_TYPE_MSI)
3628 pci_release_msi(dev);
3629 xpt_bus_deregister(cam_sim_path(acb->psim));
3630 cam_sim_free(acb->psim);
3631 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3632 kprintf("arcmsr%d: xpt_create_path failure!\n", unit);
3636 ****************************************************
/* subscribe to device arrival/departure notifications */
3638 xpt_setup_ccb(&csa.ccb_h, acb->ppath, /*priority*/5);
3639 csa.ccb_h.func_code=XPT_SASYNC_CB;
3640 csa.event_enable=AC_FOUND_DEVICE|AC_LOST_DEVICE;
3641 csa.callback=arcmsr_async;
3642 csa.callback_arg=acb->psim;
3643 xpt_action((union ccb *)&csa);
3644 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3645 /* Create the control device. */
3646 acb->ioctl_dev=make_dev(&arcmsr_ops, unit, UID_ROOT, GID_WHEEL /* GID_OPERATOR */, S_IRUSR | S_IWUSR, "arcmsr%d", unit);
3648 acb->ioctl_dev->si_drv1=acb;
3649 (void)make_dev_alias(acb->ioctl_dev, "arc%d", unit);
/* poll the firmware device map once a minute for hot-plug changes */
3650 arcmsr_callout_init(&acb->devmap_callout);
3651 callout_reset(&acb->devmap_callout, 60 * hz, arcmsr_polling_devmap, acb);
3656 ************************************************************************
3657 ************************************************************************
/*
** arcmsr_probe:
**	PCI probe entry point: reject non-Areca vendor ids, classify the
**	device id into a controller family description, disable MSI for
**	the 6 Gb family (arcmsr_msi_enable cleared in that case group),
**	and publish a human-readable device description.
**	NOTE(review): this listing is incomplete — the declarations of
**	`id`, `type` and `raid6`, the returns, and several case bodies are
**	not visible here.
*/
3659 static int arcmsr_probe(device_t dev)
3662 static char buf[256];
3663 char x_type[]={"X-TYPE"};
3667 if (pci_get_vendor(dev) != PCI_VENDOR_ID_ARECA) {
3670 switch(id=pci_get_devid(dev)) {
3671 case PCIDevVenIDARC1110:
3672 case PCIDevVenIDARC1200:
3673 case PCIDevVenIDARC1201:
3674 case PCIDevVenIDARC1210:
3677 case PCIDevVenIDARC1120:
3678 case PCIDevVenIDARC1130:
3679 case PCIDevVenIDARC1160:
3680 case PCIDevVenIDARC1170:
3681 case PCIDevVenIDARC1220:
3682 case PCIDevVenIDARC1230:
3683 case PCIDevVenIDARC1231:
3684 case PCIDevVenIDARC1260:
3685 case PCIDevVenIDARC1261:
3686 case PCIDevVenIDARC1270:
3687 case PCIDevVenIDARC1280:
3690 case PCIDevVenIDARC1212:
3691 case PCIDevVenIDARC1222:
3692 case PCIDevVenIDARC1380:
3693 case PCIDevVenIDARC1381:
3694 case PCIDevVenIDARC1680:
3695 case PCIDevVenIDARC1681:
3698 case PCIDevVenIDARC1880:
3699 case PCIDevVenIDARC1882:
3700 case PCIDevVenIDARC1213:
3701 case PCIDevVenIDARC1223:
/* 6 Gb (type C) family: run with legacy interrupts */
3703 arcmsr_msi_enable = 0;
3711 ksprintf(buf, "Areca %s Host Adapter RAID Controller%s", type, raid6 ? " (RAID6 capable)" : "");
3712 device_set_desc_copy(dev, buf);
3713 return (BUS_PROBE_DEFAULT);
3716 ************************************************************************
3717 ************************************************************************
/*
** arcmsr_shutdown:
**	Quiesce the adapter: under the qbuffer lock, mask interrupts, stop
**	background rebuild, flush the adapter cache, flag the adapter as
**	stopped, and — if commands are still outstanding — drain the
**	outbound post queue, tell the IOP to abort everything, then
**	complete every in-flight SRB back to CAM with CAM_REQ_ABORTED.
**	Finally reset the working-queue indices (and, under ARCMSR_DEBUG1,
**	the request/return packet counters).
*/
3719 static int arcmsr_shutdown(device_t dev)
3722 struct CommandControlBlock *srb;
3723 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3725 /* stop adapter background rebuild */
3726 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3727 /* disable all outbound interrupt */
3728 arcmsr_disable_allintr(acb);
3729 arcmsr_stop_adapter_bgrb(acb);
3730 arcmsr_flush_adapter_cache(acb);
3731 /* abort all outstanding command */
3732 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
3733 acb->acb_flags &= ~ACB_F_IOP_INITED;
3734 if(acb->srboutstandingcount!=0) {
3735 /*clear and abort all outbound posted Q*/
3736 arcmsr_done4abort_postqueue(acb);
3737 /* talk to iop 331 outstanding command aborted*/
3738 arcmsr_abort_allcmd(acb);
3739 for(i=0;i<ARCMSR_MAX_FREESRB_NUM;i++) {
3740 srb=acb->psrb_pool[i];
3741 if(srb->srb_state==ARCMSR_SRB_START) {
3742 srb->srb_state=ARCMSR_SRB_ABORTED;
3743 srb->pccb->ccb_h.status |= CAM_REQ_ABORTED;
3744 arcmsr_srb_complete(srb, 1);
3748 acb->srboutstandingcount=0;
3749 acb->workingsrb_doneindex=0;
3750 acb->workingsrb_startindex=0;
3751 #ifdef ARCMSR_DEBUG1
3752 acb->pktRequestCount = 0;
3753 acb->pktReturnCount = 0;
3755 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3759 ************************************************************************
3760 ************************************************************************
3762 static int arcmsr_detach(device_t dev)
3764 struct AdapterControlBlock *acb=(struct AdapterControlBlock *)device_get_softc(dev);
3767 callout_stop(&acb->devmap_callout);
3768 bus_teardown_intr(dev, acb->irqres, acb->ih);
3769 arcmsr_shutdown(dev);
3770 arcmsr_free_resource(acb);
3771 for(i=0; (acb->sys_res_arcmsr[i]!=NULL) && (i<2); i++) {
3772 bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]);
3774 bus_release_resource(dev, SYS_RES_IRQ, 0, acb->irqres);
3775 if (acb->irq_type == PCI_INTR_TYPE_MSI)
3776 pci_release_msi(dev);
3777 ARCMSR_LOCK_ACQUIRE(&acb->qbuffer_lock);
3778 xpt_async(AC_LOST_DEVICE, acb->ppath, NULL);
3779 xpt_free_path(acb->ppath);
3780 xpt_bus_deregister(cam_sim_path(acb->psim));
3781 cam_sim_free(acb->psim);
3782 ARCMSR_LOCK_RELEASE(&acb->qbuffer_lock);
3783 ARCMSR_LOCK_DESTROY(&acb->qbuffer_lock);
3787 #ifdef ARCMSR_DEBUG1
3788 static void arcmsr_dump_data(struct AdapterControlBlock *acb)
3790 if((acb->pktRequestCount - acb->pktReturnCount) == 0)
3792 printf("Command Request Count =0x%x\n",acb->pktRequestCount);
3793 printf("Command Return Count =0x%x\n",acb->pktReturnCount);
3794 printf("Command (Req-Rtn) Count =0x%x\n",(acb->pktRequestCount - acb->pktReturnCount));
3795 printf("Queued Command Count =0x%x\n",acb->srboutstandingcount);