From: Sascha Wildner Date: Tue, 7 Dec 2010 23:54:45 +0000 (+0100) Subject: hptiop(4): Add the hptiop(4) RAID driver. X-Git-Url: https://gitweb.dragonflybsd.org/~alexh/dragonfly.git/commitdiff_plain/b781666a37d58af738513c17b41e0305d5c9f406 hptiop(4): Add the hptiop(4) RAID driver. The driver supports the following adapters: * HighPoint RocketRAID 3020 * HighPoint RocketRAID 3120 * HighPoint RocketRAID 3122 * HighPoint RocketRAID 3220 * HighPoint RocketRAID 3320 * HighPoint RocketRAID 3410 * HighPoint RocketRAID 3510 * HighPoint RocketRAID 3511 * HighPoint RocketRAID 3520 * HighPoint RocketRAID 3521 * HighPoint RocketRAID 3522 * HighPoint RocketRAID 3540 * HighPoint RocketRAID 4320 It was tested with the 3120. Many thanks to FreeBSD, from which this code was obtained. --- diff --git a/share/man/man4/Makefile b/share/man/man4/Makefile index 95545164fc..95e3e6bdc8 100644 --- a/share/man/man4/Makefile +++ b/share/man/man4/Makefile @@ -99,6 +99,7 @@ MAN= aac.4 \ gre.4 \ gusc.4 \ hifn.4 \ + hptiop.4 \ ichsmb.4 \ icmp.4 \ icmp6.4 \ diff --git a/share/man/man4/hptiop.4 b/share/man/man4/hptiop.4 new file mode 100644 index 0000000000..8a76b9572d --- /dev/null +++ b/share/man/man4/hptiop.4 @@ -0,0 +1,103 @@ +.\" Copyright (c) 2007 Christian Brueffer +.\" All rights reserved. +.\" +.\" Redistribution and use in source and binary forms, with or without +.\" modification, are permitted provided that the following conditions +.\" are met: +.\" 1. Redistributions of source code must retain the above copyright +.\" notice, this list of conditions and the following disclaimer. +.\" 2. Redistributions in binary form must reproduce the above copyright +.\" notice, this list of conditions and the following disclaimer in the +.\" documentation and/or other materials provided with the distribution. +.\" +.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +.\" SUCH DAMAGE. +.\" +.\" $FreeBSD: src/share/man/man4/hptiop.4,v 1.2 2008/02/03 16:16:38 scottl Exp $ +.\" +.Dd December 8, 2010 +.Dt HPTIOP 4 +.Os +.Sh NAME +.Nm hptiop +.Nd "HighPoint RocketRAID 3xxx/4xxx device driver" +.Sh SYNOPSIS +To compile this driver into the kernel, +place the following line in your +kernel configuration file: +.Bd -ragged -offset indent +.Cd "device hptiop" +.Cd "device scbus" +.Cd "device da" +.Ed +.Pp +Alternatively, to load the driver as a +module at boot time, place the following line in +.Xr loader.conf 5 : +.Bd -literal -offset indent +hptiop_load="YES" +.Ed +.Sh DESCRIPTION +The +.Nm +driver provides support for the HighPoint RocketRAID 3xxx/4xxx series +of SAS and SATA RAID controllers. 
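+.Pp
+Once the system is up, the module can also be loaded manually with
+.Xr kldload 8 :
+.Bd -literal -offset indent
+kldload hptiop
+.Ed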
+.Sh HARDWARE +The +.Nm +driver supports the following SAS and SATA RAID controllers: +.Pp +.Bl -bullet -compact +.It +HighPoint RocketRAID 4320 +.It +HighPoint RocketRAID 3220 +.It +HighPoint RocketRAID 3320 +.It +HighPoint RocketRAID 3410 +.It +HighPoint RocketRAID 3520 +.It +HighPoint RocketRAID 3510 +.It +HighPoint RocketRAID 3511 +.It +HighPoint RocketRAID 3521 +.It +HighPoint RocketRAID 3522 +.It +HighPoint RocketRAID 3540 +.It +HighPoint RocketRAID 3120 +.It +HighPoint RocketRAID 3122 +.It +HighPoint RocketRAID 3020 +.El +.Sh NOTES +The +.Nm +driver has only been tested on the i386 and amd64 platforms. +.Sh SEE ALSO +.Xr cam 4 +.\".Xr hptmv 4 , +.Sh HISTORY +The +.Nm +device driver first appeared in +.Fx 7.0 . +.Pp +.Sh AUTHORS +The +.Nm +driver was written by HighPoint Technologies, Inc. diff --git a/sys/conf/files b/sys/conf/files index f5c73dca70..9e4d3734e0 100644 --- a/sys/conf/files +++ b/sys/conf/files @@ -390,6 +390,7 @@ dev/atm/hfa/fore_stats.c optional nowerror hfa dev/atm/hfa/fore_timer.c optional hfa dev/atm/hfa/fore_transmit.c optional nowerror hfa dev/atm/hfa/fore_vcm.c optional hfa +dev/raid/hptiop/hptiop.c optional hptiop scbus dev/netif/ie/if_ie.c optional nowerror ie isa dev/powermng/ichsmb/ichsmb.c optional ichsmb dev/powermng/ichsmb/ichsmb_pci.c optional ichsmb pci diff --git a/sys/config/GENERIC b/sys/config/GENERIC index 8f281c977c..49a35db684 100644 --- a/sys/config/GENERIC +++ b/sys/config/GENERIC @@ -128,6 +128,7 @@ device ahb # EISA AHA1742 family device ahc # AHA2940 and onboard AIC7xxx devices device ahd # AHA39320/29320 and onboard AIC79xx devices device amd # AMD 53C974 (Tekram DC-390(T)) +device hptiop # Highpoint RocketRaid 3xxx series device isp # Qlogic family device mpt # LSI-Logic MPT/Fusion device ncr # NCR/Symbios Logic diff --git a/sys/config/GENERIC_SMP b/sys/config/GENERIC_SMP index 619d777d5b..c3cefbc4ec 100644 --- a/sys/config/GENERIC_SMP +++ b/sys/config/GENERIC_SMP @@ -129,6 +129,7 @@ device ahb # EISA AHA1742 family device ahc # AHA2940 and onboard AIC7xxx devices device ahd # AHA39320/29320 and onboard AIC79xx devices device amd # AMD 53C974 (Tekram DC-390(T)) +device hptiop # Highpoint RocketRaid 3xxx series device isp # Qlogic family device mpt # LSI-Logic MPT/Fusion device ncr # NCR/Symbios Logic diff --git a/sys/config/LINT b/sys/config/LINT index f32602c271..414aa35171 100644 --- a/sys/config/LINT +++ b/sys/config/LINT @@ -1275,6 +1275,10 @@ options MFI_DEBUG # device arcmsr # Areca SATA II RAID +# +# Highpoint RocketRaid 3xxx series SATA RAID +device hptiop + # # 3ware ATA RAID # diff --git a/sys/config/X86_64_GENERIC b/sys/config/X86_64_GENERIC index a66a733fcd..93e8d7e747 100644 --- a/sys/config/X86_64_GENERIC +++ b/sys/config/X86_64_GENERIC @@ -114,6 +114,7 @@ device ahb # EISA AHA1742 family device ahc # AHA2940 and onboard AIC7xxx devices device ahd # AHA39320/29320 and onboard AIC79xx devices device amd # AMD 53C974 (Tekram DC-390(T)) +device hptiop # Highpoint RocketRaid 3xxx series device isp # Qlogic family device mpt # LSI-Logic MPT/Fusion device ncr # NCR/Symbios Logic diff --git a/sys/config/X86_64_GENERIC_SMP b/sys/config/X86_64_GENERIC_SMP index 6c3ffb8741..0676c29a10 100644 --- a/sys/config/X86_64_GENERIC_SMP +++ b/sys/config/X86_64_GENERIC_SMP @@ -114,6 +114,7 @@ device ahb # EISA AHA1742 family device ahc # AHA2940 and onboard AIC7xxx devices device ahd # AHA39320/29320 and onboard AIC79xx devices device amd # AMD 53C974 (Tekram DC-390(T)) +device hptiop # Highpoint RocketRaid 3xxx series device isp # Qlogic 
family device mpt # LSI-Logic MPT/Fusion device ncr # NCR/Symbios Logic diff --git a/sys/dev/raid/Makefile b/sys/dev/raid/Makefile index c0fb69587b..0dfe1e28d6 100644 --- a/sys/dev/raid/Makefile +++ b/sys/dev/raid/Makefile @@ -1,3 +1,3 @@ -SUBDIR=aac amr arcmsr asr ciss iir ips mfi mlx mly pst twa twe vinum +SUBDIR=aac amr arcmsr asr ciss hptiop iir ips mfi mlx mly pst twa twe vinum .include <bsd.subdir.mk> diff --git a/sys/dev/raid/hptiop/Makefile b/sys/dev/raid/hptiop/Makefile new file mode 100644 index 0000000000..6c3b5af628 --- /dev/null +++ b/sys/dev/raid/hptiop/Makefile @@ -0,0 +1,8 @@ +# $FreeBSD: src/sys/modules/hptiop/Makefile,v 1.1 2007/05/09 07:07:26 scottl Exp $ + +KMOD= hptiop +SRCS= hptiop.c +SRCS+= opt_scsi.h opt_cam.h +SRCS+= device_if.h bus_if.h pci_if.h + +.include <bsd.kmod.mk> diff --git a/sys/dev/raid/hptiop/hptiop.c b/sys/dev/raid/hptiop/hptiop.c new file mode 100644 index 0000000000..ae037c3be2 --- /dev/null +++ b/sys/dev/raid/hptiop/hptiop.c @@ -0,0 +1,2125 @@ +/* + * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD + * Copyright (C) 2007-2008 HighPoint Technologies, Inc. All Rights Reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE.
+ * + * $FreeBSD: src/sys/dev/hptiop/hptiop.c,v 1.8 2010/06/19 13:41:17 mav Exp $ + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static char driver_name[] = "hptiop"; +static char driver_version[] = "v1.3 (010208)"; + +static devclass_t hptiop_devclass; + +static int hptiop_send_sync_msg(struct hpt_iop_hba *hba, + u_int32_t msg, u_int32_t millisec); +static void hptiop_request_callback_itl(struct hpt_iop_hba *hba, + u_int32_t req); +static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req); +static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg); +static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba, + struct hpt_iop_ioctl_param *pParams); +static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba, + struct hpt_iop_ioctl_param *pParams); +static void hptiop_bus_scan_cb(struct cam_periph *periph, union ccb *ccb); +static int hptiop_rescan_bus(struct hpt_iop_hba *hba); +static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba); +static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba); +static int hptiop_get_config_itl(struct hpt_iop_hba *hba, + struct hpt_iop_request_get_config *config); +static int hptiop_get_config_mv(struct hpt_iop_hba *hba, + struct hpt_iop_request_get_config *config); +static int hptiop_set_config_itl(struct hpt_iop_hba *hba, + struct hpt_iop_request_set_config *config); +static int hptiop_set_config_mv(struct hpt_iop_hba *hba, + struct hpt_iop_request_set_config *config); +static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba); +static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba); +static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba, + u_int32_t req32, struct hpt_iop_ioctl_param *pParams); +static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba, + struct hpt_iop_request_ioctl_command *req, + struct hpt_iop_ioctl_param *pParams); +static void hptiop_post_req_itl(struct hpt_iop_hba *hba, + struct hpt_iop_srb *srb, + bus_dma_segment_t *segs, int nsegs); +static void hptiop_post_req_mv(struct hpt_iop_hba *hba, + struct hpt_iop_srb *srb, + bus_dma_segment_t *segs, int nsegs); +static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg); +static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg); +static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba); +static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba); +static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba); +static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba); +static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb); +static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid); +static int hptiop_probe(device_t dev); +static int hptiop_attach(device_t dev); +static int hptiop_detach(device_t dev); +static int hptiop_shutdown(device_t dev); +static void hptiop_action(struct cam_sim *sim, union ccb *ccb); +static void hptiop_poll(struct cam_sim *sim); +static void hptiop_async(void *callback_arg, u_int32_t code, + struct cam_path *path, void *arg); +static void hptiop_pci_intr(void *arg); +static void hptiop_release_resource(struct hpt_iop_hba *hba); +static int hptiop_reset_adapter(struct hpt_iop_hba *hba); + +static 
d_open_t hptiop_open; +static d_close_t hptiop_close; +static d_ioctl_t hptiop_ioctl; + +static struct dev_ops hptiop_ops = { + { driver_name, 0, 0 }, + .d_open = hptiop_open, + .d_close = hptiop_close, + .d_ioctl = hptiop_ioctl, +}; + +#define hba_from_dev(dev) ((struct hpt_iop_hba *)(dev)->si_drv1) + +#define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\ + hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value)) +#define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\ + hba->bar0h, offsetof(struct hpt_iopmu_itl, offset)) + +#define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\ + hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value) +#define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\ + hba->bar0h, offsetof(struct hpt_iopmv_regs, offset)) +#define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\ + hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value) +#define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\ + hba->bar2h, offsetof(struct hpt_iopmu_mv, offset)) + +static int hptiop_open(struct dev_open_args *ap) +{ + cdev_t dev = ap->a_head.a_dev; + struct hpt_iop_hba *hba = hba_from_dev(dev); + + if (hba==NULL) + return ENXIO; + if (hba->flag & HPT_IOCTL_FLAG_OPEN) + return EBUSY; + hba->flag |= HPT_IOCTL_FLAG_OPEN; + return 0; +} + +static int hptiop_close(struct dev_close_args *ap) +{ + cdev_t dev = ap->a_head.a_dev; + struct hpt_iop_hba *hba = hba_from_dev(dev); + hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN; + return 0; +} + +static int hptiop_ioctl(struct dev_ioctl_args *ap) +{ + cdev_t dev = ap->a_head.a_dev; + u_long cmd = ap->a_cmd; + caddr_t data = ap->a_data; + int ret = EFAULT; + struct hpt_iop_hba *hba = hba_from_dev(dev); + + get_mplock(); + + switch (cmd) { + case HPT_DO_IOCONTROL: + ret = hba->ops->do_ioctl(hba, + (struct hpt_iop_ioctl_param *)data); + break; + case HPT_SCAN_BUS: + ret = hptiop_rescan_bus(hba); + break; + } + + rel_mplock(); + + return ret; +} + +static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba) +{ + u_int64_t p; + u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail); + u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head); + + if (outbound_tail != outbound_head) { + bus_space_read_region_4(hba->bar2t, hba->bar2h, + offsetof(struct hpt_iopmu_mv, + outbound_q[outbound_tail]), + (u_int32_t *)&p, 2); + + outbound_tail++; + + if (outbound_tail == MVIOP_QUEUE_LEN) + outbound_tail = 0; + + BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail); + return p; + } else + return 0; +} + +static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba) +{ + u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head); + u_int32_t head = inbound_head + 1; + + if (head == MVIOP_QUEUE_LEN) + head = 0; + + bus_space_write_region_4(hba->bar2t, hba->bar2h, + offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]), + (u_int32_t *)&p, 2); + BUS_SPACE_WRT4_MV2(inbound_head, head); + BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE); +} + +static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg) +{ + BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg); + BUS_SPACE_RD4_ITL(outbound_intstatus); +} + +static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg) +{ + + BUS_SPACE_WRT4_MV2(inbound_msg, msg); + BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG); + + BUS_SPACE_RD4_MV0(outbound_intmask); +} + +static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec) +{ + u_int32_t req=0; + int i; + + for 
(i = 0; i < millisec; i++) { + req = BUS_SPACE_RD4_ITL(inbound_queue); + if (req != IOPMU_QUEUE_EMPTY) + break; + DELAY(1000); + } + + if (req!=IOPMU_QUEUE_EMPTY) { + BUS_SPACE_WRT4_ITL(outbound_queue, req); + BUS_SPACE_RD4_ITL(outbound_intstatus); + return 0; + } + + return -1; +} + +static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec) +{ + if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec)) + return -1; + + return 0; +} + +static void hptiop_request_callback_itl(struct hpt_iop_hba * hba, + u_int32_t index) +{ + struct hpt_iop_srb *srb; + struct hpt_iop_request_scsi_command *req=0; + union ccb *ccb; + u_int8_t *cdb; + u_int32_t result, temp, dxfer; + u_int64_t temp64; + + if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/ + if (hba->firmware_version > 0x01020000 || + hba->interface_version > 0x01020000) { + srb = hba->srb[index & ~(u_int32_t) + (IOPMU_QUEUE_ADDR_HOST_BIT + | IOPMU_QUEUE_REQUEST_RESULT_BIT)]; + req = (struct hpt_iop_request_scsi_command *)srb; + if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT) + result = IOP_RESULT_SUCCESS; + else + result = req->header.result; + } else { + srb = hba->srb[index & + ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT]; + req = (struct hpt_iop_request_scsi_command *)srb; + result = req->header.result; + } + dxfer = req->dataxfer_length; + goto srb_complete; + } + + /*iop req*/ + temp = bus_space_read_4(hba->bar0t, hba->bar0h, index + + offsetof(struct hpt_iop_request_header, type)); + result = bus_space_read_4(hba->bar0t, hba->bar0h, index + + offsetof(struct hpt_iop_request_header, result)); + switch(temp) { + case IOP_REQUEST_TYPE_IOCTL_COMMAND: + { + temp64 = 0; + bus_space_write_region_4(hba->bar0t, hba->bar0h, index + + offsetof(struct hpt_iop_request_header, context), + (u_int32_t *)&temp64, 2); + wakeup((void *)((unsigned long)hba->u.itl.mu + index)); + break; + } + + case IOP_REQUEST_TYPE_SCSI_COMMAND: + bus_space_read_region_4(hba->bar0t, hba->bar0h, index + + offsetof(struct hpt_iop_request_header, context), + (u_int32_t *)&temp64, 2); + srb = (struct hpt_iop_srb *)(unsigned long)temp64; + dxfer = bus_space_read_4(hba->bar0t, hba->bar0h, + index + offsetof(struct hpt_iop_request_scsi_command, + dataxfer_length)); +srb_complete: + ccb = (union ccb *)srb->ccb; + if (ccb->ccb_h.flags & CAM_CDB_POINTER) + cdb = ccb->csio.cdb_io.cdb_ptr; + else + cdb = ccb->csio.cdb_io.cdb_bytes; + + if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? 
*/ + ccb->ccb_h.status = CAM_REQ_CMP; + goto scsi_done; + } + + switch (result) { + case IOP_RESULT_SUCCESS: + switch (ccb->ccb_h.flags & CAM_DIR_MASK) { + case CAM_DIR_IN: + bus_dmamap_sync(hba->io_dmat, + srb->dma_map, BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(hba->io_dmat, srb->dma_map); + break; + case CAM_DIR_OUT: + bus_dmamap_sync(hba->io_dmat, + srb->dma_map, BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(hba->io_dmat, srb->dma_map); + break; + } + + ccb->ccb_h.status = CAM_REQ_CMP; + break; + + case IOP_RESULT_BAD_TARGET: + ccb->ccb_h.status = CAM_DEV_NOT_THERE; + break; + case IOP_RESULT_BUSY: + ccb->ccb_h.status = CAM_BUSY; + break; + case IOP_RESULT_INVALID_REQUEST: + ccb->ccb_h.status = CAM_REQ_INVALID; + break; + case IOP_RESULT_FAIL: + ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; + break; + case IOP_RESULT_RESET: + ccb->ccb_h.status = CAM_BUSY; + break; + case IOP_RESULT_CHECK_CONDITION: + if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/ + bus_space_read_region_1(hba->bar0t, hba->bar0h, + index + offsetof(struct hpt_iop_request_scsi_command, + sg_list), (u_int8_t *)&ccb->csio.sense_data, + MIN(dxfer, sizeof(ccb->csio.sense_data))); + } else { + memcpy(&ccb->csio.sense_data, &req->sg_list, + MIN(dxfer, sizeof(ccb->csio.sense_data))); + } + ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; + ccb->ccb_h.status |= CAM_AUTOSNS_VALID; + ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; + break; + default: + ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; + break; + } +scsi_done: + if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) + BUS_SPACE_WRT4_ITL(outbound_queue, index); + + ccb->csio.resid = ccb->csio.dxfer_len - dxfer; + + hptiop_free_srb(hba, srb); + xpt_done(ccb); + break; + } +} + +static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba) +{ + u_int32_t req, temp; + + while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) { + if (req & IOPMU_QUEUE_MASK_HOST_BITS) + hptiop_request_callback_itl(hba, req); + else { + struct hpt_iop_request_header *p; + + p = (struct hpt_iop_request_header *) + ((char *)hba->u.itl.mu + req); + temp = bus_space_read_4(hba->bar0t, + hba->bar0h,req + + offsetof(struct hpt_iop_request_header, + flags)); + if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) { + u_int64_t temp64; + bus_space_read_region_4(hba->bar0t, + hba->bar0h,req + + offsetof(struct hpt_iop_request_header, + context), + (u_int32_t *)&temp64, 2); + if (temp64) { + hptiop_request_callback_itl(hba, req); + } else { + temp64 = 1; + bus_space_write_region_4(hba->bar0t, + hba->bar0h,req + + offsetof(struct hpt_iop_request_header, + context), + (u_int32_t *)&temp64, 2); + } + } else + hptiop_request_callback_itl(hba, req); + } + } +} + +static int hptiop_intr_itl(struct hpt_iop_hba * hba) +{ + u_int32_t status; + int ret = 0; + + status = BUS_SPACE_RD4_ITL(outbound_intstatus); + + if (status & IOPMU_OUTBOUND_INT_MSG0) { + u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0); + KdPrint(("hptiop: received outbound msg %x\n", msg)); + BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0); + hptiop_os_message_callback(hba, msg); + ret = 1; + } + + if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) { + hptiop_drain_outbound_queue_itl(hba); + ret = 1; + } + + return ret; +} + +static void hptiop_request_callback_mv(struct hpt_iop_hba * hba, + u_int64_t _tag) +{ + u_int32_t context = (u_int32_t)_tag; + + if (context & MVIOP_CMD_TYPE_SCSI) { + struct hpt_iop_srb *srb; + struct hpt_iop_request_scsi_command *req; + union ccb *ccb; + u_int8_t *cdb; + + srb = hba->srb[context >> 
MVIOP_REQUEST_NUMBER_START_BIT]; + req = (struct hpt_iop_request_scsi_command *)srb; + ccb = (union ccb *)srb->ccb; + if (ccb->ccb_h.flags & CAM_CDB_POINTER) + cdb = ccb->csio.cdb_io.cdb_ptr; + else + cdb = ccb->csio.cdb_io.cdb_bytes; + + if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */ + ccb->ccb_h.status = CAM_REQ_CMP; + goto scsi_done; + } + if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT) + req->header.result = IOP_RESULT_SUCCESS; + + switch (req->header.result) { + case IOP_RESULT_SUCCESS: + switch (ccb->ccb_h.flags & CAM_DIR_MASK) { + case CAM_DIR_IN: + bus_dmamap_sync(hba->io_dmat, + srb->dma_map, BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(hba->io_dmat, srb->dma_map); + break; + case CAM_DIR_OUT: + bus_dmamap_sync(hba->io_dmat, + srb->dma_map, BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(hba->io_dmat, srb->dma_map); + break; + } + ccb->ccb_h.status = CAM_REQ_CMP; + break; + case IOP_RESULT_BAD_TARGET: + ccb->ccb_h.status = CAM_DEV_NOT_THERE; + break; + case IOP_RESULT_BUSY: + ccb->ccb_h.status = CAM_BUSY; + break; + case IOP_RESULT_INVALID_REQUEST: + ccb->ccb_h.status = CAM_REQ_INVALID; + break; + case IOP_RESULT_FAIL: + ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; + break; + case IOP_RESULT_RESET: + ccb->ccb_h.status = CAM_BUSY; + break; + case IOP_RESULT_CHECK_CONDITION: + memcpy(&ccb->csio.sense_data, &req->sg_list, + MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data))); + ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; + ccb->ccb_h.status |= CAM_AUTOSNS_VALID; + ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; + break; + default: + ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; + break; + } +scsi_done: + ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length; + + hptiop_free_srb(hba, srb); + xpt_done(ccb); + } else if (context & MVIOP_CMD_TYPE_IOCTL) { + struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr; + if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT) + hba->config_done = 1; + else + hba->config_done = -1; + wakeup(req); + } else if (context & + (MVIOP_CMD_TYPE_SET_CONFIG | + MVIOP_CMD_TYPE_GET_CONFIG)) + hba->config_done = 1; + else { + device_printf(hba->pcidev, "wrong callback type\n"); + } +} + +static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba) +{ + u_int64_t req; + + while ((req = hptiop_mv_outbound_read(hba))) { + if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) { + if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) { + hptiop_request_callback_mv(hba, req); + } + } + } +} + +static int hptiop_intr_mv(struct hpt_iop_hba * hba) +{ + u_int32_t status; + int ret = 0; + + status = BUS_SPACE_RD4_MV0(outbound_doorbell); + + if (status) + BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status); + + if (status & MVIOP_MU_OUTBOUND_INT_MSG) { + u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg); + KdPrint(("hptiop: received outbound msg %x\n", msg)); + hptiop_os_message_callback(hba, msg); + ret = 1; + } + + if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) { + hptiop_drain_outbound_queue_mv(hba); + ret = 1; + } + + return ret; +} + +static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba, + u_int32_t req32, u_int32_t millisec) +{ + u_int32_t i; + u_int64_t temp64; + + BUS_SPACE_WRT4_ITL(inbound_queue, req32); + BUS_SPACE_RD4_ITL(outbound_intstatus); + + for (i = 0; i < millisec; i++) { + hptiop_intr_itl(hba); + bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 + + offsetof(struct hpt_iop_request_header, context), + (u_int32_t *)&temp64, 2); + if (temp64) + return 0; + DELAY(1000); + } + + return -1; +} + +static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba, + 
void *req, u_int32_t millisec) +{ + u_int32_t i; + u_int64_t phy_addr; + hba->config_done = 0; + + phy_addr = hba->ctlcfgcmd_phy | + (u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT; + ((struct hpt_iop_request_get_config *)req)->header.flags |= + IOP_REQUEST_FLAG_SYNC_REQUEST | + IOP_REQUEST_FLAG_OUTPUT_CONTEXT; + hptiop_mv_inbound_write(phy_addr, hba); + BUS_SPACE_RD4_MV0(outbound_intmask); + + for (i = 0; i < millisec; i++) { + hptiop_intr_mv(hba); + if (hba->config_done) + return 0; + DELAY(1000); + } + return -1; +} + +static int hptiop_send_sync_msg(struct hpt_iop_hba *hba, + u_int32_t msg, u_int32_t millisec) +{ + u_int32_t i; + + hba->msg_done = 0; + hba->ops->post_msg(hba, msg); + + for (i=0; i<millisec; i++) { + hba->ops->iop_intr(hba); + if (hba->msg_done) + break; + DELAY(1000); + } + + return hba->msg_done? 0 : -1; +} + +static int hptiop_get_config_itl(struct hpt_iop_hba * hba, + struct hpt_iop_request_get_config * config) +{ + u_int32_t req32; + + config->header.size = sizeof(struct hpt_iop_request_get_config); + config->header.type = IOP_REQUEST_TYPE_GET_CONFIG; + config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; + config->header.result = IOP_RESULT_PENDING; + config->header.context = 0; + + req32 = BUS_SPACE_RD4_ITL(inbound_queue); + if (req32 == IOPMU_QUEUE_EMPTY) + return -1; + + bus_space_write_region_4(hba->bar0t, hba->bar0h, + req32, (u_int32_t *)config, + sizeof(struct hpt_iop_request_header) >> 2); + + if (hptiop_send_sync_request_itl(hba, req32, 20000)) { + KdPrint(("hptiop: get config send cmd failed")); + return -1; + } + + bus_space_read_region_4(hba->bar0t, hba->bar0h, + req32, (u_int32_t *)config, + sizeof(struct hpt_iop_request_get_config) >> 2); + + BUS_SPACE_WRT4_ITL(outbound_queue, req32); + + return 0; +} + +static int hptiop_get_config_mv(struct hpt_iop_hba * hba, + struct hpt_iop_request_get_config * config) +{ + struct hpt_iop_request_get_config *req; + + if (!(req = hba->ctlcfg_ptr)) + return -1; + + req->header.flags = 0; + req->header.type = IOP_REQUEST_TYPE_GET_CONFIG; + req->header.size = sizeof(struct hpt_iop_request_get_config); + req->header.result = IOP_RESULT_PENDING; + req->header.context = MVIOP_CMD_TYPE_GET_CONFIG; + + if (hptiop_send_sync_request_mv(hba, req, 20000)) { + KdPrint(("hptiop: get config send cmd failed")); + return -1; + } + + *config = *req; + return 0; +} + +static int hptiop_set_config_itl(struct hpt_iop_hba *hba, + struct hpt_iop_request_set_config *config) +{ + u_int32_t req32; + + req32 = BUS_SPACE_RD4_ITL(inbound_queue); + + if (req32 == IOPMU_QUEUE_EMPTY) + return -1; + + config->header.size = sizeof(struct hpt_iop_request_set_config); + config->header.type = IOP_REQUEST_TYPE_SET_CONFIG; + config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; + config->header.result = IOP_RESULT_PENDING; + config->header.context = 0; + + bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, + (u_int32_t *)config, + sizeof(struct hpt_iop_request_set_config) >> 2); + + if (hptiop_send_sync_request_itl(hba, req32, 20000)) { + KdPrint(("hptiop: set config send cmd failed")); + return -1; + } + + BUS_SPACE_WRT4_ITL(outbound_queue, req32); + + return 0; +} + +static int hptiop_set_config_mv(struct hpt_iop_hba *hba, + struct hpt_iop_request_set_config *config) +{ + struct hpt_iop_request_set_config *req; + + if (!(req = hba->ctlcfg_ptr)) + return -1; + + memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header), + (u_int8_t *)config + sizeof(struct hpt_iop_request_header), + sizeof(struct hpt_iop_request_set_config) - + sizeof(struct hpt_iop_request_header)); + 
req->header.flags = 0; + req->header.type = IOP_REQUEST_TYPE_SET_CONFIG; + req->header.size = sizeof(struct hpt_iop_request_set_config); + req->header.result = IOP_RESULT_PENDING; + req->header.context = MVIOP_CMD_TYPE_SET_CONFIG; + + if (hptiop_send_sync_request_mv(hba, req, 20000)) { + KdPrint(("hptiop: set config send cmd failed")); + return -1; + } + + return 0; +} + +static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba, + u_int32_t req32, + struct hpt_iop_ioctl_param *pParams) +{ + u_int64_t temp64; + struct hpt_iop_request_ioctl_command req; + + if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) > + (hba->max_request_size - + offsetof(struct hpt_iop_request_ioctl_command, buf))) { + device_printf(hba->pcidev, "request size beyond max value"); + return -1; + } + + req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf) + + pParams->nInBufferSize; + req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND; + req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST; + req.header.result = IOP_RESULT_PENDING; + req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu; + req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode); + req.inbuf_size = pParams->nInBufferSize; + req.outbuf_size = pParams->nOutBufferSize; + req.bytes_returned = 0; + + bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req, + offsetof(struct hpt_iop_request_ioctl_command, buf)>>2); + + hptiop_lock_adapter(hba); + + BUS_SPACE_WRT4_ITL(inbound_queue, req32); + BUS_SPACE_RD4_ITL(outbound_intstatus); + + bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 + + offsetof(struct hpt_iop_request_ioctl_command, header.context), + (u_int32_t *)&temp64, 2); + while (temp64) { + if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32), + 0, "hptctl", HPT_OSM_TIMEOUT)==0) + break; + hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); + bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 + + offsetof(struct hpt_iop_request_ioctl_command, + header.context), + (u_int32_t *)&temp64, 2); + } + + hptiop_unlock_adapter(hba); + return 0; +} + +static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size) +{ + unsigned char byte; + int i; + + for (i=0; i<size; i++) { + if (copyin((u_int8_t *)user + i, &byte, 1)) + return -1; + bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte); + } + + return 0; +} + +static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size) +{ + unsigned char byte; + int i; + + for (i=0; i<size; i++) { + byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i); + if (copyout(&byte, (u_int8_t *)user + i, 1)) + return -1; + } + + return 0; +} + +static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba, + struct hpt_iop_ioctl_param * pParams) +{ + u_int32_t req32; + u_int32_t result; + + if ((pParams->Magic != HPT_IOCTL_MAGIC) && + (pParams->Magic != HPT_IOCTL_MAGIC32)) + return EFAULT; + + req32 = BUS_SPACE_RD4_ITL(inbound_queue); + if (req32 == IOPMU_QUEUE_EMPTY) + return EFAULT; + + if (pParams->nInBufferSize) + if (hptiop_bus_space_copyin(hba, req32 + + offsetof(struct hpt_iop_request_ioctl_command, buf), + (void *)pParams->lpInBuffer, pParams->nInBufferSize)) + goto invalid; + + if (hptiop_post_ioctl_command_itl(hba, req32, pParams)) + goto invalid; + + result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 + + offsetof(struct hpt_iop_request_ioctl_command, + header.result)); + + if (result == IOP_RESULT_SUCCESS) { + if (pParams->nOutBufferSize) + if (hptiop_bus_space_copyout(hba, req32 + + offsetof(struct hpt_iop_request_ioctl_command, buf) + + ((pParams->nInBufferSize + 3) & ~3), + (void 
*)pParams->lpOutBuffer, pParams->nOutBufferSize)) + goto invalid; + + if (pParams->lpBytesReturned) { + if (hptiop_bus_space_copyout(hba, req32 + + offsetof(struct hpt_iop_request_ioctl_command, bytes_returned), + (void *)pParams->lpBytesReturned, sizeof(unsigned long))) + goto invalid; + } + + BUS_SPACE_WRT4_ITL(outbound_queue, req32); + + return 0; + } else{ +invalid: + BUS_SPACE_WRT4_ITL(outbound_queue, req32); + + return EFAULT; + } +} + +static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba, + struct hpt_iop_request_ioctl_command *req, + struct hpt_iop_ioctl_param *pParams) +{ + u_int64_t req_phy; + int size = 0; + + if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) > + (hba->max_request_size - + offsetof(struct hpt_iop_request_ioctl_command, buf))) { + device_printf(hba->pcidev, "request size beyond max value"); + return -1; + } + + req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode); + req->inbuf_size = pParams->nInBufferSize; + req->outbuf_size = pParams->nOutBufferSize; + req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf) + + pParams->nInBufferSize; + req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL; + req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND; + req->header.result = IOP_RESULT_PENDING; + req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT; + size = req->header.size >> 8; + size = size > 3 ? 3 : size; + req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size; + hptiop_mv_inbound_write(req_phy, hba); + + BUS_SPACE_RD4_MV0(outbound_intmask); + + while (hba->config_done == 0) { + if (hptiop_sleep(hba, req, 0, + "hptctl", HPT_OSM_TIMEOUT)==0) + continue; + hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); + } + return 0; +} + +static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba, + struct hpt_iop_ioctl_param *pParams) +{ + struct hpt_iop_request_ioctl_command *req; + + if ((pParams->Magic != HPT_IOCTL_MAGIC) && + (pParams->Magic != HPT_IOCTL_MAGIC32)) + return EFAULT; + + req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr); + hba->config_done = 0; + hptiop_lock_adapter(hba); + if (pParams->nInBufferSize) + if (copyin((void *)pParams->lpInBuffer, + req->buf, pParams->nInBufferSize)) + goto invalid; + if (hptiop_post_ioctl_command_mv(hba, req, pParams)) + goto invalid; + + if (hba->config_done == 1) { + if (pParams->nOutBufferSize) + if (copyout(req->buf + + ((pParams->nInBufferSize + 3) & ~3), + (void *)pParams->lpOutBuffer, + pParams->nOutBufferSize)) + goto invalid; + + if (pParams->lpBytesReturned) + if (copyout(&req->bytes_returned, + (void*)pParams->lpBytesReturned, + sizeof(u_int32_t))) + goto invalid; + hptiop_unlock_adapter(hba); + return 0; + } else{ +invalid: + hptiop_unlock_adapter(hba); + return EFAULT; + } +} + +static int hptiop_rescan_bus(struct hpt_iop_hba * hba) +{ + union ccb *ccb; + + if ((ccb = xpt_alloc_ccb()) == NULL) + return(ENOMEM); + if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(hba->sim), + CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { + xpt_free_ccb(ccb); + return(EIO); + } + + xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/); + ccb->ccb_h.func_code = XPT_SCAN_BUS; + ccb->ccb_h.cbfcnp = hptiop_bus_scan_cb; + ccb->crcn.flags = CAM_FLAG_NONE; + xpt_action(ccb); + return(0); +} + +static void hptiop_bus_scan_cb(struct cam_periph *periph, union ccb *ccb) +{ + xpt_free_path(ccb->ccb_h.path); + kfree(ccb, M_TEMP); +} + +static bus_dmamap_callback_t hptiop_map_srb; +static bus_dmamap_callback_t 
hptiop_post_scsi_command; +static bus_dmamap_callback_t hptiop_mv_map_ctlcfg; + +static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba) +{ + hba->bar0_rid = 0x10; + hba->bar0_res = bus_alloc_resource_any(hba->pcidev, + SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE); + + if (hba->bar0_res == NULL) { + device_printf(hba->pcidev, + "failed to get iop base adrress.\n"); + return -1; + } + hba->bar0t = rman_get_bustag(hba->bar0_res); + hba->bar0h = rman_get_bushandle(hba->bar0_res); + hba->u.itl.mu = (struct hpt_iopmu_itl *) + rman_get_virtual(hba->bar0_res); + + if (!hba->u.itl.mu) { + bus_release_resource(hba->pcidev, SYS_RES_MEMORY, + hba->bar0_rid, hba->bar0_res); + device_printf(hba->pcidev, "alloc mem res failed\n"); + return -1; + } + + return 0; +} + +static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba) +{ + hba->bar0_rid = 0x10; + hba->bar0_res = bus_alloc_resource_any(hba->pcidev, + SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE); + + if (hba->bar0_res == NULL) { + device_printf(hba->pcidev, "failed to get iop bar0.\n"); + return -1; + } + hba->bar0t = rman_get_bustag(hba->bar0_res); + hba->bar0h = rman_get_bushandle(hba->bar0_res); + hba->u.mv.regs = (struct hpt_iopmv_regs *) + rman_get_virtual(hba->bar0_res); + + if (!hba->u.mv.regs) { + bus_release_resource(hba->pcidev, SYS_RES_MEMORY, + hba->bar0_rid, hba->bar0_res); + device_printf(hba->pcidev, "alloc bar0 mem res failed\n"); + return -1; + } + + hba->bar2_rid = 0x18; + hba->bar2_res = bus_alloc_resource_any(hba->pcidev, + SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE); + + if (hba->bar2_res == NULL) { + bus_release_resource(hba->pcidev, SYS_RES_MEMORY, + hba->bar0_rid, hba->bar0_res); + device_printf(hba->pcidev, "failed to get iop bar2.\n"); + return -1; + } + + hba->bar2t = rman_get_bustag(hba->bar2_res); + hba->bar2h = rman_get_bushandle(hba->bar2_res); + hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res); + + if (!hba->u.mv.mu) { + bus_release_resource(hba->pcidev, SYS_RES_MEMORY, + hba->bar0_rid, hba->bar0_res); + bus_release_resource(hba->pcidev, SYS_RES_MEMORY, + hba->bar2_rid, hba->bar2_res); + device_printf(hba->pcidev, "alloc mem bar2 res failed\n"); + return -1; + } + + return 0; +} + +static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba) +{ + if (hba->bar0_res) + bus_release_resource(hba->pcidev, SYS_RES_MEMORY, + hba->bar0_rid, hba->bar0_res); +} + +static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba) +{ + if (hba->bar0_res) + bus_release_resource(hba->pcidev, SYS_RES_MEMORY, + hba->bar0_rid, hba->bar0_res); + if (hba->bar2_res) + bus_release_resource(hba->pcidev, SYS_RES_MEMORY, + hba->bar2_rid, hba->bar2_res); +} + +static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba) +{ + if (bus_dma_tag_create(hba->parent_dmat, + 1, + 0, + BUS_SPACE_MAXADDR_32BIT, + BUS_SPACE_MAXADDR, + NULL, NULL, + 0x800 - 0x8, + 1, + BUS_SPACE_MAXSIZE_32BIT, + BUS_DMA_ALLOCNOW, + &hba->ctlcfg_dmat)) { + device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n"); + return -1; + } + + if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr, + BUS_DMA_WAITOK | BUS_DMA_COHERENT, + &hba->ctlcfg_dmamap) != 0) { + device_printf(hba->pcidev, + "bus_dmamem_alloc failed!\n"); + bus_dma_tag_destroy(hba->ctlcfg_dmat); + return -1; + } + + if (bus_dmamap_load(hba->ctlcfg_dmat, + hba->ctlcfg_dmamap, hba->ctlcfg_ptr, + MVIOP_IOCTLCFG_SIZE, + hptiop_mv_map_ctlcfg, hba, 0)) { + device_printf(hba->pcidev, "bus_dmamap_load failed!\n"); + if (hba->ctlcfg_dmat) + bus_dmamem_free(hba->ctlcfg_dmat, + 
hba->ctlcfg_ptr, hba->ctlcfg_dmamap); + bus_dma_tag_destroy(hba->ctlcfg_dmat); + return -1; + } + + return 0; +} + +static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba) +{ + if (hba->ctlcfg_dmat) { + bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap); + bus_dmamem_free(hba->ctlcfg_dmat, + hba->ctlcfg_ptr, hba->ctlcfg_dmamap); + bus_dma_tag_destroy(hba->ctlcfg_dmat); + } + + return 0; +} + +/* + * CAM driver interface + */ +static device_method_t driver_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, hptiop_probe), + DEVMETHOD(device_attach, hptiop_attach), + DEVMETHOD(device_detach, hptiop_detach), + DEVMETHOD(device_shutdown, hptiop_shutdown), + { 0, 0 } +}; + +static struct hptiop_adapter_ops hptiop_itl_ops = { + .iop_wait_ready = hptiop_wait_ready_itl, + .internal_memalloc = 0, + .internal_memfree = 0, + .alloc_pci_res = hptiop_alloc_pci_res_itl, + .release_pci_res = hptiop_release_pci_res_itl, + .enable_intr = hptiop_enable_intr_itl, + .disable_intr = hptiop_disable_intr_itl, + .get_config = hptiop_get_config_itl, + .set_config = hptiop_set_config_itl, + .iop_intr = hptiop_intr_itl, + .post_msg = hptiop_post_msg_itl, + .post_req = hptiop_post_req_itl, + .do_ioctl = hptiop_do_ioctl_itl, +}; + +static struct hptiop_adapter_ops hptiop_mv_ops = { + .iop_wait_ready = hptiop_wait_ready_mv, + .internal_memalloc = hptiop_internal_memalloc_mv, + .internal_memfree = hptiop_internal_memfree_mv, + .alloc_pci_res = hptiop_alloc_pci_res_mv, + .release_pci_res = hptiop_release_pci_res_mv, + .enable_intr = hptiop_enable_intr_mv, + .disable_intr = hptiop_disable_intr_mv, + .get_config = hptiop_get_config_mv, + .set_config = hptiop_set_config_mv, + .iop_intr = hptiop_intr_mv, + .post_msg = hptiop_post_msg_mv, + .post_req = hptiop_post_req_mv, + .do_ioctl = hptiop_do_ioctl_mv, +}; + +static driver_t hptiop_pci_driver = { + driver_name, + driver_methods, + sizeof(struct hpt_iop_hba) +}; + +DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0); + +static int hptiop_probe(device_t dev) +{ + struct hpt_iop_hba *hba; + u_int32_t id; + static char buf[256]; + int sas = 0; + struct hptiop_adapter_ops *ops; + + if (pci_get_vendor(dev) != 0x1103) + return (ENXIO); + + id = pci_get_device(dev); + + switch (id) { + case 0x4320: + sas = 1; + case 0x3220: + case 0x3320: + case 0x3410: + case 0x3520: + case 0x3510: + case 0x3511: + case 0x3521: + case 0x3522: + case 0x3540: + ops = &hptiop_itl_ops; + break; + case 0x3120: + case 0x3122: + case 0x3020: + ops = &hptiop_mv_ops; + break; + default: + return (ENXIO); + } + + device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n", + pci_get_bus(dev), pci_get_slot(dev), + pci_get_function(dev), pci_get_irq(dev)); + + ksprintf(buf, "RocketRAID %x %s Controller\n", + id, sas ? 
"SAS" : "SATA"); + device_set_desc_copy(dev, buf); + + hba = (struct hpt_iop_hba *)device_get_softc(dev); + bzero(hba, sizeof(struct hpt_iop_hba)); + hba->ops = ops; + + KdPrint(("hba->ops=%p\n", hba->ops)); + return 0; +} + +static int hptiop_attach(device_t dev) +{ + struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev); + struct hpt_iop_request_get_config iop_config; + struct hpt_iop_request_set_config set_config; + int rid = 0; + struct cam_devq *devq; + struct ccb_setasync ccb; + u_int32_t unit = device_get_unit(dev); + + device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n", + unit, driver_version); + + KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit, + pci_get_bus(dev), pci_get_slot(dev), + pci_get_function(dev), hba->ops)); + + pci_enable_busmaster(dev); + hba->pcidev = dev; + hba->pciunit = unit; + + if (hba->ops->alloc_pci_res(hba)) + return ENXIO; + + if (hba->ops->iop_wait_ready(hba, 2000)) { + device_printf(dev, "adapter is not ready\n"); + goto release_pci_res; + } + + lockinit(&hba->lock, "hptioplock", 0, LK_CANRECURSE); + + if (bus_dma_tag_create(NULL,/* parent */ + 1, /* alignment */ + 0, /* boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ + BUS_SPACE_UNRESTRICTED, /* nsegments */ + BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ + 0, /* flags */ + &hba->parent_dmat /* tag */)) + { + device_printf(dev, "alloc parent_dmat failed\n"); + goto release_pci_res; + } + + if (hba->ops->internal_memalloc) { + if (hba->ops->internal_memalloc(hba)) { + device_printf(dev, "alloc srb_dmat failed\n"); + goto destroy_parent_tag; + } + } + + if (hba->ops->get_config(hba, &iop_config)) { + device_printf(dev, "get iop config failed.\n"); + goto get_config_failed; + } + + hba->firmware_version = iop_config.firmware_version; + hba->interface_version = iop_config.interface_version; + hba->max_requests = iop_config.max_requests; + hba->max_devices = iop_config.max_devices; + hba->max_request_size = iop_config.request_size; + hba->max_sg_count = iop_config.max_sg_count; + + if (bus_dma_tag_create(hba->parent_dmat,/* parent */ + 4, /* alignment */ + BUS_SPACE_MAXADDR_32BIT+1, /* boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + PAGE_SIZE * (hba->max_sg_count-1), /* maxsize */ + hba->max_sg_count, /* nsegments */ + 0x20000, /* maxsegsize */ + BUS_DMA_ALLOCNOW, /* flags */ + &hba->io_dmat /* tag */)) + { + device_printf(dev, "alloc io_dmat failed\n"); + goto get_config_failed; + } + + if (bus_dma_tag_create(hba->parent_dmat,/* parent */ + 1, /* alignment */ + 0, /* boundary */ + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20, + 1, /* nsegments */ + BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ + 0, /* flags */ + &hba->srb_dmat /* tag */)) + { + device_printf(dev, "alloc srb_dmat failed\n"); + goto destroy_io_dmat; + } + + if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr, + BUS_DMA_WAITOK | BUS_DMA_COHERENT, + &hba->srb_dmamap) != 0) + { + device_printf(dev, "srb bus_dmamem_alloc failed!\n"); + goto destroy_srb_dmat; + } + + if (bus_dmamap_load(hba->srb_dmat, + hba->srb_dmamap, hba->uncached_ptr, + (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20, + hptiop_map_srb, hba, 0)) + { + device_printf(dev, "bus_dmamap_load failed!\n"); + goto srb_dmamem_free; + } + + 
if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) { + device_printf(dev, "cam_simq_alloc failed\n"); + goto srb_dmamap_unload; + } + + hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name, + hba, unit, &sim_mplock, hba->max_requests - 1, 1, devq); + if (!hba->sim) { + device_printf(dev, "cam_sim_alloc failed\n"); + cam_simq_release(devq); + goto srb_dmamap_unload; + } + if (xpt_bus_register(hba->sim, 0) != CAM_SUCCESS) + { + device_printf(dev, "xpt_bus_register failed\n"); + goto free_cam_sim; + } + + if (xpt_create_path(&hba->path, /*periph */ NULL, + cam_sim_path(hba->sim), CAM_TARGET_WILDCARD, + CAM_LUN_WILDCARD) != CAM_REQ_CMP) { + device_printf(dev, "xpt_create_path failed\n"); + goto deregister_xpt_bus; + } + + bzero(&set_config, sizeof(set_config)); + set_config.iop_id = unit; + set_config.vbus_id = cam_sim_path(hba->sim); + set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE; + + if (hba->ops->set_config(hba, &set_config)) { + device_printf(dev, "set iop config failed.\n"); + goto free_hba_path; + } + + xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5); + ccb.ccb_h.func_code = XPT_SASYNC_CB; + ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE); + ccb.callback = hptiop_async; + ccb.callback_arg = hba->sim; + xpt_action((union ccb *)&ccb); + + rid = 0; + if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ, + &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { + device_printf(dev, "allocate irq failed!\n"); + goto free_hba_path; + } + + if (bus_setup_intr(hba->pcidev, hba->irq_res, 0, + hptiop_pci_intr, hba, &hba->irq_handle, NULL)) + { + device_printf(dev, "allocate intr function failed!\n"); + goto free_irq_resource; + } + + if (hptiop_send_sync_msg(hba, + IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) { + device_printf(dev, "fail to start background task\n"); + goto teartown_irq_resource; + } + + hba->ops->enable_intr(hba); + + hba->ioctl_dev = make_dev(&hptiop_ops, unit, + UID_ROOT, GID_WHEEL /*GID_OPERATOR*/, + S_IRUSR | S_IWUSR, "%s%d", driver_name, unit); + + hba->ioctl_dev->si_drv1 = hba; + + hptiop_rescan_bus(hba); + + return 0; + + +teartown_irq_resource: + bus_teardown_intr(dev, hba->irq_res, hba->irq_handle); + +free_irq_resource: + bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res); + +free_hba_path: + xpt_free_path(hba->path); + +deregister_xpt_bus: + xpt_bus_deregister(cam_sim_path(hba->sim)); + +free_cam_sim: + cam_sim_free(hba->sim); + +srb_dmamap_unload: + if (hba->uncached_ptr) + bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap); + +srb_dmamem_free: + if (hba->uncached_ptr) + bus_dmamem_free(hba->srb_dmat, + hba->uncached_ptr, hba->srb_dmamap); + +destroy_srb_dmat: + if (hba->srb_dmat) + bus_dma_tag_destroy(hba->srb_dmat); + +destroy_io_dmat: + if (hba->io_dmat) + bus_dma_tag_destroy(hba->io_dmat); + +get_config_failed: + if (hba->ops->internal_memfree) + hba->ops->internal_memfree(hba); + +destroy_parent_tag: + if (hba->parent_dmat) + bus_dma_tag_destroy(hba->parent_dmat); + +release_pci_res: + if (hba->ops->release_pci_res) + hba->ops->release_pci_res(hba); + + return ENXIO; +} + +static int hptiop_detach(device_t dev) +{ + struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev); + int i; + int error = EBUSY; + + hptiop_lock_adapter(hba); + for (i = 0; i < hba->max_devices; i++) + if (hptiop_os_query_remove_device(hba, i)) { + device_printf(dev, "%d file system is busy. 
id=%d", + hba->pciunit, i); + goto out; + } + + if ((error = hptiop_shutdown(dev)) != 0) + goto out; + if (hptiop_send_sync_msg(hba, + IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000)) + goto out; + + hptiop_release_resource(hba); + error = 0; +out: + hptiop_unlock_adapter(hba); + return error; +} + +static int hptiop_shutdown(device_t dev) +{ + struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev); + + int error = 0; + + if (hba->flag & HPT_IOCTL_FLAG_OPEN) { + device_printf(dev, "%d device is busy", hba->pciunit); + return EBUSY; + } + + hba->ops->disable_intr(hba); + + if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000)) + error = EBUSY; + + return error; +} + +static void hptiop_pci_intr(void *arg) +{ + struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg; + hptiop_lock_adapter(hba); + hba->ops->iop_intr(hba); + hptiop_unlock_adapter(hba); +} + +static void hptiop_poll(struct cam_sim *sim) +{ + hptiop_pci_intr(cam_sim_softc(sim)); +} + +static void hptiop_async(void * callback_arg, u_int32_t code, + struct cam_path * path, void * arg) +{ +} + +static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba) +{ + BUS_SPACE_WRT4_ITL(outbound_intmask, + ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0)); +} + +static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba) +{ + u_int32_t int_mask; + + int_mask = BUS_SPACE_RD4_MV0(outbound_intmask); + + int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE + | MVIOP_MU_OUTBOUND_INT_MSG; + BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask); +} + +static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba) +{ + u_int32_t int_mask; + + int_mask = BUS_SPACE_RD4_ITL(outbound_intmask); + + int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0; + BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask); + BUS_SPACE_RD4_ITL(outbound_intstatus); +} + +static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba) +{ + u_int32_t int_mask; + int_mask = BUS_SPACE_RD4_MV0(outbound_intmask); + + int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG + | MVIOP_MU_OUTBOUND_INT_POSTQUEUE); + BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask); + BUS_SPACE_RD4_MV0(outbound_intmask); +} + +static int hptiop_reset_adapter(struct hpt_iop_hba * hba) +{ + return hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000); +} + +static void *hptiop_get_srb(struct hpt_iop_hba * hba) +{ + struct hpt_iop_srb * srb; + + if (hba->srb_list) { + srb = hba->srb_list; + hba->srb_list = srb->next; + return srb; + } + + return NULL; +} + +static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb) +{ + srb->next = hba->srb_list; + hba->srb_list = srb; +} + +static void hptiop_action(struct cam_sim *sim, union ccb *ccb) +{ + struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim); + struct hpt_iop_srb * srb; + + switch (ccb->ccb_h.func_code) { + + case XPT_SCSI_IO: + hptiop_lock_adapter(hba); + if (ccb->ccb_h.target_lun != 0 || + ccb->ccb_h.target_id >= hba->max_devices || + (ccb->ccb_h.flags & CAM_CDB_PHYS)) + { + ccb->ccb_h.status = CAM_TID_INVALID; + xpt_done(ccb); + goto scsi_done; + } + + if ((srb = hptiop_get_srb(hba)) == NULL) { + device_printf(hba->pcidev, "srb allocated failed"); + ccb->ccb_h.status = CAM_REQ_CMP_ERR; + xpt_done(ccb); + goto scsi_done; + } + + srb->ccb = ccb; + + if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) + hptiop_post_scsi_command(srb, NULL, 0, 0); + else if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) { + if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) { + int error; + + error = bus_dmamap_load(hba->io_dmat, + 
srb->dma_map, + ccb->csio.data_ptr, + ccb->csio.dxfer_len, + hptiop_post_scsi_command, + srb, 0); + + if (error && error != EINPROGRESS) { + device_printf(hba->pcidev, + "%d bus_dmamap_load error %d", + hba->pciunit, error); + xpt_freeze_simq(hba->sim, 1); + ccb->ccb_h.status = CAM_REQ_CMP_ERR; +invalid: + hptiop_free_srb(hba, srb); + xpt_done(ccb); + goto scsi_done; + } + } + else { + device_printf(hba->pcidev, + "CAM_DATA_PHYS not supported"); + ccb->ccb_h.status = CAM_REQ_CMP_ERR; + goto invalid; + } + } + else { + struct bus_dma_segment *segs; + + if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 || + (ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) { + device_printf(hba->pcidev, "SCSI cmd failed"); + ccb->ccb_h.status=CAM_PROVIDE_FAIL; + goto invalid; + } + + segs = (struct bus_dma_segment *)ccb->csio.data_ptr; + hptiop_post_scsi_command(srb, segs, + ccb->csio.sglist_cnt, 0); + } + +scsi_done: + hptiop_unlock_adapter(hba); + return; + + case XPT_RESET_BUS: + device_printf(hba->pcidev, "reset adapter"); + hptiop_lock_adapter(hba); + hba->msg_done = 0; + hptiop_reset_adapter(hba); + hptiop_unlock_adapter(hba); + break; + + case XPT_GET_TRAN_SETTINGS: + case XPT_SET_TRAN_SETTINGS: + ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; + break; + + case XPT_CALC_GEOMETRY: + ccb->ccg.heads = 255; + ccb->ccg.secs_per_track = 63; + ccb->ccg.cylinders = ccb->ccg.volume_size / + (ccb->ccg.heads * ccb->ccg.secs_per_track); + ccb->ccb_h.status = CAM_REQ_CMP; + break; + + case XPT_PATH_INQ: + { + struct ccb_pathinq *cpi = &ccb->cpi; + + cpi->version_num = 1; + cpi->hba_inquiry = PI_SDTR_ABLE; + cpi->target_sprt = 0; + cpi->hba_misc = PIM_NOBUSRESET; + cpi->hba_eng_cnt = 0; + cpi->max_target = hba->max_devices; + cpi->max_lun = 0; + cpi->unit_number = cam_sim_unit(sim); + cpi->bus_id = cam_sim_bus(sim); + cpi->initiator_id = hba->max_devices; + cpi->base_transfer_speed = 3300; + + strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); + strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN); + strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); + cpi->transport = XPORT_SPI; + cpi->transport_version = 2; + cpi->protocol = PROTO_SCSI; + cpi->protocol_version = SCSI_REV_2; + cpi->ccb_h.status = CAM_REQ_CMP; + break; + } + + default: + ccb->ccb_h.status = CAM_REQ_INVALID; + break; + } + + xpt_done(ccb); + return; +} + +static void hptiop_post_req_itl(struct hpt_iop_hba *hba, + struct hpt_iop_srb *srb, + bus_dma_segment_t *segs, int nsegs) +{ + int idx; + union ccb *ccb = srb->ccb; + u_int8_t *cdb; + + if (ccb->ccb_h.flags & CAM_CDB_POINTER) + cdb = ccb->csio.cdb_io.cdb_ptr; + else + cdb = ccb->csio.cdb_io.cdb_bytes; + + KdPrint(("ccb=%p %x-%x-%x\n", + ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2))); + + if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) { + u_int32_t iop_req32; + struct hpt_iop_request_scsi_command req; + + iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue); + + if (iop_req32 == IOPMU_QUEUE_EMPTY) { + device_printf(hba->pcidev, "invaild req offset\n"); + ccb->ccb_h.status = CAM_BUSY; + bus_dmamap_unload(hba->io_dmat, srb->dma_map); + hptiop_free_srb(hba, srb); + xpt_done(ccb); + return; + } + + if (ccb->csio.dxfer_len && nsegs > 0) { + struct hpt_iopsg *psg = req.sg_list; + for (idx = 0; idx < nsegs; idx++, psg++) { + psg->pci_address = (u_int64_t)segs[idx].ds_addr; + psg->size = segs[idx].ds_len; + psg->eot = 0; + } + psg[-1].eot = 1; + } + + bcopy(cdb, req.cdb, ccb->csio.cdb_len); + + req.header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list) + + nsegs*sizeof(struct hpt_iopsg); + req.header.type = 
+		req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
+		req.header.flags = 0;
+		req.header.result = IOP_RESULT_PENDING;
+		req.header.context = (u_int64_t)(unsigned long)srb;
+		req.dataxfer_length = ccb->csio.dxfer_len;
+		req.channel = 0;
+		req.target = ccb->ccb_h.target_id;
+		req.lun = ccb->ccb_h.target_lun;
+
+		bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
+			(u_int8_t *)&req, req.header.size);
+
+		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
+			bus_dmamap_sync(hba->io_dmat,
+				srb->dma_map, BUS_DMASYNC_PREREAD);
+		}
+		else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
+			bus_dmamap_sync(hba->io_dmat,
+				srb->dma_map, BUS_DMASYNC_PREWRITE);
+
+		BUS_SPACE_WRT4_ITL(inbound_queue,iop_req32);
+	} else {
+		struct hpt_iop_request_scsi_command *req;
+
+		req = (struct hpt_iop_request_scsi_command *)srb;
+		if (ccb->csio.dxfer_len && nsegs > 0) {
+			struct hpt_iopsg *psg = req->sg_list;
+			for (idx = 0; idx < nsegs; idx++, psg++) {
+				psg->pci_address =
+					(u_int64_t)segs[idx].ds_addr;
+				psg->size = segs[idx].ds_len;
+				psg->eot = 0;
+			}
+			psg[-1].eot = 1;
+		}
+
+		bcopy(cdb, req->cdb, ccb->csio.cdb_len);
+
+		req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
+		req->header.result = IOP_RESULT_PENDING;
+		req->dataxfer_length = ccb->csio.dxfer_len;
+		req->channel = 0;
+		req->target = ccb->ccb_h.target_id;
+		req->lun = ccb->ccb_h.target_lun;
+		req->header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list)
+			+ nsegs*sizeof(struct hpt_iopsg);
+		req->header.context = (u_int64_t)srb->index |
+			IOPMU_QUEUE_ADDR_HOST_BIT;
+		req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
+
+		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
+			bus_dmamap_sync(hba->io_dmat,
+				srb->dma_map, BUS_DMASYNC_PREREAD);
+		}else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
+			bus_dmamap_sync(hba->io_dmat,
+				srb->dma_map, BUS_DMASYNC_PREWRITE);
+		}
+
+		if (hba->firmware_version > 0x01020000
+			|| hba->interface_version > 0x01020000) {
+			u_int32_t size_bits;
+
+			if (req->header.size < 256)
+				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
+			else if (req->header.size < 512)
+				size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
+			else
+				size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
+					| IOPMU_QUEUE_ADDR_HOST_BIT;
+
+			BUS_SPACE_WRT4_ITL(inbound_queue,
+				(u_int32_t)srb->phy_addr | size_bits);
+		} else
+			BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
+				|IOPMU_QUEUE_ADDR_HOST_BIT);
+	}
+}
+
+static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
+				struct hpt_iop_srb *srb,
+				bus_dma_segment_t *segs, int nsegs)
+{
+	int idx, size;
+	union ccb *ccb = srb->ccb;
+	u_int8_t *cdb;
+	struct hpt_iop_request_scsi_command *req;
+	u_int64_t req_phy;
+
+	req = (struct hpt_iop_request_scsi_command *)srb;
+	req_phy = srb->phy_addr;
+
+	if (ccb->csio.dxfer_len && nsegs > 0) {
+		struct hpt_iopsg *psg = req->sg_list;
+		for (idx = 0; idx < nsegs; idx++, psg++) {
+			psg->pci_address = (u_int64_t)segs[idx].ds_addr;
+			psg->size = segs[idx].ds_len;
+			psg->eot = 0;
+		}
+		psg[-1].eot = 1;
+	}
+	if (ccb->ccb_h.flags & CAM_CDB_POINTER)
+		cdb = ccb->csio.cdb_io.cdb_ptr;
+	else
+		cdb = ccb->csio.cdb_io.cdb_bytes;
+
+	bcopy(cdb, req->cdb, ccb->csio.cdb_len);
+	req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
+	req->header.result = IOP_RESULT_PENDING;
+	req->dataxfer_length = ccb->csio.dxfer_len;
+	req->channel = 0;
+	req->target = ccb->ccb_h.target_id;
+	req->lun = ccb->ccb_h.target_lun;
+	req->header.size = sizeof(struct hpt_iop_request_scsi_command)
+		- sizeof(struct hpt_iopsg)
+		+ nsegs * sizeof(struct hpt_iopsg);
+	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
+		bus_dmamap_sync(hba->io_dmat,
+			srb->dma_map, BUS_DMASYNC_PREREAD);
+	}
+	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
+		bus_dmamap_sync(hba->io_dmat,
+			srb->dma_map, BUS_DMASYNC_PREWRITE);
+	req->header.context = (u_int64_t)srb->index
+		<< MVIOP_REQUEST_NUMBER_START_BIT
+		| MVIOP_CMD_TYPE_SCSI;
+	req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
+	size = req->header.size >> 8;
+	hptiop_mv_inbound_write(req_phy
+		| MVIOP_MU_QUEUE_ADDR_HOST_BIT
+		| (size > 3 ? 3 : size), hba);
+}
+
+static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
+					int nsegs, int error)
+{
+	struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
+	union ccb *ccb = srb->ccb;
+	struct hpt_iop_hba *hba = srb->hba;
+
+	if (error || nsegs > hba->max_sg_count) {
+		KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n",
+			ccb->ccb_h.func_code,
+			ccb->ccb_h.target_id,
+			ccb->ccb_h.target_lun, nsegs));
+		ccb->ccb_h.status = CAM_BUSY;
+		bus_dmamap_unload(hba->io_dmat, srb->dma_map);
+		hptiop_free_srb(hba, srb);
+		xpt_done(ccb);
+		return;
+	}
+
+	hba->ops->post_req(hba, srb, segs, nsegs);
+}
+
+static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
+				int nsegs, int error)
+{
+	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
+	hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
+		& ~(u_int64_t)0x1F;
+	hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
+		& ~0x1F);
+}
+
+static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
+				int nsegs, int error)
+{
+	struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
+	bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
+	struct hpt_iop_srb *srb, *tmp_srb;
+	int i;
+
+	if (error || nsegs == 0) {
+		device_printf(hba->pcidev, "hptiop_map_srb error");
+		return;
+	}
+
+	/* map srb */
+	srb = (struct hpt_iop_srb *)
+		(((unsigned long)hba->uncached_ptr + 0x1F)
+		& ~(unsigned long)0x1F);
+
+	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
+		tmp_srb = (struct hpt_iop_srb *)
+			((char *)srb + i * HPT_SRB_MAX_SIZE);
+		if (((unsigned long)tmp_srb & 0x1F) == 0) {
+			if (bus_dmamap_create(hba->io_dmat,
+				0, &tmp_srb->dma_map)) {
+				device_printf(hba->pcidev, "dmamap create failed");
+				return;
+			}
+
+			bzero(tmp_srb, sizeof(struct hpt_iop_srb));
+			tmp_srb->hba = hba;
+			tmp_srb->index = i;
+			if (hba->ctlcfg_ptr == 0) {/*itl iop*/
+				tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
+					(phy_addr >> 5);
+				if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
+					tmp_srb->srb_flag =
+						HPT_SRB_FLAG_HIGH_MEM_ACESS;
+			} else {
+				tmp_srb->phy_addr = phy_addr;
+			}
+
+			hptiop_free_srb(hba, tmp_srb);
+			hba->srb[i] = tmp_srb;
+			phy_addr += HPT_SRB_MAX_SIZE;
+		}
+		else {
+			device_printf(hba->pcidev, "invalid alignment");
+			return;
+		}
+	}
+}
+
+static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
+{
+	hba->msg_done = 1;
+}
+
+static int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
+						int target_id)
+{
+	struct cam_periph *periph = NULL;
+	struct cam_path *path;
+	int status, retval = 0;
+
+	status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
+
+	if (status == CAM_REQ_CMP) {
+		if ((periph = cam_periph_find(path, "da")) != NULL) {
+			if (periph->refcount >= 1) {
+				device_printf(hba->pcidev, "%d, "
+					"target_id=0x%x, "
+					"refcount=%d",
+					hba->pciunit, target_id, periph->refcount);
+				retval = -1;
+			}
+		}
+		xpt_free_path(path);
+	}
+	return retval;
+}
+
+static void hptiop_release_resource(struct hpt_iop_hba *hba)
+{
+	int i;
+	if (hba->path) {
+		struct ccb_setasync ccb;
+
+		xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
+		ccb.ccb_h.func_code = XPT_SASYNC_CB;
+		ccb.event_enable = 0;
+		ccb.callback = hptiop_async;
+		ccb.callback_arg = hba->sim;
+		xpt_action((union ccb *)&ccb);
+		xpt_free_path(hba->path);
+	}
+
+	if (hba->sim) {
+		xpt_bus_deregister(cam_sim_path(hba->sim));
+		cam_sim_free(hba->sim);
+	}
+
+	if (hba->ctlcfg_dmat) {
+		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
+		bus_dmamem_free(hba->ctlcfg_dmat,
+			hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
+		bus_dma_tag_destroy(hba->ctlcfg_dmat);
+	}
+
+	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
+		struct hpt_iop_srb *srb = hba->srb[i];
+		if (srb->dma_map)
+			bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
+	}
+
+	if (hba->srb_dmat) {
+		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
+		bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
+		bus_dma_tag_destroy(hba->srb_dmat);
+	}
+
+	if (hba->io_dmat)
+		bus_dma_tag_destroy(hba->io_dmat);
+
+	if (hba->parent_dmat)
+		bus_dma_tag_destroy(hba->parent_dmat);
+
+	if (hba->irq_handle)
+		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
+
+	if (hba->irq_res)
+		bus_release_resource(hba->pcidev, SYS_RES_IRQ,
+			0, hba->irq_res);
+
+	if (hba->bar0_res)
+		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
+			hba->bar0_rid, hba->bar0_res);
+	if (hba->bar2_res)
+		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
+			hba->bar2_rid, hba->bar2_res);
+	if (hba->ioctl_dev)
+		destroy_dev(hba->ioctl_dev);
+	dev_ops_remove_minor(&hptiop_ops, device_get_unit(hba->pcidev));
+}
diff --git a/sys/dev/raid/hptiop/hptiop.h b/sys/dev/raid/hptiop/hptiop.h
new file mode 100644
index 0000000000..114c82aa51
--- /dev/null
+++ b/sys/dev/raid/hptiop/hptiop.h
@@ -0,0 +1,390 @@
+/*
+ * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
+ * Copyright (C) 2007-2008 HighPoint Technologies, Inc. All Rights Reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/dev/hptiop/hptiop.h,v 1.5 2009/03/25 06:27:56 delphij Exp $
+ */
+
+#ifndef _HPTIOP_H
+#define _HPTIOP_H
+
+#define DBG 0
+
+#ifdef DBG
+int hpt_iop_dbg_level = 0;
+#define KdPrint(x) do { if (hpt_iop_dbg_level) kprintf x; } while (0)
+#define HPT_ASSERT(x) assert(x)
+#else
+#define KdPrint(x)
+#define HPT_ASSERT(x)
+#endif
+
+#define HPT_SRB_MAX_REQ_SIZE 600
+#define HPT_SRB_MAX_QUEUE_SIZE 0x100
+
+/* beyond 64G mem */
+#define HPT_SRB_FLAG_HIGH_MEM_ACESS 0x1
+#define HPT_SRB_MAX_SIZE ((sizeof(struct hpt_iop_srb) + 0x1f) & ~0x1f)
+#ifndef offsetof
+#define offsetof(TYPE, MEM) ((size_t)&((TYPE*)0)->MEM)
+#endif
+
+#ifndef MIN
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#endif
+
+#define HPT_IOCTL_MAGIC 0xA1B2C3D4
+#define HPT_IOCTL_MAGIC32 0x1A2B3C4D
+
+struct hpt_iopmu_itl {
+	u_int32_t resrved0[4];
+	u_int32_t inbound_msgaddr0;
+	u_int32_t inbound_msgaddr1;
+	u_int32_t outbound_msgaddr0;
+	u_int32_t outbound_msgaddr1;
+	u_int32_t inbound_doorbell;
+	u_int32_t inbound_intstatus;
+	u_int32_t inbound_intmask;
+	u_int32_t outbound_doorbell;
+	u_int32_t outbound_intstatus;
+	u_int32_t outbound_intmask;
+	u_int32_t reserved1[2];
+	u_int32_t inbound_queue;
+	u_int32_t outbound_queue;
+};
+
+#define IOPMU_QUEUE_EMPTY 0xffffffff
+#define IOPMU_QUEUE_MASK_HOST_BITS 0xf0000000
+#define IOPMU_QUEUE_ADDR_HOST_BIT 0x80000000
+#define IOPMU_QUEUE_REQUEST_SIZE_BIT 0x40000000
+#define IOPMU_QUEUE_REQUEST_RESULT_BIT 0x40000000
+#define IOPMU_MAX_MEM_SUPPORT_MASK_64G 0xfffffff000000000ull
+#define IOPMU_MAX_MEM_SUPPORT_MASK_32G 0xfffffff800000000ull
+
+#define IOPMU_OUTBOUND_INT_MSG0 1
+#define IOPMU_OUTBOUND_INT_MSG1 2
+#define IOPMU_OUTBOUND_INT_DOORBELL 4
+#define IOPMU_OUTBOUND_INT_POSTQUEUE 8
+#define IOPMU_OUTBOUND_INT_PCI 0x10
+
+#define IOPMU_INBOUND_INT_MSG0 1
+#define IOPMU_INBOUND_INT_MSG1 2
+#define IOPMU_INBOUND_INT_DOORBELL 4
+#define IOPMU_INBOUND_INT_ERROR 8
+#define IOPMU_INBOUND_INT_POSTQUEUE 0x10
+
+#define MVIOP_QUEUE_LEN 512
+struct hpt_iopmu_mv {
+	u_int32_t inbound_head;
+	u_int32_t inbound_tail;
+	u_int32_t outbound_head;
+	u_int32_t outbound_tail;
+	u_int32_t inbound_msg;
+	u_int32_t outbound_msg;
+	u_int32_t reserve[10];
+	u_int64_t inbound_q[MVIOP_QUEUE_LEN];
+	u_int64_t outbound_q[MVIOP_QUEUE_LEN];
+};
+
+struct hpt_iopmv_regs {
+	u_int32_t reserved[0x20400 / 4];
+	u_int32_t inbound_doorbell;
+	u_int32_t inbound_intmask;
+	u_int32_t outbound_doorbell;
+	u_int32_t outbound_intmask;
+};
+
+#define MVIOP_IOCTLCFG_SIZE 0x800
+#define MVIOP_MU_QUEUE_ADDR_HOST_MASK (~(0x1full))
+#define MVIOP_MU_QUEUE_ADDR_HOST_BIT 4
+
+#define MVIOP_MU_QUEUE_ADDR_IOP_HIGH32 0xffffffff
+#define MVIOP_MU_QUEUE_REQUEST_RESULT_BIT 1
+#define MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT 2
+
+#define MVIOP_MU_INBOUND_INT_MSG 1
+#define MVIOP_MU_INBOUND_INT_POSTQUEUE 2
+#define MVIOP_MU_OUTBOUND_INT_MSG 1
+#define MVIOP_MU_OUTBOUND_INT_POSTQUEUE 2
+
+#define MVIOP_CMD_TYPE_GET_CONFIG (1 << 5)
+#define MVIOP_CMD_TYPE_SET_CONFIG (1 << 6)
+#define MVIOP_CMD_TYPE_SCSI (1 << 7)
+#define MVIOP_CMD_TYPE_IOCTL (1 << 8)
+#define MVIOP_CMD_TYPE_BLOCK (1 << 9)
+
+#define MVIOP_REQUEST_NUMBER_START_BIT 16
+
+enum hpt_iopmu_message {
+	/* host-to-iop messages */
+	IOPMU_INBOUND_MSG0_NOP = 0,
+	IOPMU_INBOUND_MSG0_RESET,
+	IOPMU_INBOUND_MSG0_FLUSH,
+	IOPMU_INBOUND_MSG0_SHUTDOWN,
+	IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK,
+	IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK,
+	IOPMU_INBOUND_MSG0_MAX = 0xff,
+	/* iop-to-host messages */
+	IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_0 = 0x100,
+	IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_MAX = 0x1ff,
+	IOPMU_OUTBOUND_MSG0_UNREGISTER_DEVICE_0 = 0x200,
+	IOPMU_OUTBOUND_MSG0_UNREGISTER_DEVICE_MAX = 0x2ff,
+	IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_0 = 0x300,
+	IOPMU_OUTBOUND_MSG0_REVALIDATE_DEVICE_MAX = 0x3ff,
+};
+
+#define IOP_REQUEST_FLAG_SYNC_REQUEST 1
+#define IOP_REQUEST_FLAG_BIST_REQUEST 2
+#define IOP_REQUEST_FLAG_REMAPPED 4
+#define IOP_REQUEST_FLAG_OUTPUT_CONTEXT 8
+
+enum hpt_iop_request_type {
+	IOP_REQUEST_TYPE_GET_CONFIG = 0,
+	IOP_REQUEST_TYPE_SET_CONFIG,
+	IOP_REQUEST_TYPE_BLOCK_COMMAND,
+	IOP_REQUEST_TYPE_SCSI_COMMAND,
+	IOP_REQUEST_TYPE_IOCTL_COMMAND,
+	IOP_REQUEST_TYPE_MAX
+};
+
+enum hpt_iop_result_type {
+	IOP_RESULT_PENDING = 0,
+	IOP_RESULT_SUCCESS,
+	IOP_RESULT_FAIL,
+	IOP_RESULT_BUSY,
+	IOP_RESULT_RESET,
+	IOP_RESULT_INVALID_REQUEST,
+	IOP_RESULT_BAD_TARGET,
+	IOP_RESULT_CHECK_CONDITION,
+};
+
+struct hpt_iop_request_header {
+	u_int32_t size;
+	u_int32_t type;
+	u_int32_t flags;
+	u_int32_t result;
+	u_int64_t context; /* host context */
+};
+
+struct hpt_iop_request_get_config {
+	struct hpt_iop_request_header header;
+	u_int32_t interface_version;
+	u_int32_t firmware_version;
+	u_int32_t max_requests;
+	u_int32_t request_size;
+	u_int32_t max_sg_count;
+	u_int32_t data_transfer_length;
+	u_int32_t alignment_mask;
+	u_int32_t max_devices;
+	u_int32_t sdram_size;
+};
+
+struct hpt_iop_request_set_config {
+	struct hpt_iop_request_header header;
+	u_int32_t iop_id;
+	u_int16_t vbus_id;
+	u_int16_t max_host_request_size;
+	u_int32_t reserve[6];
+};
+
+struct hpt_iopsg {
+	u_int32_t size;
+	u_int32_t eot; /* non-zero: end of table */
+	u_int64_t pci_address;
+};
+
+#define IOP_BLOCK_COMMAND_READ 1
+#define IOP_BLOCK_COMMAND_WRITE 2
+#define IOP_BLOCK_COMMAND_VERIFY 3
+#define IOP_BLOCK_COMMAND_FLUSH 4
+#define IOP_BLOCK_COMMAND_SHUTDOWN 5
+struct hpt_iop_request_block_command {
+	struct hpt_iop_request_header header;
+	u_int8_t channel;
+	u_int8_t target;
+	u_int8_t lun;
+	u_int8_t pad1;
+	u_int16_t command; /* IOP_BLOCK_COMMAND_{READ,WRITE} */
+	u_int16_t sectors;
+	u_int64_t lba;
+	struct hpt_iopsg sg_list[1];
+};
+
+struct hpt_iop_request_scsi_command {
+	struct hpt_iop_request_header header;
+	u_int8_t channel;
+	u_int8_t target;
+	u_int8_t lun;
+	u_int8_t pad1;
+	u_int8_t cdb[16];
+	u_int32_t dataxfer_length;
+	struct hpt_iopsg sg_list[1];
+};
+
+struct hpt_iop_request_ioctl_command {
+	struct hpt_iop_request_header header;
+	u_int32_t ioctl_code;
+	u_int32_t inbuf_size;
+	u_int32_t outbuf_size;
+	u_int32_t bytes_returned;
+	u_int8_t buf[1];
+	/* out data should be put at buf[(inbuf_size+3)&~3] */
+};
+
+struct hpt_iop_ioctl_param {
+	u_int32_t Magic;	/* used to check if it's a valid ioctl packet */
+	u_int32_t dwIoControlCode;	/* operation control code */
+	unsigned long lpInBuffer;	/* input data buffer */
+	u_int32_t nInBufferSize;	/* size of input data buffer */
+	unsigned long lpOutBuffer;	/* output data buffer */
+	u_int32_t nOutBufferSize;	/* size of output data buffer */
+	unsigned long lpBytesReturned;	/* count of HPT_U8s returned */
+} __packed;
+
+#define HPT_IOCTL_FLAG_OPEN 1
+#define HPT_CTL_CODE_BSD_TO_IOP(x) ((x)-0xff00)
+
+typedef struct cdev * ioctl_dev_t;
+
+typedef struct thread * ioctl_thread_t;
+
+struct hpt_iop_hba {
+	struct hptiop_adapter_ops *ops;
+	union {
+		struct {
+			struct hpt_iopmu_itl *mu;
+		} itl;
+		struct {
+			struct hpt_iopmv_regs *regs;
+			struct hpt_iopmu_mv *mu;
+		} mv;
+	} u;
+
+	struct hpt_iop_hba *next;
+
+	u_int32_t firmware_version;
+	u_int32_t interface_version;
+	u_int32_t max_devices;
+	u_int32_t max_requests;
+	u_int32_t max_request_size;
+	u_int32_t max_sg_count;
+
+	u_int32_t msg_done;
+
+	device_t pcidev;
+	u_int32_t pciunit;
+	ioctl_dev_t ioctl_dev;
+
+	bus_dma_tag_t parent_dmat;
+	bus_dma_tag_t io_dmat;
+	bus_dma_tag_t srb_dmat;
+	bus_dma_tag_t ctlcfg_dmat;
+
+	bus_dmamap_t srb_dmamap;
+	bus_dmamap_t ctlcfg_dmamap;
+
+	struct resource *bar0_res;
+	bus_space_tag_t bar0t;
+	bus_space_handle_t bar0h;
+	int bar0_rid;
+
+	struct resource *bar2_res;
+	bus_space_tag_t bar2t;
+	bus_space_handle_t bar2h;
+	int bar2_rid;
+
+	/* to release */
+	u_int8_t *uncached_ptr;
+	void *ctlcfg_ptr;
+	/* for scsi request block */
+	struct hpt_iop_srb *srb_list;
+	/* for interrupt */
+	struct resource *irq_res;
+	void *irq_handle;
+
+	/* for ioctl and set/get config */
+	struct resource *ctlcfg_res;
+	void *ctlcfg_handle;
+	u_int64_t ctlcfgcmd_phy;
+	u_int32_t config_done;
+
+	/* other resources */
+	struct cam_sim *sim;
+	struct cam_path *path;
+	void *req;
+	struct lock lock;
+#define HPT_IOCTL_FLAG_OPEN 1
+	u_int32_t flag;
+	struct hpt_iop_srb* srb[HPT_SRB_MAX_QUEUE_SIZE];
+};
+
+struct hptiop_adapter_ops {
+	int (*iop_wait_ready)(struct hpt_iop_hba *hba, u_int32_t millisec);
+	int (*internal_memalloc)(struct hpt_iop_hba *hba);
+	int (*internal_memfree)(struct hpt_iop_hba *hba);
+	int (*alloc_pci_res)(struct hpt_iop_hba *hba);
+	void (*release_pci_res)(struct hpt_iop_hba *hba);
+	void (*enable_intr)(struct hpt_iop_hba *hba);
+	void (*disable_intr)(struct hpt_iop_hba *hba);
+	int (*get_config)(struct hpt_iop_hba *hba,
+		struct hpt_iop_request_get_config *config);
+	int (*set_config)(struct hpt_iop_hba *hba,
+		struct hpt_iop_request_set_config *config);
+	int (*iop_intr)(struct hpt_iop_hba *hba);
+	void (*post_msg)(struct hpt_iop_hba *hba, u_int32_t msg);
+	void (*post_req)(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb, bus_dma_segment_t *segs, int nsegs);
+	int (*do_ioctl)(struct hpt_iop_hba *hba, struct hpt_iop_ioctl_param * pParams);
+};
+
+struct hpt_iop_srb {
+	u_int8_t req[HPT_SRB_MAX_REQ_SIZE];
+	struct hpt_iop_hba *hba;
+	union ccb *ccb;
+	struct hpt_iop_srb *next;
+	bus_dmamap_t dma_map;
+	u_int64_t phy_addr;
+	u_int32_t srb_flag;
+	int index;
+};
+
+#define hptiop_lock_adapter(hba) lockmgr(&(hba)->lock, LK_EXCLUSIVE)
+#define hptiop_unlock_adapter(hba) lockmgr(&(hba)->lock, LK_RELEASE)
+
+#define HPT_OSM_TIMEOUT (20*hz) /* timeout value for OS commands */
+
+#define HPT_DO_IOCONTROL _IOW('H', 0, struct hpt_iop_ioctl_param)
+#define HPT_SCAN_BUS _IO('H', 1)
+
+static __inline int hptiop_sleep(struct hpt_iop_hba *hba, void *ident,
+				int priority, const char *wmesg, int timo)
+{
+
+	int retval;
+
+	retval = lksleep(ident, &hba->lock, priority, wmesg, timo);
+
+	return retval;
+
+}
+
+#endif