hptiop(4): Add some more PCI IDs.
[dragonfly.git] / sys / dev / raid / hptiop / hptiop.c
/*
 * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
 * Copyright (C) 2007-2008 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/hptiop/hptiop.c,v 1.14 2012/08/06 05:27:26 delphij Exp $
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/cons.h>
#include <sys/time.h>
#include <sys/systm.h>

#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/libkern.h>
#include <sys/kernel.h>

#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/module.h>

#include <sys/eventhandler.h>
#include <sys/bus.h>
#include <sys/taskqueue.h>
#include <sys/device.h>
#include <sys/mplock2.h>

#include <machine/stdarg.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_sim.h>
#include <bus/cam/cam_xpt_periph.h>
#include <bus/cam/cam_xpt_sim.h>
#include <bus/cam/cam_debug.h>
#include <bus/cam/cam_periph.h>
#include <bus/cam/scsi/scsi_all.h>
#include <bus/cam/scsi/scsi_message.h>

#include <dev/raid/hptiop/hptiop.h>

static char driver_name[] = "hptiop";
static char driver_version[] = "v1.3 (010208)";

static devclass_t hptiop_devclass;

static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
                                u_int32_t msg, u_int32_t millisec);
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
                                                        u_int32_t req);
static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
static int  hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams);
static void hptiop_bus_scan_cb(struct cam_periph *periph, union ccb *ccb);
static int  hptiop_rescan_bus(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config);
static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config);
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
static int  hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
                        u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
static int  hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_ioctl_command *req,
                                struct hpt_iop_ioctl_param *pParams);
static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
static int  hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
static int  hptiop_probe(device_t dev);
static int  hptiop_attach(device_t dev);
static int  hptiop_detach(device_t dev);
static int  hptiop_shutdown(device_t dev);
static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
static void hptiop_poll(struct cam_sim *sim);
static void hptiop_async(void *callback_arg, u_int32_t code,
                                        struct cam_path *path, void *arg);
static void hptiop_pci_intr(void *arg);
static void hptiop_release_resource(struct hpt_iop_hba *hba);
static int  hptiop_reset_adapter(struct hpt_iop_hba *hba);

static d_open_t hptiop_open;
static d_close_t hptiop_close;
static d_ioctl_t hptiop_ioctl;

static struct dev_ops hptiop_ops = {
        { driver_name, 0, 0 },
        .d_open = hptiop_open,
        .d_close = hptiop_close,
        .d_ioctl = hptiop_ioctl,
};

#define hba_from_dev(dev) ((struct hpt_iop_hba *)(dev)->si_drv1)

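/*
 * Register access helpers for the two hardware flavours this driver
 * supports: the _ITL macros touch the message unit mapped at BAR0,
 * while the _MV0/_MV2 macros touch the Marvell-style registers at
 * BAR0 and the message unit at BAR2.  (Editor's note, an assumption:
 * "ITL" appears to name the Intel IOP based boards and "MV" the
 * Marvell based ones.)
 */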
#define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
#define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))

#define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
#define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
#define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
#define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))

static int hptiop_open(struct dev_open_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        struct hpt_iop_hba *hba = hba_from_dev(dev);

        if (hba == NULL)
                return ENXIO;
        if (hba->flag & HPT_IOCTL_FLAG_OPEN)
                return EBUSY;
        hba->flag |= HPT_IOCTL_FLAG_OPEN;
        return 0;
}

static int hptiop_close(struct dev_close_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        struct hpt_iop_hba *hba = hba_from_dev(dev);

        hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
        return 0;
}

static int hptiop_ioctl(struct dev_ioctl_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        u_long cmd = ap->a_cmd;
        caddr_t data = ap->a_data;
        int ret = EFAULT;
        struct hpt_iop_hba *hba = hba_from_dev(dev);

        get_mplock();

        switch (cmd) {
        case HPT_DO_IOCONTROL:
                ret = hba->ops->do_ioctl(hba,
                                (struct hpt_iop_ioctl_param *)data);
                break;
        case HPT_SCAN_BUS:
                ret = hptiop_rescan_bus(hba);
                break;
        }

        rel_mplock();

        return ret;
}

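/*
 * MV outbound queue: a ring of 64-bit descriptors indexed by
 * outbound_head (advanced by the firmware) and outbound_tail
 * (advanced by the host).  Each entry is read as two 32-bit words;
 * the host bumps the tail, wrapping at MVIOP_QUEUE_LEN, and writes
 * it back to acknowledge the slot.  A return value of 0 means the
 * queue is empty.  The inbound path mirrors this and rings the
 * doorbell after posting.
 */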
static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
{
        u_int64_t p;
        u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
        u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);

        if (outbound_tail != outbound_head) {
                bus_space_read_region_4(hba->bar2t, hba->bar2h,
                        offsetof(struct hpt_iopmu_mv,
                                outbound_q[outbound_tail]),
                        (u_int32_t *)&p, 2);

                outbound_tail++;

                if (outbound_tail == MVIOP_QUEUE_LEN)
                        outbound_tail = 0;

                BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
                return p;
        } else
                return 0;
}

static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
{
        u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
        u_int32_t head = inbound_head + 1;

        if (head == MVIOP_QUEUE_LEN)
                head = 0;

        bus_space_write_region_4(hba->bar2t, hba->bar2h,
                        offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
                        (u_int32_t *)&p, 2);
        BUS_SPACE_WRT4_MV2(inbound_head, head);
        BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
}

static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
{
        BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
        BUS_SPACE_RD4_ITL(outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
{
        BUS_SPACE_WRT4_MV2(inbound_msg, msg);
        BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);

        BUS_SPACE_RD4_MV0(outbound_intmask);
}

static int hptiop_wait_ready_itl(struct hpt_iop_hba *hba, u_int32_t millisec)
{
        u_int32_t req = 0;
        int i;

        for (i = 0; i < millisec; i++) {
                req = BUS_SPACE_RD4_ITL(inbound_queue);
                if (req != IOPMU_QUEUE_EMPTY)
                        break;
                DELAY(1000);
        }

        if (req != IOPMU_QUEUE_EMPTY) {
                BUS_SPACE_WRT4_ITL(outbound_queue, req);
                BUS_SPACE_RD4_ITL(outbound_intstatus);
                return 0;
        }

        return -1;
}

static int hptiop_wait_ready_mv(struct hpt_iop_hba *hba, u_int32_t millisec)
{
        if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
                return -1;

        return 0;
}

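/*
 * Completion handling, ITL flavour.  The 32-bit value pulled from the
 * outbound queue either carries IOPMU_QUEUE_MASK_HOST_BITS (a
 * host-allocated request: the remaining bits index hba->srb[], and on
 * newer firmware IOPMU_QUEUE_REQUEST_RESULT_BIT reports success
 * inline), or it is an offset into the IOP's own memory window, in
 * which case the request header is fetched through bus_space reads.
 */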
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
                                                        u_int32_t index)
{
        struct hpt_iop_srb *srb;
        struct hpt_iop_request_scsi_command *req = NULL;
        union ccb *ccb;
        u_int8_t *cdb;
        u_int32_t result, temp, dxfer;
        u_int64_t temp64;

        if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /* host req */
                if (hba->firmware_version > 0x01020000 ||
                        hba->interface_version > 0x01020000) {
                        srb = hba->srb[index & ~(u_int32_t)
                                (IOPMU_QUEUE_ADDR_HOST_BIT
                                | IOPMU_QUEUE_REQUEST_RESULT_BIT)];
                        req = (struct hpt_iop_request_scsi_command *)srb;
                        if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
                                result = IOP_RESULT_SUCCESS;
                        else
                                result = req->header.result;
                } else {
                        srb = hba->srb[index &
                                ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
                        req = (struct hpt_iop_request_scsi_command *)srb;
                        result = req->header.result;
                }
                dxfer = req->dataxfer_length;
                goto srb_complete;
        }

        /* iop req */
        temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
                offsetof(struct hpt_iop_request_header, type));
        result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
                offsetof(struct hpt_iop_request_header, result));
        switch (temp) {
        case IOP_REQUEST_TYPE_IOCTL_COMMAND:
        {
                temp64 = 0;
                bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                wakeup((void *)((unsigned long)hba->u.itl.mu + index));
                break;
        }

        case IOP_REQUEST_TYPE_SCSI_COMMAND:
                bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                srb = (struct hpt_iop_srb *)(unsigned long)temp64;
                dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
                                index + offsetof(struct hpt_iop_request_scsi_command,
                                dataxfer_length));
srb_complete:
                ccb = (union ccb *)srb->ccb;
                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }

                switch (result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }

                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;

                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        memset(&ccb->csio.sense_data, 0,
                            sizeof(ccb->csio.sense_data));
                        if (dxfer < ccb->csio.sense_len)
                                ccb->csio.sense_resid = ccb->csio.sense_len -
                                    dxfer;
                        else
                                ccb->csio.sense_resid = 0;
                        if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) { /* iop */
                                bus_space_read_region_1(hba->bar0t, hba->bar0h,
                                        index + offsetof(struct hpt_iop_request_scsi_command,
                                        sg_list), (u_int8_t *)&ccb->csio.sense_data,
                                        MIN(dxfer, sizeof(ccb->csio.sense_data)));
                        } else {
                                memcpy(&ccb->csio.sense_data, &req->sg_list,
                                        MIN(dxfer, sizeof(ccb->csio.sense_data)));
                        }
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
                        BUS_SPACE_WRT4_ITL(outbound_queue, index);

                ccb->csio.resid = ccb->csio.dxfer_len - dxfer;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
                break;
        }
}

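/*
 * Drain the ITL outbound queue.  For a request flagged
 * IOP_REQUEST_FLAG_SYNC_REQUEST the 64-bit context field doubles as
 * the completion flag polled by hptiop_send_sync_request_itl(): a
 * zero context gets 1 written into it to satisfy the poller, while a
 * context that is already non-zero (the ioctl path stores a wakeup
 * token there) is handed to the normal callback.
 */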
static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
{
        u_int32_t req, temp;

        while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) != IOPMU_QUEUE_EMPTY) {
                if (req & IOPMU_QUEUE_MASK_HOST_BITS)
                        hptiop_request_callback_itl(hba, req);
                else {
                        temp = bus_space_read_4(hba->bar0t,
                                        hba->bar0h, req +
                                        offsetof(struct hpt_iop_request_header,
                                                flags));
                        if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
                                u_int64_t temp64;
                                bus_space_read_region_4(hba->bar0t,
                                        hba->bar0h, req +
                                        offsetof(struct hpt_iop_request_header,
                                                context),
                                        (u_int32_t *)&temp64, 2);
                                if (temp64) {
                                        hptiop_request_callback_itl(hba, req);
                                } else {
                                        temp64 = 1;
                                        bus_space_write_region_4(hba->bar0t,
                                                hba->bar0h, req +
                                                offsetof(struct hpt_iop_request_header,
                                                        context),
                                                (u_int32_t *)&temp64, 2);
                                }
                        } else
                                hptiop_request_callback_itl(hba, req);
                }
        }
}

static int hptiop_intr_itl(struct hpt_iop_hba *hba)
{
        u_int32_t status;
        int ret = 0;

        status = BUS_SPACE_RD4_ITL(outbound_intstatus);

        if (status & IOPMU_OUTBOUND_INT_MSG0) {
                u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
                KdPrint(("hptiop: received outbound msg %x\n", msg));
                BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
                hptiop_os_message_callback(hba, msg);
                ret = 1;
        }

        if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue_itl(hba);
                ret = 1;
        }

        return ret;
}

static void hptiop_request_callback_mv(struct hpt_iop_hba *hba,
                                                        u_int64_t _tag)
{
        u_int32_t context = (u_int32_t)_tag;

        if (context & MVIOP_CMD_TYPE_SCSI) {
                struct hpt_iop_srb *srb;
                struct hpt_iop_request_scsi_command *req;
                union ccb *ccb;
                u_int8_t *cdb;

                srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
                req = (struct hpt_iop_request_scsi_command *)srb;
                ccb = (union ccb *)srb->ccb;
                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }
                if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
                        req->header.result = IOP_RESULT_SUCCESS;

                switch (req->header.result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;
                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        memset(&ccb->csio.sense_data, 0,
                            sizeof(ccb->csio.sense_data));
                        if (req->dataxfer_length < ccb->csio.sense_len)
                                ccb->csio.sense_resid = ccb->csio.sense_len -
                                    req->dataxfer_length;
                        else
                                ccb->csio.sense_resid = 0;
                        memcpy(&ccb->csio.sense_data, &req->sg_list,
                                MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
        } else if (context & MVIOP_CMD_TYPE_IOCTL) {
                struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
                if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
                        hba->config_done = 1;
                else
                        hba->config_done = -1;
                wakeup(req);
        } else if (context &
                        (MVIOP_CMD_TYPE_SET_CONFIG |
                                MVIOP_CMD_TYPE_GET_CONFIG))
                hba->config_done = 1;
        else {
                device_printf(hba->pcidev, "wrong callback type\n");
        }
}

static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba *hba)
{
        u_int64_t req;

        while ((req = hptiop_mv_outbound_read(hba))) {
                if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
                        if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
                                hptiop_request_callback_mv(hba, req);
                        }
                }
        }
}

static int hptiop_intr_mv(struct hpt_iop_hba *hba)
{
        u_int32_t status;
        int ret = 0;

        status = BUS_SPACE_RD4_MV0(outbound_doorbell);

        if (status)
                BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);

        if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
                u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
                KdPrint(("hptiop: received outbound msg %x\n", msg));
                hptiop_os_message_callback(hba, msg);
                ret = 1;
        }

        if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue_mv(hba);
                ret = 1;
        }

        return ret;
}

static int hptiop_send_sync_request_itl(struct hpt_iop_hba *hba,
                                        u_int32_t req32, u_int32_t millisec)
{
        u_int32_t i;
        u_int64_t temp64;

        BUS_SPACE_WRT4_ITL(inbound_queue, req32);
        BUS_SPACE_RD4_ITL(outbound_intstatus);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_itl(hba);
                bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                if (temp64)
                        return 0;
                DELAY(1000);
        }

        return -1;
}

static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
                                        void *req, u_int32_t millisec)
{
        u_int32_t i;
        u_int64_t phy_addr;

        hba->config_done = 0;

        phy_addr = hba->ctlcfgcmd_phy |
                        (u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
        ((struct hpt_iop_request_get_config *)req)->header.flags |=
                IOP_REQUEST_FLAG_SYNC_REQUEST |
                IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
        hptiop_mv_inbound_write(phy_addr, hba);
        BUS_SPACE_RD4_MV0(outbound_intmask);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_mv(hba);
                if (hba->config_done)
                        return 0;
                DELAY(1000);
        }
        return -1;
}

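/*
 * Post a message to the firmware and busy-wait (DELAY-based, no
 * sleeping) until the interrupt path sets hba->msg_done or the
 * timeout in milliseconds expires.  The loop calls iop_intr() itself,
 * so this also works before interrupt delivery is wired up.
 */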
static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
                                        u_int32_t msg, u_int32_t millisec)
{
        u_int32_t i;

        hba->msg_done = 0;
        hba->ops->post_msg(hba, msg);

        for (i = 0; i < millisec; i++) {
                hba->ops->iop_intr(hba);
                if (hba->msg_done)
                        break;
                DELAY(1000);
        }

        return hba->msg_done ? 0 : -1;
}

static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        u_int32_t req32;

        config->header.size = sizeof(struct hpt_iop_request_get_config);
        config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
        config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        config->header.result = IOP_RESULT_PENDING;
        config->header.context = 0;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        bus_space_write_region_4(hba->bar0t, hba->bar0h,
                        req32, (u_int32_t *)config,
                        sizeof(struct hpt_iop_request_header) >> 2);

        if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
                KdPrint(("hptiop: get config send cmd failed"));
                return -1;
        }

        bus_space_read_region_4(hba->bar0t, hba->bar0h,
                        req32, (u_int32_t *)config,
                        sizeof(struct hpt_iop_request_get_config) >> 2);

        BUS_SPACE_WRT4_ITL(outbound_queue, req32);

        return 0;
}

static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        struct hpt_iop_request_get_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        req->header.flags = 0;
        req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_get_config);
        req->header.result = IOP_RESULT_PENDING;
        req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;

        if (hptiop_send_sync_request_mv(hba, req, 20000)) {
                KdPrint(("hptiop: get config send cmd failed"));
                return -1;
        }

        *config = *req;
        return 0;
}

static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        u_int32_t req32;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);

        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        config->header.size = sizeof(struct hpt_iop_request_set_config);
        config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        config->header.result = IOP_RESULT_PENDING;
        config->header.context = 0;

        bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
                (u_int32_t *)config,
                sizeof(struct hpt_iop_request_set_config) >> 2);

        if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        BUS_SPACE_WRT4_ITL(outbound_queue, req32);

        return 0;
}

static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        struct hpt_iop_request_set_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
                (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
                sizeof(struct hpt_iop_request_set_config) -
                        sizeof(struct hpt_iop_request_header));

        req->header.flags = 0;
        req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_set_config);
        req->header.result = IOP_RESULT_PENDING;
        req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;

        if (hptiop_send_sync_request_mv(hba, req, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        return 0;
}

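/*
 * IOCTL pass-through, ITL flavour.  The firmware request slot carries
 * the input buffer first, rounded up to a 4-byte boundary, then the
 * output buffer; both must fit in max_request_size minus the fixed
 * header up to 'buf'.  Completion is signalled through the header's
 * context field (see hptiop_request_callback_itl()).
 */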
static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
                                u_int32_t req32,
                                struct hpt_iop_ioctl_param *pParams)
{
        u_int64_t temp64;
        struct hpt_iop_request_ioctl_command req;

        if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
                        (hba->max_request_size -
                        offsetof(struct hpt_iop_request_ioctl_command, buf))) {
                device_printf(hba->pcidev, "request size beyond max value");
                return -1;
        }

        req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
                + pParams->nInBufferSize;
        req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
        req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        req.header.result = IOP_RESULT_PENDING;
        req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
        req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
        req.inbuf_size = pParams->nInBufferSize;
        req.outbuf_size = pParams->nOutBufferSize;
        req.bytes_returned = 0;

        bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
                offsetof(struct hpt_iop_request_ioctl_command, buf) >> 2);

        hptiop_lock_adapter(hba);

        BUS_SPACE_WRT4_ITL(inbound_queue, req32);
        BUS_SPACE_RD4_ITL(outbound_intstatus);

        bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                offsetof(struct hpt_iop_request_ioctl_command, header.context),
                (u_int32_t *)&temp64, 2);
        while (temp64) {
                if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
                                0, "hptctl", HPT_OSM_TIMEOUT) == 0)
                        break;
                hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
                bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command,
                                header.context),
                        (u_int32_t *)&temp64, 2);
        }

        hptiop_unlock_adapter(hba);
        return 0;
}

static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
                                void *user, int size)
{
        unsigned char byte;
        int i;

        for (i = 0; i < size; i++) {
                if (copyin((u_int8_t *)user + i, &byte, 1))
                        return -1;
                bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
        }

        return 0;
}

static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
                                void *user, int size)
{
        unsigned char byte;
        int i;

        for (i = 0; i < size; i++) {
                byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
                if (copyout(&byte, (u_int8_t *)user + i, 1))
                        return -1;
        }

        return 0;
}

static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams)
{
        u_int32_t req32;
        u_int32_t result;

        if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
                (pParams->Magic != HPT_IOCTL_MAGIC32))
                return EFAULT;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return EFAULT;

        if (pParams->nInBufferSize)
                if (hptiop_bus_space_copyin(hba, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command, buf),
                        (void *)pParams->lpInBuffer, pParams->nInBufferSize))
                        goto invalid;

        if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
                goto invalid;

        result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command,
                                header.result));

        if (result == IOP_RESULT_SUCCESS) {
                if (pParams->nOutBufferSize)
                        if (hptiop_bus_space_copyout(hba, req32 +
                                offsetof(struct hpt_iop_request_ioctl_command, buf) +
                                        ((pParams->nInBufferSize + 3) & ~3),
                                (void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
                                goto invalid;

                if (pParams->lpBytesReturned) {
                        if (hptiop_bus_space_copyout(hba, req32 +
                                offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
                                (void *)pParams->lpBytesReturned, sizeof(unsigned long)))
                                goto invalid;
                }

                BUS_SPACE_WRT4_ITL(outbound_queue, req32);

                return 0;
        } else {
invalid:
                BUS_SPACE_WRT4_ITL(outbound_queue, req32);

                return EFAULT;
        }
}

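/*
 * IOCTL pass-through, MV flavour, reusing the preallocated
 * DMA-coherent control request (hba->ctlcfg_ptr).  The low bits of
 * the posted physical address carry a size hint (header.size >> 8,
 * capped at 3) alongside MVIOP_MU_QUEUE_ADDR_HOST_BIT.
 */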
static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_ioctl_command *req,
                                struct hpt_iop_ioctl_param *pParams)
{
        u_int64_t req_phy;
        int size = 0;

        if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
                        (hba->max_request_size -
                        offsetof(struct hpt_iop_request_ioctl_command, buf))) {
                device_printf(hba->pcidev, "request size beyond max value");
                return -1;
        }

        req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
        req->inbuf_size = pParams->nInBufferSize;
        req->outbuf_size = pParams->nOutBufferSize;
        req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
                                        + pParams->nInBufferSize;
        req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
        req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
        req->header.result = IOP_RESULT_PENDING;
        req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
        size = req->header.size >> 8;
        size = size > 3 ? 3 : size;
        req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
        hptiop_mv_inbound_write(req_phy, hba);

        BUS_SPACE_RD4_MV0(outbound_intmask);

        while (hba->config_done == 0) {
                if (hptiop_sleep(hba, req, 0,
                        "hptctl", HPT_OSM_TIMEOUT) == 0)
                        continue;
                hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
        }
        return 0;
}

static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams)
{
        struct hpt_iop_request_ioctl_command *req;

        if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
                (pParams->Magic != HPT_IOCTL_MAGIC32))
                return EFAULT;

        req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
        hba->config_done = 0;
        hptiop_lock_adapter(hba);
        if (pParams->nInBufferSize)
                if (copyin((void *)pParams->lpInBuffer,
                                req->buf, pParams->nInBufferSize))
                        goto invalid;
        if (hptiop_post_ioctl_command_mv(hba, req, pParams))
                goto invalid;

        if (hba->config_done == 1) {
                if (pParams->nOutBufferSize)
                        if (copyout(req->buf +
                                ((pParams->nInBufferSize + 3) & ~3),
                                (void *)pParams->lpOutBuffer,
                                pParams->nOutBufferSize))
                                goto invalid;

                if (pParams->lpBytesReturned)
                        if (copyout(&req->bytes_returned,
                                (void *)pParams->lpBytesReturned,
                                sizeof(u_int32_t)))
                                goto invalid;
                hptiop_unlock_adapter(hba);
                return 0;
        } else {
invalid:
                hptiop_unlock_adapter(hba);
                return EFAULT;
        }
}

static int hptiop_rescan_bus(struct hpt_iop_hba *hba)
{
        union ccb           *ccb;

        if ((ccb = xpt_alloc_ccb()) == NULL)
                return(ENOMEM);
        if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(hba->sim),
                CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
                xpt_free_ccb(ccb);
                return(EIO);
        }

        xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/);
        ccb->ccb_h.func_code = XPT_SCAN_BUS;
        ccb->ccb_h.cbfcnp = hptiop_bus_scan_cb;
        ccb->crcn.flags = CAM_FLAG_NONE;
        xpt_action(ccb);
        return(0);
}

static void hptiop_bus_scan_cb(struct cam_periph *periph, union ccb *ccb)
{
        xpt_free_path(ccb->ccb_h.path);
        kfree(ccb, M_TEMP);
}

static  bus_dmamap_callback_t   hptiop_map_srb;
static  bus_dmamap_callback_t   hptiop_post_scsi_command;
static  bus_dmamap_callback_t   hptiop_mv_map_ctlcfg;

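/*
 * PCI resources.  The rids are PCI config-space BAR offsets: 0x10 is
 * BAR0 (PCIR_BAR(0)) and, on the MV boards, 0x18 is BAR2
 * (PCIR_BAR(2)).
 */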
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
{
        hba->bar0_rid = 0x10;
        hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
                        SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

        if (hba->bar0_res == NULL) {
                device_printf(hba->pcidev,
                        "failed to get iop base address.\n");
                return -1;
        }
        hba->bar0t = rman_get_bustag(hba->bar0_res);
        hba->bar0h = rman_get_bushandle(hba->bar0_res);
        hba->u.itl.mu = (struct hpt_iopmu_itl *)
                                rman_get_virtual(hba->bar0_res);

        if (!hba->u.itl.mu) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                device_printf(hba->pcidev, "alloc mem res failed\n");
                return -1;
        }

        return 0;
}

static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
{
        hba->bar0_rid = 0x10;
        hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
                        SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

        if (hba->bar0_res == NULL) {
                device_printf(hba->pcidev, "failed to get iop bar0.\n");
                return -1;
        }
        hba->bar0t = rman_get_bustag(hba->bar0_res);
        hba->bar0h = rman_get_bushandle(hba->bar0_res);
        hba->u.mv.regs = (struct hpt_iopmv_regs *)
                                rman_get_virtual(hba->bar0_res);

        if (!hba->u.mv.regs) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
                return -1;
        }

        hba->bar2_rid = 0x18;
        hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
                        SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);

        if (hba->bar2_res == NULL) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                device_printf(hba->pcidev, "failed to get iop bar2.\n");
                return -1;
        }

        hba->bar2t = rman_get_bustag(hba->bar2_res);
        hba->bar2h = rman_get_bushandle(hba->bar2_res);
        hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);

        if (!hba->u.mv.mu) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar2_rid, hba->bar2_res);
                device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
                return -1;
        }

        return 0;
}

static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
{
        if (hba->bar0_res)
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                        hba->bar0_rid, hba->bar0_res);
}

static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
{
        if (hba->bar0_res)
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                        hba->bar0_rid, hba->bar0_res);
        if (hba->bar2_res)
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                        hba->bar2_rid, hba->bar2_res);
}

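/*
 * MV boards additionally need a small DMA-coherent buffer for the
 * get/set-config and ioctl requests; presumably the
 * hptiop_mv_map_ctlcfg() load callback records its bus address in
 * hba->ctlcfgcmd_phy, which is what gets posted to the inbound queue.
 */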
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
{
        if (bus_dma_tag_create(hba->parent_dmat,
                                1,
                                0,
                                BUS_SPACE_MAXADDR_32BIT,
                                BUS_SPACE_MAXADDR,
                                NULL, NULL,
                                0x800 - 0x8,
                                1,
                                BUS_SPACE_MAXSIZE_32BIT,
                                BUS_DMA_ALLOCNOW,
                                &hba->ctlcfg_dmat)) {
                device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
                return -1;
        }

        if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
                BUS_DMA_WAITOK | BUS_DMA_COHERENT,
                &hba->ctlcfg_dmamap) != 0) {
                        device_printf(hba->pcidev,
                                        "bus_dmamem_alloc failed!\n");
                        bus_dma_tag_destroy(hba->ctlcfg_dmat);
                        return -1;
        }

        if (bus_dmamap_load(hba->ctlcfg_dmat,
                        hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
                        MVIOP_IOCTLCFG_SIZE,
                        hptiop_mv_map_ctlcfg, hba, 0)) {
                device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
                if (hba->ctlcfg_dmat) {
                        bus_dmamem_free(hba->ctlcfg_dmat,
                                hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
                        bus_dma_tag_destroy(hba->ctlcfg_dmat);
                }
                return -1;
        }

        return 0;
}

static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
{
        if (hba->ctlcfg_dmat) {
                bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
                bus_dmamem_free(hba->ctlcfg_dmat,
                                        hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
                bus_dma_tag_destroy(hba->ctlcfg_dmat);
        }

        return 0;
}

/*
 * CAM driver interface
 */
static device_method_t driver_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,     hptiop_probe),
        DEVMETHOD(device_attach,    hptiop_attach),
        DEVMETHOD(device_detach,    hptiop_detach),
        DEVMETHOD(device_shutdown,  hptiop_shutdown),
        { 0, 0 }
};

static struct hptiop_adapter_ops hptiop_itl_ops = {
        .iop_wait_ready    = hptiop_wait_ready_itl,
        .internal_memalloc = 0,
        .internal_memfree  = 0,
        .alloc_pci_res     = hptiop_alloc_pci_res_itl,
        .release_pci_res   = hptiop_release_pci_res_itl,
        .enable_intr       = hptiop_enable_intr_itl,
        .disable_intr      = hptiop_disable_intr_itl,
        .get_config        = hptiop_get_config_itl,
        .set_config        = hptiop_set_config_itl,
        .iop_intr          = hptiop_intr_itl,
        .post_msg          = hptiop_post_msg_itl,
        .post_req          = hptiop_post_req_itl,
        .do_ioctl          = hptiop_do_ioctl_itl,
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
        .iop_wait_ready    = hptiop_wait_ready_mv,
        .internal_memalloc = hptiop_internal_memalloc_mv,
        .internal_memfree  = hptiop_internal_memfree_mv,
        .alloc_pci_res     = hptiop_alloc_pci_res_mv,
        .release_pci_res   = hptiop_release_pci_res_mv,
        .enable_intr       = hptiop_enable_intr_mv,
        .disable_intr      = hptiop_disable_intr_mv,
        .get_config        = hptiop_get_config_mv,
        .set_config        = hptiop_set_config_mv,
        .iop_intr          = hptiop_intr_mv,
        .post_msg          = hptiop_post_msg_mv,
        .post_req          = hptiop_post_req_mv,
        .do_ioctl          = hptiop_do_ioctl_mv,
};

static driver_t hptiop_pci_driver = {
        driver_name,
        driver_methods,
        sizeof(struct hpt_iop_hba)
};

DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, NULL, NULL);
MODULE_DEPEND(hptiop, cam, 1, 1, 1);
MODULE_VERSION(hptiop, 1);

static int hptiop_probe(device_t dev)
{
        struct hpt_iop_hba *hba;
        u_int32_t id;
        static char buf[256];
        int sas = 0;
        struct hptiop_adapter_ops *ops;

        if (pci_get_vendor(dev) != 0x1103)
                return (ENXIO);

        id = pci_get_device(dev);

        switch (id) {
                case 0x4210:
                case 0x4211:
                case 0x4310:
                case 0x4311:
                case 0x4320:
                case 0x4321:
                case 0x4322:
                        sas = 1;
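                        /* FALLTHROUGH - the SAS models share the ITL interface */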
1257                 case 0x3220:
1258                 case 0x3320:
1259                 case 0x3410:
1260                 case 0x3520:
1261                 case 0x3510:
1262                 case 0x3511:
1263                 case 0x3521:
1264                 case 0x3522:
1265                 case 0x3530:
1266                 case 0x3540:
1267                 case 0x3560:
1268                         ops = &hptiop_itl_ops;
1269                         break;
1270                 case 0x3020:
1271                 case 0x3120:
1272                 case 0x3122:
1273                         ops = &hptiop_mv_ops;
1274                         break;
1275                 default:
1276                         return (ENXIO);
1277         }
1278
1279         device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
1280                 pci_get_bus(dev), pci_get_slot(dev),
1281                 pci_get_function(dev), pci_get_irq(dev));
1282
1283         ksprintf(buf, "RocketRAID %x %s Controller",
1284                                 id, sas ? "SAS" : "SATA");
1285         device_set_desc_copy(dev, buf);
1286
1287         hba = (struct hpt_iop_hba *)device_get_softc(dev);
1288         bzero(hba, sizeof(struct hpt_iop_hba));
1289         hba->ops = ops;
1290
1291         KdPrint(("hba->ops=%p\n", hba->ops));
1292         return 0;
1293 }
1294
1295 static int hptiop_attach(device_t dev)
1296 {
1297         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1298         struct hpt_iop_request_get_config  iop_config;
1299         struct hpt_iop_request_set_config  set_config;
1300         int rid = 0;
1301         struct cam_devq *devq;
1302         struct ccb_setasync ccb;
1303         u_int32_t unit = device_get_unit(dev);
1304
1305         device_printf(dev, "RocketRAID 3xxx/4xxx controller driver %s\n",
1306             driver_version);
1307
1308         KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
1309                 pci_get_bus(dev), pci_get_slot(dev),
1310                 pci_get_function(dev), hba->ops));
1311
1312         pci_enable_busmaster(dev);
1313         hba->pcidev = dev;
1314
1315         if (hba->ops->alloc_pci_res(hba))
1316                 return ENXIO;
1317
1318         if (hba->ops->iop_wait_ready(hba, 2000)) {
1319                 device_printf(dev, "adapter is not ready\n");
1320                 goto release_pci_res;
1321         }
1322
1323         lockinit(&hba->lock, "hptioplock", 0, LK_CANRECURSE);
1324
1325         if (bus_dma_tag_create(NULL,/* parent */
1326                         1,  /* alignment */
1327                         0, /* boundary */
1328                         BUS_SPACE_MAXADDR,  /* lowaddr */
1329                         BUS_SPACE_MAXADDR,  /* highaddr */
1330                         NULL, NULL,         /* filter, filterarg */
1331                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
1332                         BUS_SPACE_UNRESTRICTED, /* nsegments */
1333                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1334                         0,      /* flags */
1335                         &hba->parent_dmat   /* tag */))
1336         {
1337                 device_printf(dev, "alloc parent_dmat failed\n");
1338                 goto release_pci_res;
1339         }
1340
1341         if (hba->ops->internal_memalloc) {
1342                 if (hba->ops->internal_memalloc(hba)) {
1343                         device_printf(dev, "alloc srb_dmat failed\n");
1344                         goto destroy_parent_tag;
1345                 }
1346         }
1347
1348         if (hba->ops->get_config(hba, &iop_config)) {
1349                 device_printf(dev, "get iop config failed.\n");
1350                 goto get_config_failed;
1351         }
1352
1353         hba->firmware_version = iop_config.firmware_version;
1354         hba->interface_version = iop_config.interface_version;
1355         hba->max_requests = iop_config.max_requests;
1356         hba->max_devices = iop_config.max_devices;
1357         hba->max_request_size = iop_config.request_size;
1358         hba->max_sg_count = iop_config.max_sg_count;
1359
1360         if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1361                         4,  /* alignment */
1362                         BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1363                         BUS_SPACE_MAXADDR,  /* lowaddr */
1364                         BUS_SPACE_MAXADDR,  /* highaddr */
1365                         NULL, NULL,         /* filter, filterarg */
1366                         PAGE_SIZE * (hba->max_sg_count-1),  /* maxsize */
1367                         hba->max_sg_count,  /* nsegments */
1368                         0x20000,    /* maxsegsize */
1369                         BUS_DMA_ALLOCNOW,       /* flags */
1370                         &hba->io_dmat   /* tag */))
1371         {
1372                 device_printf(dev, "alloc io_dmat failed\n");
1373                 goto get_config_failed;
1374         }
1375
1376         if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1377                         1,  /* alignment */
1378                         0, /* boundary */
1379                         BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1380                         BUS_SPACE_MAXADDR,  /* highaddr */
1381                         NULL, NULL,         /* filter, filterarg */
1382                         HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
1383                         1,  /* nsegments */
1384                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1385                         0,      /* flags */
1386                         &hba->srb_dmat  /* tag */))
1387         {
1388                 device_printf(dev, "alloc srb_dmat failed\n");
1389                 goto destroy_io_dmat;
1390         }
1391
1392         if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
1393                         BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1394                         &hba->srb_dmamap) != 0)
1395         {
1396                 device_printf(dev, "srb bus_dmamem_alloc failed!\n");
1397                 goto destroy_srb_dmat;
1398         }
1399
1400         if (bus_dmamap_load(hba->srb_dmat,
1401                         hba->srb_dmamap, hba->uncached_ptr,
1402                         (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
1403                         hptiop_map_srb, hba, 0))
1404         {
1405                 device_printf(dev, "bus_dmamap_load failed!\n");
1406                 goto srb_dmamem_free;
1407         }
1408
1409         if ((devq = cam_simq_alloc(hba->max_requests - 1)) == NULL) {
1410                 device_printf(dev, "cam_simq_alloc failed\n");
1411                 goto srb_dmamap_unload;
1412         }
1413
1414         hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
1415                         hba, unit, &sim_mplock, hba->max_requests - 1, 1, devq);
1416         if (!hba->sim) {
1417                 device_printf(dev, "cam_sim_alloc failed\n");
1418                 cam_simq_release(devq);
1419                 goto srb_dmamap_unload;
1420         }
1421         if (xpt_bus_register(hba->sim, 0) != CAM_SUCCESS)
1422         {
1423                 device_printf(dev, "xpt_bus_register failed\n");
1424                 goto free_cam_sim;
1425         }
1426
1427         if (xpt_create_path(&hba->path, /*periph */ NULL,
1428                         cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
1429                         CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1430                 device_printf(dev, "xpt_create_path failed\n");
1431                 goto deregister_xpt_bus;
1432         }
1433
1434         bzero(&set_config, sizeof(set_config));
1435         set_config.iop_id = unit;
1436         set_config.vbus_id = cam_sim_path(hba->sim);
1437         set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
1438
1439         if (hba->ops->set_config(hba, &set_config)) {
1440                 device_printf(dev, "set iop config failed.\n");
1441                 goto free_hba_path;
1442         }
1443
1444         xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
1445         ccb.ccb_h.func_code = XPT_SASYNC_CB;
1446         ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
1447         ccb.callback = hptiop_async;
1448         ccb.callback_arg = hba->sim;
1449         xpt_action((union ccb *)&ccb);
1450
1451         rid = 0;
1452         if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ,
1453                         &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1454                 device_printf(dev, "allocate irq failed!\n");
1455                 goto free_hba_path;
1456         }
1457
1458         if (bus_setup_intr(hba->pcidev, hba->irq_res, 0,
1459                                 hptiop_pci_intr, hba, &hba->irq_handle, NULL))
1460         {
1461                 device_printf(dev, "bus_setup_intr failed!\n");
1462                 goto free_irq_resource;
1463         }
1464
1465         if (hptiop_send_sync_msg(hba,
1466                         IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
1467                 device_printf(dev, "failed to start background task\n");
1468                 goto teardown_irq_resource;
1469         }
1470
1471         hba->ops->enable_intr(hba);
1472
1473         hba->ioctl_dev = make_dev(&hptiop_ops, unit,
1474                                 UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
1475                                 S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);
1476
1477         hba->ioctl_dev->si_drv1 = hba;
1478
1479         hptiop_rescan_bus(hba);
1480
1481         return 0;
1482
1483
1484 teardown_irq_resource:
1485         bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
1486
1487 free_irq_resource:
1488         bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
1489
1490 free_hba_path:
1491         xpt_free_path(hba->path);
1492
1493 deregister_xpt_bus:
1494         xpt_bus_deregister(cam_sim_path(hba->sim));
1495
1496 free_cam_sim:
1497         cam_sim_free(hba->sim);
1498
1499 srb_dmamap_unload:
1500         if (hba->uncached_ptr)
1501                 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
1502
1503 srb_dmamem_free:
1504         if (hba->uncached_ptr)
1505                 bus_dmamem_free(hba->srb_dmat,
1506                         hba->uncached_ptr, hba->srb_dmamap);
1507
1508 destroy_srb_dmat:
1509         if (hba->srb_dmat)
1510                 bus_dma_tag_destroy(hba->srb_dmat);
1511
1512 destroy_io_dmat:
1513         if (hba->io_dmat)
1514                 bus_dma_tag_destroy(hba->io_dmat);
1515
1516 get_config_failed:
1517         if (hba->ops->internal_memfree)
1518                 hba->ops->internal_memfree(hba);
1519
1520 destroy_parent_tag:
1521         if (hba->parent_dmat)
1522                 bus_dma_tag_destroy(hba->parent_dmat);
1523
1524 release_pci_res:
1525         if (hba->ops->release_pci_res)
1526                 hba->ops->release_pci_res(hba);
1527
1528         return ENXIO;
1529 }
1530
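     /*
      * Device detach: refuse to detach while any "da" peripheral on this
      * bus is still referenced, then shut the adapter down, stop the
      * firmware background task and release all resources.
      */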
1531 static int hptiop_detach(device_t dev)
1532 {
1533         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
1534         int i;
1535         int error = EBUSY;
1536
1537         hptiop_lock_adapter(hba);
1538         for (i = 0; i < hba->max_devices; i++)
1539                 if (hptiop_os_query_remove_device(hba, i)) {
1540                         device_printf(dev, "file system is busy. id=%d\n", i);
1541                         goto out;
1542                 }
1543
1544         if ((error = hptiop_shutdown(dev)) != 0)
1545                 goto out;
1546         if (hptiop_send_sync_msg(hba,
1547                 IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
1548                 goto out;
1549
1550         hptiop_release_resource(hba);
1551         error = 0;
1552 out:
1553         hptiop_unlock_adapter(hba);
1554         return error;
1555 }
1556
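     /*
      * Quiesce the adapter: fail with EBUSY while the ioctl node is open,
      * otherwise mask interrupts and send a synchronous shutdown message
      * to the IOP.
      */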
1557 static int hptiop_shutdown(device_t dev)
1558 {
1559         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
1560
1561         int error = 0;
1562
1563         if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
1564                 device_printf(dev, "device is busy\n");
1565                 return EBUSY;
1566         }
1567
1568         hba->ops->disable_intr(hba);
1569
1570         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
1571                 error = EBUSY;
1572
1573         return error;
1574 }
1575
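     /*
      * Interrupt handler: the chip-specific iop_intr() method drains the
      * outbound queue under the adapter lock.  hptiop_poll() below reuses
      * it for CAM's polled path.
      */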
1576 static void hptiop_pci_intr(void *arg)
1577 {
1578         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
1579         hptiop_lock_adapter(hba);
1580         hba->ops->iop_intr(hba);
1581         hptiop_unlock_adapter(hba);
1582 }
1583
1584 static void hptiop_poll(struct cam_sim *sim)
1585 {
1586         hptiop_pci_intr(cam_sim_softc(sim));
1587 }
1588
1589 static void hptiop_async(void * callback_arg, u_int32_t code,
1590                                         struct cam_path * path, void * arg)
1591 {
1592 }
1593
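     /*
      * Interrupt masking.  On ITL controllers outbound_intmask is a mask
      * register (a set bit blocks that interrupt), so enabling means
      * writing the complement of the interesting bits; on MV controllers
      * the register appears to act as an enable mask instead, so bits are
      * OR-ed in to enable and cleared to disable.  The read-backs in the
      * disable paths flush the posted MMIO writes.
      */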
1594 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
1595 {
1596         BUS_SPACE_WRT4_ITL(outbound_intmask,
1597                 ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
1598 }
1599
1600 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
1601 {
1602         u_int32_t int_mask;
1603
1604         int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
1605
1606         int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
1607                         | MVIOP_MU_OUTBOUND_INT_MSG;
1608         BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
1609 }
1610
1611 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
1612 {
1613         u_int32_t int_mask;
1614
1615         int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
1616
1617         int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
1618         BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
1619         BUS_SPACE_RD4_ITL(outbound_intstatus);
1620 }
1621
1622 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
1623 {
1624         u_int32_t int_mask;
1625         int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
1626
1627         int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
1628                         | MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
1629         BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
1630         BUS_SPACE_RD4_MV0(outbound_intmask);
1631 }
1632
1633 static int hptiop_reset_adapter(struct hpt_iop_hba * hba)
1634 {
1635         return hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1636 }
1637
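     /*
      * SRB allocation: a simple LIFO free list threaded through
      * srb->next; callers are expected to hold the adapter lock.
      */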
1638 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
1639 {
1640         struct hpt_iop_srb * srb;
1641
1642         if (hba->srb_list) {
1643                 srb = hba->srb_list;
1644                 hba->srb_list = srb->next;
1645                 return srb;
1646         }
1647
1648         return NULL;
1649 }
1650
1651 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
1652 {
1653         srb->next = hba->srb_list;
1654         hba->srb_list = srb;
1655 }
1656
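     /*
      * CAM action entry point.  XPT_SCSI_IO maps the data buffer (via
      * bus_dmamap_load() for virtual addresses, or from a caller-supplied
      * physical S/G list) and posts the command to the IOP; CAM_DATA_PHYS
      * buffers are rejected.  The remaining function codes are answered
      * inline.
      */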
1657 static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
1658 {
1659         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
1660         struct hpt_iop_srb * srb;
1661
1662         switch (ccb->ccb_h.func_code) {
1663
1664         case XPT_SCSI_IO:
1665                 hptiop_lock_adapter(hba);
1666                 if (ccb->ccb_h.target_lun != 0 ||
1667                         ccb->ccb_h.target_id >= hba->max_devices ||
1668                         (ccb->ccb_h.flags & CAM_CDB_PHYS))
1669                 {
1670                         ccb->ccb_h.status = CAM_TID_INVALID;
1671                         xpt_done(ccb);
1672                         goto scsi_done;
1673                 }
1674
1675                 if ((srb = hptiop_get_srb(hba)) == NULL) {
1676                         device_printf(hba->pcidev, "srb allocation failed\n");
1677                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1678                         xpt_done(ccb);
1679                         goto scsi_done;
1680                 }
1681
1682                 srb->ccb = ccb;
1683
1684                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
1685                         hptiop_post_scsi_command(srb, NULL, 0, 0);
1686                 else if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
1687                         if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
1688                                 int error;
1689
1690                                 error = bus_dmamap_load(hba->io_dmat,
1691                                                 srb->dma_map,
1692                                                 ccb->csio.data_ptr,
1693                                                 ccb->csio.dxfer_len,
1694                                                 hptiop_post_scsi_command,
1695                                                 srb, 0);
1696
1697                                 if (error && error != EINPROGRESS) {
1698                                         device_printf(hba->pcidev,
1699                                             "bus_dmamap_load error %d\n", error);
1700                                         xpt_freeze_simq(hba->sim, 1);
1701                                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1702 invalid:
1703                                         hptiop_free_srb(hba, srb);
1704                                         xpt_done(ccb);
1705                                         goto scsi_done;
1706                                 }
1707                         }
1708                         else {
1709                                 device_printf(hba->pcidev,
1710                                         "CAM_DATA_PHYS not supported\n");
1711                                 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1712                                 goto invalid;
1713                         }
1714                 }
1715                 else {
1716                         struct bus_dma_segment *segs;
1717
1718                         if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 ||
1719                                 (ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
1720                                 device_printf(hba->pcidev, "unsupported S/G list flags\n");
1721                                 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
1722                                 goto invalid;
1723                         }
1724
1725                         segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
1726                         hptiop_post_scsi_command(srb, segs,
1727                                                 ccb->csio.sglist_cnt, 0);
1728                 }
1729
1730 scsi_done:
1731                 hptiop_unlock_adapter(hba);
1732                 return;
1733
1734         case XPT_RESET_BUS:
1735                 device_printf(hba->pcidev, "reset adapter\n");
1736                 hptiop_lock_adapter(hba);
1737                 hba->msg_done = 0;
1738                 hptiop_reset_adapter(hba);
1739                 hptiop_unlock_adapter(hba);
1740                 break;
1741
1742         case XPT_GET_TRAN_SETTINGS:
1743         case XPT_SET_TRAN_SETTINGS:
1744                 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1745                 break;
1746
1747         case XPT_CALC_GEOMETRY:
1748                 cam_calc_geometry(&ccb->ccg, 1);
1749                 break;
1750
1751         case XPT_PATH_INQ:
1752         {
1753                 struct ccb_pathinq *cpi = &ccb->cpi;
1754
1755                 cpi->version_num = 1;
1756                 cpi->hba_inquiry = PI_SDTR_ABLE;
1757                 cpi->target_sprt = 0;
1758                 cpi->hba_misc = PIM_NOBUSRESET;
1759                 cpi->hba_eng_cnt = 0;
1760                 cpi->max_target = hba->max_devices;
1761                 cpi->max_lun = 0;
1762                 cpi->unit_number = cam_sim_unit(sim);
1763                 cpi->bus_id = cam_sim_bus(sim);
1764                 cpi->initiator_id = hba->max_devices;
1765                 cpi->base_transfer_speed = 3300;
1766
1767                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1768                 strncpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
1769                 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1770                 cpi->transport = XPORT_SPI;
1771                 cpi->transport_version = 2;
1772                 cpi->protocol = PROTO_SCSI;
1773                 cpi->protocol_version = SCSI_REV_2;
1774                 cpi->ccb_h.status = CAM_REQ_CMP;
1775                 break;
1776         }
1777
1778         default:
1779                 ccb->ccb_h.status = CAM_REQ_INVALID;
1780                 break;
1781         }
1782
1783         xpt_done(ccb);
1784         return;
1785 }
1786
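     /*
      * Post a SCSI command to an ITL controller.  For SRBs the IOP cannot
      * reach directly (HPT_SRB_FLAG_HIGH_MEM_ACESS) the request is built
      * on the stack and copied into a firmware-supplied slot read from
      * the inbound queue; otherwise the host-resident SRB is posted by
      * physical address, with size hint bits added when firmware_version
      * or interface_version exceeds 0x01020000 (presumably 1.2).
      */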
1787 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
1788                                 struct hpt_iop_srb *srb,
1789                                 bus_dma_segment_t *segs, int nsegs)
1790 {
1791         int idx;
1792         union ccb *ccb = srb->ccb;
1793         u_int8_t *cdb;
1794
1795         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
1796                 cdb = ccb->csio.cdb_io.cdb_ptr;
1797         else
1798                 cdb = ccb->csio.cdb_io.cdb_bytes;
1799
1800         KdPrint(("ccb=%p %x-%x-%x\n",
1801                 ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
1802
1803         if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
1804                 u_int32_t iop_req32;
1805                 struct hpt_iop_request_scsi_command req;
1806
1807                 iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1808
1809                 if (iop_req32 == IOPMU_QUEUE_EMPTY) {
1810                         device_printf(hba->pcidev, "invalid req offset\n");
1811                         ccb->ccb_h.status = CAM_BUSY;
1812                         bus_dmamap_unload(hba->io_dmat, srb->dma_map);
1813                         hptiop_free_srb(hba, srb);
1814                         xpt_done(ccb);
1815                         return;
1816                 }
1817
1818                 if (ccb->csio.dxfer_len && nsegs > 0) {
1819                         struct hpt_iopsg *psg = req.sg_list;
1820                         for (idx = 0; idx < nsegs; idx++, psg++) {
1821                                 psg->pci_address = (u_int64_t)segs[idx].ds_addr;
1822                                 psg->size = segs[idx].ds_len;
1823                                 psg->eot = 0;
1824                         }
1825                         psg[-1].eot = 1;
1826                 }
1827
1828                 bcopy(cdb, req.cdb, ccb->csio.cdb_len);
1829
1830                 req.header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list)
1831                                 + nsegs*sizeof(struct hpt_iopsg);
1832                 req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
1833                 req.header.flags = 0;
1834                 req.header.result = IOP_RESULT_PENDING;
1835                 req.header.context = (u_int64_t)(unsigned long)srb;
1836                 req.dataxfer_length = ccb->csio.dxfer_len;
1837                 req.channel =  0;
1838                 req.target =  ccb->ccb_h.target_id;
1839                 req.lun =  ccb->ccb_h.target_lun;
1840
1841                 bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
1842                         (u_int8_t *)&req, req.header.size);
1843
1844                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1845                         bus_dmamap_sync(hba->io_dmat,
1846                                 srb->dma_map, BUS_DMASYNC_PREREAD);
1847                 }
1848                 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1849                         bus_dmamap_sync(hba->io_dmat,
1850                                 srb->dma_map, BUS_DMASYNC_PREWRITE);
1851
1852                 BUS_SPACE_WRT4_ITL(inbound_queue, iop_req32);
1853         } else {
1854                 struct hpt_iop_request_scsi_command *req;
1855
1856                 req = (struct hpt_iop_request_scsi_command *)srb;
1857                 if (ccb->csio.dxfer_len && nsegs > 0) {
1858                         struct hpt_iopsg *psg = req->sg_list;
1859                         for (idx = 0; idx < nsegs; idx++, psg++) {
1860                                 psg->pci_address =
1861                                         (u_int64_t)segs[idx].ds_addr;
1862                                 psg->size = segs[idx].ds_len;
1863                                 psg->eot = 0;
1864                         }
1865                         psg[-1].eot = 1;
1866                 }
1867
1868                 bcopy(cdb, req->cdb, ccb->csio.cdb_len);
1869
1870                 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
1871                 req->header.result = IOP_RESULT_PENDING;
1872                 req->dataxfer_length = ccb->csio.dxfer_len;
1873                 req->channel =  0;
1874                 req->target =  ccb->ccb_h.target_id;
1875                 req->lun =  ccb->ccb_h.target_lun;
1876                 req->header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list)
1877                         + nsegs*sizeof(struct hpt_iopsg);
1878                 req->header.context = (u_int64_t)srb->index |
1879                                                 IOPMU_QUEUE_ADDR_HOST_BIT;
1880                 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
1881
1882                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1883                         bus_dmamap_sync(hba->io_dmat,
1884                                 srb->dma_map, BUS_DMASYNC_PREREAD);
1885                 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1886                         bus_dmamap_sync(hba->io_dmat,
1887                                 srb->dma_map, BUS_DMASYNC_PREWRITE);
1888                 }
1889
1890                 if (hba->firmware_version > 0x01020000
1891                         || hba->interface_version > 0x01020000) {
1892                         u_int32_t size_bits;
1893
1894                         if (req->header.size < 256)
1895                                 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
1896                         else if (req->header.size < 512)
1897                                 size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
1898                         else
1899                                 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
1900                                                 | IOPMU_QUEUE_ADDR_HOST_BIT;
1901
1902                         BUS_SPACE_WRT4_ITL(inbound_queue,
1903                                 (u_int32_t)srb->phy_addr | size_bits);
1904                 } else
1905                         BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
1906                                 | IOPMU_QUEUE_ADDR_HOST_BIT);
1907         }
1908 }
1909
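     /*
      * Post a SCSI command to an MV controller: the request lives in the
      * host-resident SRB and its physical address is written to the
      * inbound queue together with MVIOP_MU_QUEUE_ADDR_HOST_BIT and a
      * size hint (header.size >> 8, capped at 3) in the low bits.
      */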
1910 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
1911                                 struct hpt_iop_srb *srb,
1912                                 bus_dma_segment_t *segs, int nsegs)
1913 {
1914         int idx, size;
1915         union ccb *ccb = srb->ccb;
1916         u_int8_t *cdb;
1917         struct hpt_iop_request_scsi_command *req;
1918         u_int64_t req_phy;
1919
1920         req = (struct hpt_iop_request_scsi_command *)srb;
1921         req_phy = srb->phy_addr;
1922
1923         if (ccb->csio.dxfer_len && nsegs > 0) {
1924                 struct hpt_iopsg *psg = req->sg_list;
1925                 for (idx = 0; idx < nsegs; idx++, psg++) {
1926                         psg->pci_address = (u_int64_t)segs[idx].ds_addr;
1927                         psg->size = segs[idx].ds_len;
1928                         psg->eot = 0;
1929                 }
1930                 psg[-1].eot = 1;
1931         }
1932         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
1933                 cdb = ccb->csio.cdb_io.cdb_ptr;
1934         else
1935                 cdb = ccb->csio.cdb_io.cdb_bytes;
1936
1937         bcopy(cdb, req->cdb, ccb->csio.cdb_len);
1938         req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
1939         req->header.result = IOP_RESULT_PENDING;
1940         req->dataxfer_length = ccb->csio.dxfer_len;
1941         req->channel = 0;
1942         req->target =  ccb->ccb_h.target_id;
1943         req->lun =  ccb->ccb_h.target_lun;
1944         req->header.size = sizeof(struct hpt_iop_request_scsi_command)
1945                                 - sizeof(struct hpt_iopsg)
1946                                 + nsegs * sizeof(struct hpt_iopsg);
1947         if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1948                 bus_dmamap_sync(hba->io_dmat,
1949                         srb->dma_map, BUS_DMASYNC_PREREAD);
1950         }
1951         else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1952                 bus_dmamap_sync(hba->io_dmat,
1953                         srb->dma_map, BUS_DMASYNC_PREWRITE);
1954         req->header.context = (u_int64_t)srb->index
1955                                         << MVIOP_REQUEST_NUMBER_START_BIT
1956                                         | MVIOP_CMD_TYPE_SCSI;
1957         req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
1958         size = req->header.size >> 8;
1959         hptiop_mv_inbound_write(req_phy
1960                         | MVIOP_MU_QUEUE_ADDR_HOST_BIT
1961                         | (size > 3 ? 3 : size), hba);
1962 }
1963
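     /*
      * busdma load callback: fail the CCB with CAM_BUSY if the mapping
      * failed or produced more segments than the IOP supports, otherwise
      * hand the S/G list to the chip-specific post_req() method.
      */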
1964 static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
1965                                         int nsegs, int error)
1966 {
1967         struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
1968         union ccb *ccb = srb->ccb;
1969         struct hpt_iop_hba *hba = srb->hba;
1970
1971         if (error || nsegs > hba->max_sg_count) {
1972                 KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n",
1973                         ccb->ccb_h.func_code,
1974                         ccb->ccb_h.target_id,
1975                         ccb->ccb_h.target_lun, nsegs));
1976                 ccb->ccb_h.status = CAM_BUSY;
1977                 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
1978                 hptiop_free_srb(hba, srb);
1979                 xpt_done(ccb);
1980                 return;
1981         }
1982
1983         hba->ops->post_req(hba, srb, segs, nsegs);
1984 }
1985
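     /*
      * busdma callback for the MV control/config buffer: round both the
      * bus and kernel virtual addresses up to a 32-byte boundary (the
      * allocation presumably carries 0x20 bytes of slack for this).
      */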
1986 static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
1987                                 int nsegs, int error)
1988 {
1989         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
1990         hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
1991                                 & ~(u_int64_t)0x1F;
1992         hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
1993                                 & ~0x1F);
1994 }
1995
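     /*
      * busdma callback for the SRB pool: carve the uncached buffer into
      * HPT_SRB_MAX_QUEUE_SIZE fixed-size, 32-byte-aligned SRBs, create a
      * DMA map for each, record its bus address (ITL stores it shifted
      * right by 5 so the low queue bits can carry flags, and marks SRBs
      * the IOP cannot address directly) and push them onto the free list.
      */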
1996 static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
1997                                 int nsegs, int error)
1998 {
1999         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2000         bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
2001         struct hpt_iop_srb *srb, *tmp_srb;
2002         int i;
2003
2004         if (error || nsegs == 0) {
2005                 device_printf(hba->pcidev, "hptiop_map_srb error\n");
2006                 return;
2007         }
2008
2009         /* map srb */
2010         srb = (struct hpt_iop_srb *)
2011                 (((unsigned long)hba->uncached_ptr + 0x1F)
2012                 & ~(unsigned long)0x1F);
2013
2014         for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2015                 tmp_srb = (struct hpt_iop_srb *)
2016                                         ((char *)srb + i * HPT_SRB_MAX_SIZE);
2017                 if (((unsigned long)tmp_srb & 0x1F) == 0) {
2018                         if (bus_dmamap_create(hba->io_dmat,
2019                                                 0, &tmp_srb->dma_map)) {
2020                                 device_printf(hba->pcidev, "dmamap create failed\n");
2021                                 return;
2022                         }
2023
2024                         bzero(tmp_srb, sizeof(struct hpt_iop_srb));
2025                         tmp_srb->hba = hba;
2026                         tmp_srb->index = i;
2027                         if (hba->ctlcfg_ptr == NULL) { /* ITL IOP */
2028                                 tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
2029                                                         (phy_addr >> 5);
2030                                 if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
2031                                         tmp_srb->srb_flag =
2032                                                 HPT_SRB_FLAG_HIGH_MEM_ACESS;
2033                         } else {
2034                                 tmp_srb->phy_addr = phy_addr;
2035                         }
2036
2037                         hptiop_free_srb(hba, tmp_srb);
2038                         hba->srb[i] = tmp_srb;
2039                         phy_addr += HPT_SRB_MAX_SIZE;
2040                 }
2041                 else {
2042                         device_printf(hba->pcidev, "invalid alignment\n");
2043                         return;
2044                 }
2045         }
2046 }
2047
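     /*
      * Message interrupt callback: flags completion of a synchronous
      * host-to-IOP message for hptiop_send_sync_msg() to pick up.
      */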
2048 static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
2049 {
2050         hba->msg_done = 1;
2051 }
2052
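     /*
      * Check whether the "da" peripheral attached to target_id is still
      * referenced; a nonzero refcount typically means a mounted file
      * system and blocks detach.
      */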
2053 static  int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
2054                                                 int target_id)
2055 {
2056         struct cam_periph       *periph = NULL;
2057         struct cam_path         *path;
2058         int                     status, retval = 0;
2059
2060         status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2061
2062         if (status == CAM_REQ_CMP) {
2063                 if ((periph = cam_periph_find(path, "da")) != NULL) {
2064                         if (periph->refcount >= 1) {
2065                                 device_printf(hba->pcidev, "target_id=0x%x, "
2066                                     "refcount=%d\n", target_id, periph->refcount);
2067                                 retval = -1;
2068                         }
2069                 }
2070                 xpt_free_path(path);
2071         }
2072         return retval;
2073 }
2074
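     /*
      * Tear down everything hptiop_attach() set up, roughly in reverse
      * order; each step is guarded so partially initialized state is
      * tolerated.
      */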
2075 static void hptiop_release_resource(struct hpt_iop_hba *hba)
2076 {
2077         int i;
2078         if (hba->path) {
2079                 struct ccb_setasync ccb;
2080
2081                 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2082                 ccb.ccb_h.func_code = XPT_SASYNC_CB;
2083                 ccb.event_enable = 0;
2084                 ccb.callback = hptiop_async;
2085                 ccb.callback_arg = hba->sim;
2086                 xpt_action((union ccb *)&ccb);
2087                 xpt_free_path(hba->path);
2088         }
2089
2090         if (hba->sim) {
2091                 xpt_bus_deregister(cam_sim_path(hba->sim));
2092                 cam_sim_free(hba->sim);
2093         }
2094
2095         if (hba->ctlcfg_dmat) {
2096                 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2097                 bus_dmamem_free(hba->ctlcfg_dmat,
2098                                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2099                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
2100         }
2101
2102         for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2103                 struct hpt_iop_srb *srb = hba->srb[i];
2104                 if (srb && srb->dma_map)
2105                         bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
2106         }
2107
2108         if (hba->srb_dmat) {
2109                 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2110                 bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
2111                 bus_dma_tag_destroy(hba->srb_dmat);
2112         }
2113
2114         if (hba->io_dmat)
2115                 bus_dma_tag_destroy(hba->io_dmat);
2116
2117         if (hba->parent_dmat)
2118                 bus_dma_tag_destroy(hba->parent_dmat);
2119
2120         if (hba->irq_handle)
2121                 bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
2122
2123         if (hba->irq_res)
2124                 bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2125                                         0, hba->irq_res);
2126
2127         if (hba->bar0_res)
2128                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2129                                         hba->bar0_rid, hba->bar0_res);
2130         if (hba->bar2_res)
2131                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2132                                         hba->bar2_rid, hba->bar2_res);
2133         if (hba->ioctl_dev)
2134                 destroy_dev(hba->ioctl_dev);
2135         dev_ops_remove_minor(&hptiop_ops, device_get_unit(hba->pcidev));
2136 }