2 * Copyright (c) 2010, LSI Corp.
4 * Author : Manjunath Ranganathaiah
5 * Support: freebsdraid@lsi.com
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of the <ORGANIZATION> nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
34 * $FreeBSD: src/sys/dev/tws/tws.c,v 1.3 2007/05/09 04:16:32 mrangana Exp $
37 #include <dev/raid/tws/tws.h>
38 #include <dev/raid/tws/tws_services.h>
39 #include <dev/raid/tws/tws_hdm.h>
41 #include <bus/cam/cam.h>
42 #include <bus/cam/cam_ccb.h>
/* Driver-private kmalloc type plus loader tunables (see TUNABLE_INT at EOF). */
44 MALLOC_DEFINE(M_TWS, "twsbuf", "buffers used by tws driver");
45 int tws_queue_depth = TWS_MAX_REQS;
46 int tws_enable_msi = 0;
47 int tws_enable_msix = 0;
/* Externs provided by the sibling tws_cam.c / tws_hdm.c / tws_services.c files. */
52 extern int tws_cam_attach(struct tws_softc *sc);
53 extern void tws_cam_detach(struct tws_softc *sc);
54 extern int tws_init_ctlr(struct tws_softc *sc);
55 extern boolean tws_ctlr_ready(struct tws_softc *sc);
56 extern void tws_turn_off_interrupts(struct tws_softc *sc);
57 extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req,
59 extern struct tws_request *tws_q_remove_request(struct tws_softc *sc,
60 struct tws_request *req, u_int8_t q_type );
61 extern struct tws_request *tws_q_remove_head(struct tws_softc *sc,
63 extern boolean tws_get_response(struct tws_softc *sc, u_int16_t *req_id);
64 extern boolean tws_ctlr_reset(struct tws_softc *sc);
65 extern void tws_intr(void *arg);
66 extern int tws_use_32bit_sgls;
/* Functions defined here but shared with the other tws source files. */
69 struct tws_request *tws_get_request(struct tws_softc *sc, u_int16_t type);
70 int tws_init_connect(struct tws_softc *sc, u_int16_t mc);
71 void tws_send_event(struct tws_softc *sc, u_int8_t event);
72 uint8_t tws_get_state(struct tws_softc *sc);
73 void tws_release_request(struct tws_request *req);
77 /* Function prototypes (file-local helpers; tws_ioctl lives in tws_user.c). */
78 static d_open_t tws_open;
79 static d_close_t tws_close;
80 static d_read_t tws_read;
81 static d_write_t tws_write;
82 extern d_ioctl_t tws_ioctl;
84 static int tws_init(struct tws_softc *sc);
85 static void tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs,
88 static int tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size);
89 static int tws_init_aen_q(struct tws_softc *sc);
90 static int tws_init_trace_q(struct tws_softc *sc);
91 static int tws_setup_irq(struct tws_softc *sc);
92 static int tws_setup_intr(struct tws_softc *sc, int irqs);
95 /* Character device entry points */
/* dev_ops table wiring the /dev/twsN node to the handlers above. */
97 static struct dev_ops tws_ops = {
100 .d_close = tws_close,
102 .d_write = tws_write,
103 .d_ioctl = tws_ioctl,
107 * In the cdevsw routines, we find our softc by using the si_drv1 member
108 * of struct cdev. We set this variable to point to our softc in our
109 * attach routine when we create the /dev entry.
/* d_open handler: recover the softc from si_drv1 and trace the call. */
113 tws_open(struct dev_open_args *ap)
115     cdev_t dev = ap->a_head.a_dev;
116     struct tws_softc *sc = dev->si_drv1;
119         TWS_TRACE_DEBUG(sc, "entry", dev, oflags);
/* d_close handler: trace-only; no per-open state is torn down here. */
124 tws_close(struct dev_close_args *ap)
126     cdev_t dev = ap->a_head.a_dev;
127     struct tws_softc *sc = dev->si_drv1;
130         TWS_TRACE_DEBUG(sc, "entry", dev, fflag);
/* d_read handler: trace-only stub; real user I/O goes through tws_ioctl. */
135 tws_read(struct dev_read_args *ap)
137     cdev_t dev = ap->a_head.a_dev;
138     struct tws_softc *sc = dev->si_drv1;
141         TWS_TRACE_DEBUG(sc, "entry", dev, ioflag);
/* d_write handler: trace-only stub, mirroring tws_read. */
146 tws_write(struct dev_write_args *ap)
148     cdev_t dev = ap->a_head.a_dev;
149     struct tws_softc *sc = dev->si_drv1;
152         TWS_TRACE_DEBUG(sc, "entry", dev, ioflag);
156 /* PCI Support Functions */
159 * Compare the device ID of this device against the IDs that this driver
160 * supports. If there is a match, set the description and return success.
/*
 * PCI probe: match on the single TWS vendor/device ID pair and set the
 * device description.  first_ctlr gates the version banner so it prints
 * only once regardless of controller count.
 */
163 tws_probe(device_t dev)
165     static u_int8_t first_ctlr = 1;
167     if ((pci_get_vendor(dev) == TWS_VENDOR_ID) &&
168         (pci_get_device(dev) == TWS_DEVICE_ID)) {
169         device_set_desc(dev, "LSI 3ware SAS/SATA Storage Controller");
171             kprintf("LSI 3ware device driver for SAS/SATA storage "
172                     "controllers, version: %s\n", TWS_DRIVER_VERSION_STRING);
181 /* Attach function is only called if the probe is successful. */
/*
 * Device attach, called only after a successful probe.  Order matters:
 * locks -> sysctl tree -> PCI command/BAR setup -> MMIO + MFA resources ->
 * IRQ -> callouts -> /dev node -> controller init -> CAM attach.  The tail
 * of this function is the error-unwind path that releases those resources
 * in reverse order.
 */
184 tws_attach(device_t dev)
186     struct tws_softc *sc = device_get_softc(dev);
191     /* Look up our softc and initialize its fields. */
193     sc->device_id = pci_get_device(dev);
194     sc->subvendor_id = pci_get_subvendor(dev);
195     sc->subdevice_id = pci_get_subdevice(dev);
197     /* Initialize mutexes */
198     lockinit(&sc->q_lock, "tws_q_lock", 0, LK_CANRECURSE);
199     lockinit(&sc->sim_lock, "tws_sim_lock", 0, LK_CANRECURSE);
200     lockinit(&sc->gen_lock, "tws_gen_lock", 0, LK_CANRECURSE);
201     lockinit(&sc->io_lock, "tws_io_lock", 0, LK_CANRECURSE);
203     if ( tws_init_trace_q(sc) == FAILURE )
204         kprintf("trace init failure\n");
205     /* send init event; tws_send_event asserts gen_lock is held */
206     lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
207     tws_send_event(sc, TWS_INIT_START);
208     lockmgr(&sc->gen_lock, LK_RELEASE);
211 #if _BYTE_ORDER == _BIG_ENDIAN
212     TWS_TRACE(sc, "BIG endian", 0, 0);
214     /* sysctl context setup */
215     sysctl_ctx_init(&sc->tws_clist);
216     sc->tws_oidp = SYSCTL_ADD_NODE(&sc->tws_clist,
217                                    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
218                                    device_get_nameunit(dev),
220     if ( sc->tws_oidp == NULL ) {
221         tws_log(sc, SYSCTL_TREE_NODE_ADD);
224     SYSCTL_ADD_STRING(&sc->tws_clist, SYSCTL_CHILDREN(sc->tws_oidp),
225                       OID_AUTO, "driver_version", CTLFLAG_RD,
226                       TWS_DRIVER_VERSION_STRING, 0, "TWS driver version");
228     cmd = pci_read_config(dev, PCIR_COMMAND, 2);
229     if ( (cmd & PCIM_CMD_PORTEN) == 0) {
230         tws_log(sc, PCI_COMMAND_READ);
233     /* Force the busmaster enable bit on. */
234     cmd |= PCIM_CMD_BUSMASTEREN;
235     pci_write_config(dev, PCIR_COMMAND, cmd, 2);
237     bar = pci_read_config(dev, TWS_PCI_BAR0, 4);
238     TWS_TRACE_DEBUG(sc, "bar0 ", bar, 0);
239     bar = pci_read_config(dev, TWS_PCI_BAR1, 4);
240     bar = bar & ~TWS_BIT2;
241     TWS_TRACE_DEBUG(sc, "bar1 ", bar, 0);
243     /* MFA base address is BAR2 register used for
244      * push mode. Firmware will eventually move to
245      * pull mode during which this needs to change
247 #ifndef TWS_PULL_MODE_ENABLE
248     sc->mfa_base = (u_int64_t)pci_read_config(dev, TWS_PCI_BAR2, 4);
249     sc->mfa_base = sc->mfa_base & ~TWS_BIT2;
250     TWS_TRACE_DEBUG(sc, "bar2 ", sc->mfa_base, 0);
253     /* allocate MMIO register space */
254     sc->reg_res_id = TWS_PCI_BAR1; /* BAR1 offset */
255     if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
256                                 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
258         tws_log(sc, ALLOC_MEMORY_RES);
261     sc->bus_tag = rman_get_bustag(sc->reg_res);
262     sc->bus_handle = rman_get_bushandle(sc->reg_res);
264 #ifndef TWS_PULL_MODE_ENABLE
265     /* Allocate bus space for inbound mfa */
266     sc->mfa_res_id = TWS_PCI_BAR2; /* BAR2 offset */
267     if ((sc->mfa_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
268                           &(sc->mfa_res_id), 0, ~0, 0x100000, RF_ACTIVE))
270         tws_log(sc, ALLOC_MEMORY_RES);
273     sc->bus_mfa_tag = rman_get_bustag(sc->mfa_res);
274     sc->bus_mfa_handle = rman_get_bushandle(sc->mfa_res);
277     /* Allocate and register our interrupt. */
278     sc->intr_type = TWS_INTx; /* default */
280     if ( tws_enable_msi )
281         sc->intr_type = TWS_MSI;
282     if ( tws_setup_irq(sc) == FAILURE ) {
283         tws_log(sc, ALLOC_MEMORY_RES);
288     callout_init(&sc->print_stats_handle);
289     callout_init(&sc->reset_cb_handle);
290     callout_init(&sc->reinit_handle);
293      * Create a /dev entry for this device.  The kernel will assign us
294      * a major number automatically.  We use the unit number of this
295      * device as the minor number and name the character device
298     sc->tws_cdev = make_dev(&tws_ops, device_get_unit(dev),
299                       UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "tws%u",
300                       device_get_unit(dev));
301     sc->tws_cdev->si_drv1 = sc;
303     if ( tws_init(sc) == FAILURE ) {
304         tws_log(sc, TWS_INIT_FAILURE);
307     if ( tws_init_ctlr(sc) == FAILURE ) {
308         tws_log(sc, TWS_CTLR_INIT_FAILURE);
311     if ((error = tws_cam_attach(sc))) {
312         tws_log(sc, TWS_CAM_ATTACH);
315     /* send init complete event */
316     lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
317     tws_send_event(sc, TWS_INIT_COMPLETE);
318     lockmgr(&sc->gen_lock, LK_RELEASE);
320     TWS_TRACE_DEBUG(sc, "attached successfully", 0, sc->device_id);
/* ---- error-unwind path: tear down in reverse order of acquisition ---- */
324     for(i=0;i<sc->irqs;i++) {
325         if (sc->intr_handle[i]) {
326             if ((error = bus_teardown_intr(sc->tws_dev,
327                          sc->irq_res[i], sc->intr_handle[i])))
328                 TWS_TRACE(sc, "bus teardown intr", 0, error);
331     destroy_dev(sc->tws_cdev);
332     dev_ops_remove_minor(&tws_ops, device_get_unit(sc->tws_dev));
334     for(i=0;i<sc->irqs;i++) {
335         if ( sc->irq_res[i] ){
336             if (bus_release_resource(sc->tws_dev,
337                      SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
338                 TWS_TRACE(sc, "bus irq res", 0, 0);
341 #ifndef TWS_PULL_MODE_ENABLE
345     if (bus_release_resource(sc->tws_dev,
346              SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
347         TWS_TRACE(sc, "bus release ", 0, sc->mfa_res_id);
350     if (bus_release_resource(sc->tws_dev,
351              SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
352         TWS_TRACE(sc, "bus release2 ", 0, sc->reg_res_id);
355     lockuninit(&sc->q_lock);
356     lockuninit(&sc->sim_lock);
357     lockuninit(&sc->gen_lock);
358     lockuninit(&sc->io_lock);
359     sysctl_ctx_free(&sc->tws_clist);
/*
 * Device detach: quiesce the controller (interrupts off, doorbell cleared,
 * re-init connect with mc=1 -- presumably the "shutdown" credit count,
 * TODO confirm), then free interrupts, IRQ/memory resources, per-request
 * allocations, locks, the /dev node and the sysctl tree.
 */
366 tws_detach(device_t dev)
368     struct tws_softc *sc = device_get_softc(dev);
372     TWS_TRACE_DEBUG(sc, "entry", 0, 0);
374     lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
375     tws_send_event(sc, TWS_UNINIT_START);
376     lockmgr(&sc->gen_lock, LK_RELEASE);
378     /* needs to disable interrupt before detaching from cam */
379     tws_turn_off_interrupts(sc);
380     /* clear door bell */
381     tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
382     reg = tws_read_reg(sc, TWS_I2O0_HIMASK, 4);
383     TWS_TRACE_DEBUG(sc, "turn-off-intr", reg, 0);
384     sc->obfl_q_overrun = false;
385     tws_init_connect(sc, 1);
387     /* Teardown the state in our softc created in our attach routine. */
388     /* Disconnect the interrupt handler. */
389     for(i=0;i<sc->irqs;i++) {
390         if (sc->intr_handle[i]) {
391             if ((error = bus_teardown_intr(sc->tws_dev,
392                          sc->irq_res[i], sc->intr_handle[i])))
393                 TWS_TRACE(sc, "bus teardown intr", 0, error);
396     /* Release irq resource */
397     for(i=0;i<sc->irqs;i++) {
398         if ( sc->irq_res[i] ){
399             if (bus_release_resource(sc->tws_dev,
400                      SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
401                 TWS_TRACE(sc, "bus release irq resource",
402                                        i, sc->irq_res_id[i]);
405     if ( sc->intr_type == TWS_MSI ) {
406         pci_release_msi(sc->tws_dev);
411     /* Release memory resource */
413     if (bus_release_resource(sc->tws_dev,
414              SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
415         TWS_TRACE(sc, "bus release mem resource", 0, sc->mfa_res_id);
418     if (bus_release_resource(sc->tws_dev,
419              SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
420         TWS_TRACE(sc, "bus release mem resource", 0, sc->reg_res_id);
/* Free buffers allocated by tws_init()/tws_init_*_q(). */
423     kfree(sc->reqs, M_TWS);
424     kfree(sc->sense_bufs, M_TWS);
425     kfree(sc->scan_ccb, M_TWS);
426     kfree(sc->aen_q.q, M_TWS);
427     kfree(sc->trace_q.q, M_TWS);
428     lockuninit(&sc->q_lock);
429     lockuninit(&sc->sim_lock);
430     lockuninit(&sc->gen_lock);
431     lockuninit(&sc->io_lock);
432     destroy_dev(sc->tws_cdev);
433     dev_ops_remove_minor(&tws_ops, device_get_unit(sc->tws_dev));
434     sysctl_ctx_free(&sc->tws_clist);
/*
 * Register tws_intr() as the handler for each of the `irqs` allocated
 * IRQ resources; logs and fails on the first bus_setup_intr error.
 */
439 tws_setup_intr(struct tws_softc *sc, int irqs)
443     for(i=0;i<irqs;i++) {
444         if ((error = bus_setup_intr(sc->tws_dev, sc->irq_res[i],
446                          tws_intr, sc, &sc->intr_handle[i], NULL))) {
447             tws_log(sc, SETUP_INTR_RES);
/*
 * Allocate the interrupt resource according to sc->intr_type
 * (legacy INTx or single-message MSI) and hook up the handler(s)
 * via tws_setup_intr().
 */
457 tws_setup_irq(struct tws_softc *sc)
462     cmd = pci_read_config(sc->tws_dev, PCIR_COMMAND, 2);
463     switch(sc->intr_type) {
/* INTx case: rid 0, shareable. */
466         pci_write_config(sc->tws_dev, PCIR_COMMAND, cmd, 2);
468         sc->irq_res_id[0] = 0;
469         sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ,
470                       &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE);
471         if ( ! sc->irq_res[0] )
473         if ( tws_setup_intr(sc, sc->irqs) == FAILURE )
475         device_printf(sc->tws_dev, "Using legacy INTx\n");
/* MSI case: rid 1; pci_alloc_msi() must succeed first. */
479         pci_write_config(sc->tws_dev, PCIR_COMMAND, cmd, 2);
481         sc->irq_res_id[0] = 1;
483         if (pci_alloc_msi(sc->tws_dev, &messages) != 0 ) {
484             TWS_TRACE(sc, "pci alloc msi fail", 0, messages);
487         sc->irq_res[0] = bus_alloc_resource_any(sc->tws_dev, SYS_RES_IRQ,
488                       &sc->irq_res_id[0], RF_SHAREABLE | RF_ACTIVE);
490         if ( !sc->irq_res[0] )
492         if ( tws_setup_intr(sc, sc->irqs) == FAILURE )
494         device_printf(sc->tws_dev, "Using MSI\n");
/*
 * Core software/DMA initialization: clamp the queue depth, create the
 * parent/command/data bus_dma tags, allocate and load the shared command
 * packet area, allocate request/sense/scan-ccb arrays, reset the
 * controller if it is not ready, then initialize the request and AEN
 * queues.
 */
503 tws_init(struct tws_softc *sc)
506     u_int32_t max_sg_elements;
507     u_int32_t dma_mem_size;
/* Clamp tunable queue depth into [TWS_RESERVED_REQS+1, TWS_MAX_REQS]. */
512     if ( tws_queue_depth > TWS_MAX_REQS )
513         tws_queue_depth = TWS_MAX_REQS;
514     if (tws_queue_depth < TWS_RESERVED_REQS+1)
515         tws_queue_depth = TWS_RESERVED_REQS+1;
516     sc->is64bit = (sizeof(bus_addr_t) == 8) ? true : false;
517     max_sg_elements = (sc->is64bit && !tws_use_32bit_sgls) ?
518                                  TWS_MAX_64BIT_SG_ELEMENTS :
519                                  TWS_MAX_32BIT_SG_ELEMENTS;
520     dma_mem_size = (sizeof(struct tws_command_packet) * tws_queue_depth) +
/* Parent tag: umbrella for the command and data tags below. */
522     if ( bus_dma_tag_create(NULL,                    /* parent */
523                             TWS_ALIGNMENT,           /* alignment */
525                             BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
526                             BUS_SPACE_MAXADDR,       /* highaddr */
527                             NULL, NULL,              /* filter, filterarg */
528                             BUS_SPACE_MAXSIZE,       /* maxsize */
529                             max_sg_elements,         /* numsegs */
530                             BUS_SPACE_MAXSIZE,       /* maxsegsize */
532                             &sc->parent_tag          /* tag */
534         TWS_TRACE_DEBUG(sc, "DMA parent tag Create fail", max_sg_elements,
538     /* In bound message frame requires 16byte alignment.
539      * Outbound MF's can live with 4byte alignment - for now just
542     if ( bus_dma_tag_create(sc->parent_tag,          /* parent */
543                             TWS_IN_MF_ALIGNMENT,     /* alignment */
545                             BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
546                             BUS_SPACE_MAXADDR,       /* highaddr */
547                             NULL, NULL,              /* filter, filterarg */
548                             dma_mem_size,            /* maxsize */
550                             BUS_SPACE_MAXSIZE,       /* maxsegsize */
552                             &sc->cmd_tag             /* tag */
554         TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
558     if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
559                     BUS_DMA_NOWAIT, &sc->cmd_map)) {
560         TWS_TRACE_DEBUG(sc, "DMA mem alloc fail", max_sg_elements, sc->is64bit);
564     /* if bus_dmamem_alloc succeeds then bus_dmamap_load will succeed */
566     error = bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
567                             dma_mem_size, tws_dmamap_cmds_load_cbfn,
568                             &sc->dma_mem_phys, 0);
570     if ( error == EINPROGRESS )
571         TWS_TRACE_DEBUG(sc, "req queued", max_sg_elements, sc->is64bit);
574      * Create a dma tag for data buffers; size will be the maximum
575      * possible I/O size (128kB).
577     if (bus_dma_tag_create(sc->parent_tag,         /* parent */
578                            TWS_ALIGNMENT,          /* alignment */
580                            BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
581                            BUS_SPACE_MAXADDR,      /* highaddr */
582                            NULL, NULL,             /* filter, filterarg */
583                            TWS_MAX_IO_SIZE,        /* maxsize */
584                            max_sg_elements,        /* nsegments */
585                            TWS_MAX_IO_SIZE,        /* maxsegsize */
586                            BUS_DMA_ALLOCALL |      /* flags */
590                            &sc->data_tag           /* tag */)) {
591         TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
/* Host-side per-request bookkeeping; freed in tws_detach(). */
595     sc->reqs = kmalloc(sizeof(struct tws_request) * tws_queue_depth, M_TWS,
597     sc->sense_bufs = kmalloc(sizeof(struct tws_sense) * tws_queue_depth, M_TWS,
599     sc->scan_ccb = kmalloc(sizeof(union ccb), M_TWS, M_WAITOK | M_ZERO);
601     if ( !tws_ctlr_ready(sc) )
602         if( !tws_ctlr_reset(sc) )
605     bzero(&sc->stats, sizeof(struct tws_stats));
607     tws_turn_off_interrupts(sc);
610      * enable pull mode by setting bit1 .
611      * setting bit0 to 1 will enable interrupt coalescing
615 #ifdef TWS_PULL_MODE_ENABLE
617     reg = tws_read_reg(sc, TWS_I2O0_CTL, 4);
618     TWS_TRACE_DEBUG(sc, "i20 ctl", reg, TWS_I2O0_CTL);
619     tws_write_reg(sc, TWS_I2O0_CTL, reg | TWS_BIT1, 4);
623     TWS_TRACE_DEBUG(sc, "dma_mem_phys", sc->dma_mem_phys, TWS_I2O0_CTL);
624     if ( tws_init_reqs(sc, dma_mem_size) == FAILURE )
626     if ( tws_init_aen_q(sc) == FAILURE )
/* Allocate the zeroed AEN (asynchronous event notification) ring buffer. */
634 tws_init_aen_q(struct tws_softc *sc)
639     sc->aen_q.overflow=0;
640     sc->aen_q.q = kmalloc(sizeof(struct tws_event_packet)*sc->aen_q.depth,
641                           M_TWS, M_WAITOK | M_ZERO);
/* Allocate the 256-entry zeroed trace ring used by TWS_TRACE*(). */
646 tws_init_trace_q(struct tws_softc *sc)
650     sc->trace_q.depth=256;
651     sc->trace_q.overflow=0;
652     sc->trace_q.q = kmalloc(sizeof(struct tws_trace_rec)*sc->trace_q.depth,
653                             M_TWS, M_WAITOK | M_ZERO);
/*
 * Carve the DMA command area into per-request command packets, wire each
 * request's dma map, physical addresses and sense buffer, then park the
 * non-reserved requests on the free queue (the first TWS_RESERVED_REQS
 * slots are kept back for internal/AEN/passthru use).
 */
658 tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size)
661     struct tws_command_packet *cmd_buf;
662     cmd_buf = (struct tws_command_packet *)sc->dma_mem;
665     bzero(cmd_buf, dma_mem_size);
666     TWS_TRACE_DEBUG(sc, "phy cmd", sc->dma_mem_phys, 0);
667     lockmgr(&sc->q_lock, LK_EXCLUSIVE);
668     for ( i=0; i< tws_queue_depth; i++)
670         if (bus_dmamap_create(sc->data_tag, 0, &sc->reqs[i].dma_map)) {
671             /* log a ENOMEM failure msg here */
674         sc->reqs[i].cmd_pkt =  &cmd_buf[i];
/* Sense buffer shares the command packet's header area. */
676         sc->sense_bufs[i].hdr = &cmd_buf[i].hdr ;
677         sc->sense_bufs[i].hdr_pkt_phy = sc->dma_mem_phys +
678                               (i * sizeof(struct tws_command_packet));
679         sc->sense_bufs[i].posted = false;
/* cmd_pkt_phy points past the header to the command body itself. */
681         sc->reqs[i].cmd_pkt_phy = sc->dma_mem_phys +
682                               sizeof(struct tws_command_header) +
683                               (i * sizeof(struct tws_command_packet));
684         sc->reqs[i].request_id = i;
687         sc->reqs[i].cmd_pkt->hdr.header_desc.size_header = 128;
689         sc->reqs[i].state = TWS_REQ_STATE_FREE;
690         if ( i >= TWS_RESERVED_REQS )
691             tws_q_insert_tail(sc, &sc->reqs[i], TWS_FREE_Q);
693     lockmgr(&sc->q_lock, LK_RELEASE);
/*
 * bus_dmamap_load callback for the command area: stores the single
 * segment's bus address into the caller-supplied bus_addr_t
 * (sc->dma_mem_phys).
 */
698 tws_dmamap_cmds_load_cbfn(void *arg, bus_dma_segment_t *segs,
702     /* kprintf("command load done \n"); */
704     *((bus_addr_t *)arg) = segs[0].ds_addr;
/*
 * Driver state machine.  Caller MUST hold gen_lock (asserted below).
 * Transitions: INIT_START->INIT, INIT_COMPLETE->ONLINE,
 * RESET_START->RESET (from any non-RESET state, saving the previous
 * state), RESET_COMPLETE->previous state, SCAN_FAILURE->OFFLINE,
 * UNINIT_START->UNINIT.
 */
708 tws_send_event(struct tws_softc *sc, u_int8_t event)
710     KKASSERT(lockstatus(&sc->gen_lock, curthread) != 0);
711     TWS_TRACE_DEBUG(sc, "received event ", 0, event);
715             sc->tws_state = TWS_INIT;
718         case TWS_INIT_COMPLETE:
719             KASSERT(sc->tws_state == TWS_INIT , ("invalid state transition"));
720             sc->tws_state = TWS_ONLINE;
723         case TWS_RESET_START:
724             /* multiple reset ? */
725             KASSERT(sc->tws_state != TWS_RESET, ("invalid state transition"));
727             /* we can transition to reset state from any state */
728             sc->tws_prev_state = sc->tws_state;
729             sc->tws_state = TWS_RESET;
732         case TWS_RESET_COMPLETE:
733             KASSERT(sc->tws_state == TWS_RESET, ("invalid state transition"));
734             sc->tws_state = sc->tws_prev_state;
737         case TWS_SCAN_FAILURE:
738             KASSERT(sc->tws_state == TWS_ONLINE , ("invalid state transition"));
739             sc->tws_state = TWS_OFFLINE;
742         case TWS_UNINIT_START:
743             KASSERT(sc->tws_state == TWS_ONLINE || sc->tws_state == TWS_OFFLINE,
744                            ("invalid state transition"));
745             sc->tws_state = TWS_UNINIT;
/* Return the current driver state (set by tws_send_event). */
752 tws_get_state(struct tws_softc *sc)
755     return((u_int8_t)sc->tws_state);
759 /* Called during system shutdown after sync. */
/*
 * System-shutdown hook: mask interrupts and re-issue the init-connect
 * (mc=1) so the controller quiesces cleanly; no resources are freed.
 */
762 tws_shutdown(device_t dev)
765     struct tws_softc *sc = device_get_softc(dev);
767     TWS_TRACE_DEBUG(sc, "entry", 0, 0);
769     tws_turn_off_interrupts(sc);
770     tws_init_connect(sc, 1);
776 * Device suspend routine.
/* Device suspend: trace-only stub; no power-management work is done. */
779 tws_suspend(device_t dev)
781     struct tws_softc *sc = device_get_softc(dev);
784         TWS_TRACE_DEBUG(sc, "entry", 0, 0);
789 * Device resume routine.
/* Device resume: trace-only stub, mirroring tws_suspend. */
792 tws_resume(device_t dev)
795     struct tws_softc *sc = device_get_softc(dev);
798         TWS_TRACE_DEBUG(sc, "entry", 0, 0);
/*
 * Hand out a request of the given type.  The internal/AEN/passthru/
 * getset-param types each map to a dedicated reserved slot guarded by
 * gen_lock (returned only if currently FREE, then marked BUSY); SCSI I/O
 * requests come off the free queue under q_lock and are marked TRAN.
 * Returns NULL if the slot/queue is exhausted.  On success the command
 * packet is zeroed and the request fields reset before returning.
 */
804 tws_get_request(struct tws_softc *sc, u_int16_t type)
806     struct tws_request *r = NULL;
809         case TWS_INTERNAL_CMD_REQ :
810             lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
812             if ( r->state != TWS_REQ_STATE_FREE ) {
815                 r->state = TWS_REQ_STATE_BUSY;
817             lockmgr(&sc->gen_lock, LK_RELEASE);
819         case TWS_AEN_FETCH_REQ :
820             lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
822             if ( r->state != TWS_REQ_STATE_FREE ) {
825                 r->state = TWS_REQ_STATE_BUSY;
827             lockmgr(&sc->gen_lock, LK_RELEASE);
829         case TWS_PASSTHRU_REQ :
830             lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
832             if ( r->state != TWS_REQ_STATE_FREE ) {
835                 r->state = TWS_REQ_STATE_BUSY;
837             lockmgr(&sc->gen_lock, LK_RELEASE);
839         case TWS_GETSET_PARAM_REQ :
840             lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
842             if ( r->state != TWS_REQ_STATE_FREE ) {
845                 r->state = TWS_REQ_STATE_BUSY;
847             lockmgr(&sc->gen_lock, LK_RELEASE);
849         case TWS_SCSI_IO_REQ :
850             lockmgr(&sc->q_lock, LK_EXCLUSIVE);
851             r = tws_q_remove_head(sc, TWS_FREE_Q);
853                 r->state = TWS_REQ_STATE_TRAN;
854             lockmgr(&sc->q_lock, LK_RELEASE);
857             TWS_TRACE_DEBUG(sc, "Unknown req type", 0, type);
/* Common reset of the handed-out request before returning it. */
863         bzero(&r->cmd_pkt->cmd, sizeof(struct tws_command_apache));
864         callout_init(&r->thandle);
868         r->flags = TWS_DIR_UNKNOWN;
869         r->error_code = TWS_REQ_ERR_INVALID;
872         r->next = r->prev = NULL;
/* Return a SCSI I/O request to the free queue (q_lock protected). */
878 tws_release_request(struct tws_request *req)
881     struct tws_softc *sc = req->sc;
883     TWS_TRACE_DEBUG(sc, "entry", sc, 0);
884     lockmgr(&sc->q_lock, LK_EXCLUSIVE);
885     tws_q_insert_tail(sc, req, TWS_FREE_Q);
886     lockmgr(&sc->q_lock, LK_RELEASE);
/* newbus method table, driver descriptor and module registration. */
889 static device_method_t tws_methods[] = {
890     /* Device interface */
891     DEVMETHOD(device_probe,     tws_probe),
892     DEVMETHOD(device_attach,    tws_attach),
893     DEVMETHOD(device_detach,    tws_detach),
894     DEVMETHOD(device_shutdown,  tws_shutdown),
895     DEVMETHOD(device_suspend,   tws_suspend),
896     DEVMETHOD(device_resume,    tws_resume),
898     DEVMETHOD(bus_print_child,      bus_generic_print_child),
899     DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
903 static driver_t tws_driver = {
906         sizeof(struct tws_softc)
910 static devclass_t tws_devclass;
912 /* DEFINE_CLASS_0(tws, tws_driver, tws_methods, sizeof(struct tws_softc)); */
913 DRIVER_MODULE(tws, pci, tws_driver, tws_devclass, 0, 0);
914 MODULE_DEPEND(tws, cam, 1, 1, 1);
915 MODULE_DEPEND(tws, pci, 1, 1, 1);
/* Loader tunables for the globals declared at the top of this file. */
917 TUNABLE_INT("hw.tws.queue_depth", &tws_queue_depth);
918 #if 0 /* XXX swildner */
919 TUNABLE_INT("hw.tws.enable_msi", &tws_enable_msi);