2 * Copyright (c) 2010, LSI Corp.
4 * Author : Manjunath Ranganathaiah
5 * Support: freebsdraid@lsi.com
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of the <ORGANIZATION> nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
34 * $FreeBSD: src/sys/dev/tws/tws_cam.c,v 1.3 2007/05/09 04:16:32 mrangana Exp $
37 #include <dev/raid/tws/tws.h>
38 #include <dev/raid/tws/tws_services.h>
39 #include <dev/raid/tws/tws_hdm.h>
40 #include <dev/raid/tws/tws_user.h>
41 #include <bus/cam/cam.h>
42 #include <bus/cam/cam_ccb.h>
43 #include <bus/cam/cam_sim.h>
44 #include <bus/cam/cam_xpt_sim.h>
45 #include <bus/cam/cam_debug.h>
46 #include <bus/cam/cam_periph.h>
48 #include <bus/cam/scsi/scsi_all.h>
49 #include <bus/cam/scsi/scsi_message.h>
/*
 * Default CAM queue depth: total controller requests minus the requests
 * reserved for internal/ioctl use.  Overridable at boot via the
 * hw.tws.cam_depth tunable (TUNABLE_INT at the bottom of this file).
 */
51 static int tws_cam_depth=(TWS_MAX_REQS - TWS_RESERVED_REQS);
/* Severity code (1..4) -> printable string for AEN event packets; index 0 unused. */
52 static char tws_sev_str[5][8]={"","ERROR","WARNING","INFO","DEBUG"};
/* Forward declarations: CAM SIM entry points private to this file. */
54 static void tws_action(struct cam_sim *sim, union ccb *ccb);
55 static void tws_poll(struct cam_sim *sim);
56 static void tws_bus_scan_cb(struct cam_periph *periph, union ccb *ccb);
57 static void tws_scsi_complete(struct tws_request *req);
/* Public entry points exported to the rest of the tws driver. */
61 void tws_unmap_request(struct tws_softc *sc, struct tws_request *req);
62 int32_t tws_map_request(struct tws_softc *sc, struct tws_request *req);
63 int tws_bus_scan(struct tws_softc *sc);
64 int tws_cam_attach(struct tws_softc *sc);
65 void tws_cam_detach(struct tws_softc *sc);
66 void tws_reset(void *arg);
/* Reset / I/O submission helpers private to this file. */
68 static void tws_reset_cb(void *arg);
69 static void tws_reinit(void *arg);
70 static int32_t tws_execute_scsi(struct tws_softc *sc, union ccb *ccb);
71 static void tws_freeze_simq(struct tws_softc *sc);
72 static void tws_release_simq(struct tws_softc *sc);
73 static void tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
75 static void tws_fill_sg_list(struct tws_softc *sc, void *sgl_src,
76 void *sgl_dest, u_int16_t num_sgl_entries);
77 static void tws_err_complete(struct tws_softc *sc, u_int64_t mfa);
78 static void tws_scsi_err_complete(struct tws_request *req,
79 struct tws_command_header *hdr);
80 static void tws_passthru_err_complete(struct tws_request *req,
81 struct tws_command_header *hdr);
/* Interrupt and completion handlers. */
84 static void tws_timeout(void *arg);
85 static void tws_intr_attn_aen(struct tws_softc *sc);
86 static void tws_intr_attn_error(struct tws_softc *sc);
87 static void tws_intr_resp(struct tws_softc *sc);
88 void tws_intr(void *arg);
89 void tws_cmd_complete(struct tws_request *req);
90 void tws_aen_complete(struct tws_request *req);
91 int tws_send_scsi_cmd(struct tws_softc *sc, int cmd);
92 void tws_getset_param_complete(struct tws_request *req);
93 int tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
94 u_int32_t param_size, void *data);
95 int tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
96 u_int32_t param_size, void *data);
/*
 * Helpers implemented in other tws driver translation units
 * (request pool, queue management, hardware access, AEN plumbing).
 */
99 extern struct tws_request *tws_get_request(struct tws_softc *sc,
101 extern void *tws_release_request(struct tws_request *req);
102 extern int tws_submit_command(struct tws_softc *sc, struct tws_request *req);
103 extern boolean tws_get_response(struct tws_softc *sc,
104 u_int16_t *req_id, u_int64_t *mfa);
105 extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req,
107 extern struct tws_request * tws_q_remove_request(struct tws_softc *sc,
108 struct tws_request *req, u_int8_t q_type );
109 extern void tws_send_event(struct tws_softc *sc, u_int8_t event);
111 extern struct tws_sense *
112 tws_find_sense_from_mfa(struct tws_softc *sc, u_int64_t mfa);
114 extern void tws_fetch_aen(void *arg);
115 extern void tws_disable_db_intr(struct tws_softc *sc);
116 extern void tws_enable_db_intr(struct tws_softc *sc);
117 extern void tws_passthru_complete(struct tws_request *req);
118 extern void tws_aen_synctime_with_host(struct tws_softc *sc);
119 extern void tws_circular_aenq_insert(struct tws_softc *sc,
120 struct tws_circular_q *cq, struct tws_event_packet *aen);
121 extern int tws_use_32bit_sgls;
122 extern boolean tws_ctlr_reset(struct tws_softc *sc);
123 extern struct tws_request * tws_q_remove_tail(struct tws_softc *sc,
125 extern void tws_turn_off_interrupts(struct tws_softc *sc);
126 extern void tws_turn_on_interrupts(struct tws_softc *sc);
127 extern int tws_init_connect(struct tws_softc *sc, u_int16_t mc);
128 extern void tws_init_obfl_q(struct tws_softc *sc);
129 extern uint8_t tws_get_state(struct tws_softc *sc);
130 extern void tws_assert_soft_reset(struct tws_softc *sc);
131 extern boolean tws_ctlr_ready(struct tws_softc *sc);
132 extern u_int16_t tws_poll4_response(struct tws_softc *sc, u_int64_t *mfa);
/*
 * tws_cam_attach -- attach this controller instance to the CAM layer.
 *
 * Clamps the tws_cam_depth tunable into a sane range, allocates the SIM
 * device queue, creates and registers the SIM, builds a wildcard path
 * for the bus, and kicks off an initial bus scan.  Error paths log via
 * tws_log() and release sim_lock before returning.
 */
137 tws_cam_attach(struct tws_softc *sc)
139 struct cam_devq *devq;
142 TWS_TRACE_DEBUG(sc, "entry", 0, sc);
143 /* Create a device queue for sim */
146 * if the user sets cam depth to less than 1
147 * cam may get confused
149 if ( tws_cam_depth < 1 )
/* Never claim more simultaneous requests than the controller minus reserved ones. */
151 if ( tws_cam_depth > (tws_queue_depth - TWS_RESERVED_REQS) )
152 tws_cam_depth = tws_queue_depth - TWS_RESERVED_REQS;
154 TWS_TRACE_DEBUG(sc, "depths,ctlr,cam", tws_queue_depth, tws_cam_depth);
156 if ((devq = cam_simq_alloc(tws_cam_depth)) == NULL) {
157 tws_log(sc, CAM_SIMQ_ALLOC);
162 * Create a SIM entry. Though we can support tws_cam_depth
163 * simultaneous requests, we claim to be able to handle only
164 * (tws_cam_depth), so that we always have reserved requests
165 * packet available to service ioctls and internal commands.
167 sc->sim = cam_sim_alloc(tws_action, tws_poll, "tws", sc,
168 device_get_unit(sc->tws_dev),
170 tws_cam_depth, 1, devq);
/* cam_sim_alloc keeps its own reference; drop ours on the devq here. */
172 cam_simq_release(devq);
173 if (sc->sim == NULL) {
174 tws_log(sc, CAM_SIM_ALLOC);
176 /* Register the bus. */
177 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
178 if (xpt_bus_register(sc->sim, 0) != CAM_SUCCESS) {
179 cam_sim_free(sc->sim);
180 sc->sim = NULL; /* so cam_detach will not try to free it */
181 lockmgr(&sc->sim_lock, LK_RELEASE);
182 tws_log(sc, TWS_XPT_BUS_REGISTER);
/* Wildcard path covering every target/lun on our freshly registered bus. */
185 if (xpt_create_path(&sc->path, NULL, cam_sim_path(sc->sim),
187 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
188 xpt_bus_deregister(cam_sim_path(sc->sim));
189 cam_sim_free(sc->sim);
190 tws_log(sc, TWS_XPT_CREATE_PATH);
191 lockmgr(&sc->sim_lock, LK_RELEASE);
194 if ((error = tws_bus_scan(sc))) {
195 tws_log(sc, TWS_BUS_SCAN_REQ);
196 lockmgr(&sc->sim_lock, LK_RELEASE);
199 lockmgr(&sc->sim_lock, LK_RELEASE);
/*
 * tws_cam_detach -- undo tws_cam_attach: free the wildcard path,
 * deregister the bus and free the SIM, all under sim_lock.
 */
205 tws_cam_detach(struct tws_softc *sc)
207 TWS_TRACE_DEBUG(sc, "entry", 0, 0);
208 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
210 xpt_free_path(sc->path);
212 xpt_bus_deregister(cam_sim_path(sc->sim));
213 cam_sim_free(sc->sim);
215 lockmgr(&sc->sim_lock, LK_RELEASE);
/*
 * tws_bus_scan -- queue an asynchronous full-bus rescan.
 *
 * Builds a wildcard path and an XPT_SCAN_BUS ccb whose completion is
 * delivered to tws_bus_scan_cb().  Caller must hold sim_lock (asserted
 * below) and the SIM must already exist.
 */
219 tws_bus_scan(struct tws_softc *sc)
221 struct cam_path *path;
224 TWS_TRACE_DEBUG(sc, "entry", sc, 0);
225 KASSERT(sc->sim, ("sim not allocated"));
226 KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0);
230 bzero(ccb, sizeof(union ccb));
231 if (xpt_create_path(&path, xpt_periph, cam_sim_path(sc->sim),
232 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
234 /* lockmgr(&sc->sim_lock, LK_RELEASE); */
/* Priority 5 scan request; results come back through tws_bus_scan_cb. */
237 xpt_setup_ccb(&ccb->ccb_h, path, 5);
238 ccb->ccb_h.func_code = XPT_SCAN_BUS;
239 ccb->ccb_h.cbfcnp = tws_bus_scan_cb;
240 ccb->crcn.flags = CAM_FLAG_NONE;
/*
 * tws_bus_scan_cb -- completion callback for the XPT_SCAN_BUS ccb queued
 * by tws_bus_scan().  On scan failure it raises a TWS_SCAN_FAILURE event
 * (under gen_lock); in all cases the scan path is freed.
 */
247 tws_bus_scan_cb(struct cam_periph *periph, union ccb *ccb)
249 struct tws_softc *sc = periph->softc;
251 /* calling trace results in non-sleepable lock head panic
252 using printf to debug */
254 if (ccb->ccb_h.status != CAM_REQ_CMP) {
255 kprintf("cam_scan failure\n");
257 lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
258 tws_send_event(sc, TWS_SCAN_FAILURE);
259 lockmgr(&sc->gen_lock, LK_RELEASE);
262 xpt_free_path(ccb->ccb_h.path);
/*
 * tws_action -- main CAM SIM action routine; dispatches on func_code.
 *
 * SCSI I/O is handed to tws_execute_scsi(); transport-setting, geometry
 * and path-inquiry requests are answered inline; everything unhandled is
 * completed with CAM_REQ_INVALID.
 */
266 tws_action(struct cam_sim *sim, union ccb *ccb)
268 struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim);
270 switch( ccb->ccb_h.func_code ) {
273 if ( tws_execute_scsi(sc, ccb) )
274 TWS_TRACE_DEBUG(sc, "execute scsi failed", 0, 0);
/* Abort is not supported by the firmware interface; fail the request. */
279 TWS_TRACE_DEBUG(sc, "abort i/o", 0, 0);
280 ccb->ccb_h.status = CAM_UA_ABORT;
286 TWS_TRACE_DEBUG(sc, "reset bus", sim, ccb);
289 case XPT_SET_TRAN_SETTINGS:
291 TWS_TRACE_DEBUG(sc, "set tran settings", sim, ccb);
292 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
297 case XPT_GET_TRAN_SETTINGS:
299 TWS_TRACE_DEBUG(sc, "get tran settings", sim, ccb);
/* Report a fixed SPI/SCSI-2 personality with disconnect + tagged queuing. */
301 ccb->cts.protocol = PROTO_SCSI;
302 ccb->cts.protocol_version = SCSI_REV_2;
303 ccb->cts.transport = XPORT_SPI;
304 ccb->cts.transport_version = 2;
306 ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
307 ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
308 ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
309 ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
310 ccb->ccb_h.status = CAM_REQ_CMP;
315 case XPT_CALC_GEOMETRY:
317 TWS_TRACE_DEBUG(sc, "calc geometry(ccb,block-size)", ccb,
318 ccb->ccg.block_size);
319 cam_calc_geometry(&ccb->ccg, 1/* extended */);
326 TWS_TRACE_DEBUG(sc, "path inquiry", sim, ccb);
327 ccb->cpi.version_num = 1;
328 ccb->cpi.hba_inquiry = 0;
329 ccb->cpi.target_sprt = 0;
330 ccb->cpi.hba_misc = 0;
331 ccb->cpi.hba_eng_cnt = 0;
332 ccb->cpi.max_target = TWS_MAX_NUM_UNITS;
333 ccb->cpi.max_lun = TWS_MAX_NUM_LUNS - 1;
334 ccb->cpi.unit_number = cam_sim_unit(sim);
335 ccb->cpi.bus_id = cam_sim_bus(sim);
336 ccb->cpi.initiator_id = TWS_SCSI_INITIATOR_ID;
337 ccb->cpi.base_transfer_speed = 300000;
338 strncpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
339 strncpy(ccb->cpi.hba_vid, "3ware", HBA_IDLEN);
340 strncpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
341 ccb->cpi.transport = XPORT_SPI;
342 ccb->cpi.transport_version = 2;
343 ccb->cpi.protocol = PROTO_SCSI;
344 ccb->cpi.protocol_version = SCSI_REV_2;
345 ccb->ccb_h.status = CAM_REQ_CMP;
351 TWS_TRACE_DEBUG(sc, "default", sim, ccb);
352 ccb->ccb_h.status = CAM_REQ_INVALID;
/*
 * tws_scsi_complete -- successful-completion path for a SCSI I/O request.
 *
 * Removes the request from the busy queue, cancels its timeout, unmaps
 * its DMA, completes the ccb with CAM_REQ_CMP (under sim_lock), and
 * finally returns the request to the free queue.
 */
359 tws_scsi_complete(struct tws_request *req)
361 struct tws_softc *sc = req->sc;
363 lockmgr(&sc->q_lock, LK_EXCLUSIVE);
364 tws_q_remove_request(sc, req, TWS_BUSY_Q);
365 lockmgr(&sc->q_lock, LK_RELEASE);
367 callout_stop(&req->ccb_ptr->ccb_h.timeout_ch);
368 tws_unmap_request(req->sc, req);
371 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
372 req->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
373 xpt_done(req->ccb_ptr);
374 lockmgr(&sc->sim_lock, LK_RELEASE);
376 lockmgr(&sc->q_lock, LK_EXCLUSIVE);
377 tws_q_insert_tail(sc, req, TWS_FREE_Q);
378 lockmgr(&sc->q_lock, LK_RELEASE);
/*
 * tws_getset_param_complete -- completion handler for get/set-parameter
 * requests: cancels the timeout, unmaps DMA, frees the data buffer and
 * marks the (reserved) request free again under gen_lock.
 */
383 tws_getset_param_complete(struct tws_request *req)
385 struct tws_softc *sc = req->sc;
387 TWS_TRACE_DEBUG(sc, "getset complete", req, req->request_id);
389 callout_stop(&req->thandle);
390 tws_unmap_request(sc, req);
392 kfree(req->data, M_TWS);
394 lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
395 req->state = TWS_REQ_STATE_FREE;
396 lockmgr(&sc->gen_lock, LK_RELEASE);
/*
 * tws_aen_complete -- completion handler for an AEN (asynchronous event
 * notification) fetch request.
 *
 * Decodes the AEN code out of the returned sense/command header, handles
 * the time-sync and queue-empty codes specially, and otherwise builds a
 * tws_event_packet and inserts it into the circular AEN queue.  The
 * request's data buffer is freed and the request marked free; unless the
 * controller reported an empty AEN queue, another fetch is expected
 * (see the commented-out timeout call and the aen counter).
 */
401 tws_aen_complete(struct tws_request *req)
403 struct tws_softc *sc = req->sc;
404 struct tws_command_header *sense;
405 struct tws_event_packet event;
406 u_int16_t aen_code=0;
408 TWS_TRACE_DEBUG(sc, "aen complete", 0, req->request_id);
410 callout_stop(&req->thandle);
411 tws_unmap_request(sc, req);
/* The firmware returns the AEN as a command header in the request buffer. */
413 sense = (struct tws_command_header *)req->data;
415 TWS_TRACE_DEBUG(sc,"sense code, key",sense->sense_data[0],
416 sense->sense_data[2]);
417 TWS_TRACE_DEBUG(sc,"sense rid, seve",sense->header_desc.request_id,
418 sense->status_block.res__severity);
419 TWS_TRACE_DEBUG(sc,"sense srcnum, error",sense->status_block.srcnum,
420 sense->status_block.error);
421 TWS_TRACE_DEBUG(sc,"sense shdr, ssense",sense->header_desc.size_header,
422 sense->header_desc.size_sense);
424 aen_code = sense->status_block.error;
426 switch ( aen_code ) {
427 case TWS_AEN_SYNC_TIME_WITH_HOST :
428 tws_aen_synctime_with_host(sc);
430 case TWS_AEN_QUEUE_EMPTY :
/* Default case: translate the raw AEN into a user-visible event packet. */
433 bzero(&event, sizeof(struct tws_event_packet));
434 event.sequence_id = sc->seq_id;
435 event.time_stamp_sec = (u_int32_t)TWS_LOCAL_TIME;
436 event.aen_code = sense->status_block.error;
437 event.severity = sense->status_block.res__severity & 0x7;
438 event.event_src = TWS_SRC_CTRL_EVENT;
439 strcpy(event.severity_str, tws_sev_str[event.severity]);
440 event.retrieved = TWS_AEN_NOT_RETRIEVED;
442 bcopy(sense->err_specific_desc, event.parameter_data,
443 TWS_ERROR_SPECIFIC_DESC_LEN);
444 event.parameter_data[TWS_ERROR_SPECIFIC_DESC_LEN - 1] = '\0';
/*
 * The descriptor holds two consecutive NUL-terminated strings
 * (message + extra detail); parameter_len covers both when present.
 */
445 event.parameter_len = (u_int8_t)strlen(event.parameter_data)+1;
447 if ( event.parameter_len < TWS_ERROR_SPECIFIC_DESC_LEN ) {
448 event.parameter_len += ((u_int8_t)strlen(event.parameter_data +
449 event.parameter_len) + 1);
452 device_printf(sc->tws_dev, "%s: (0x%02X: 0x%04X): %s: %s\n",
456 event.parameter_data +
457 (strlen(event.parameter_data) + 1),
458 event.parameter_data);
460 lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
461 tws_circular_aenq_insert(sc, &sc->aen_q, &event);
463 lockmgr(&sc->gen_lock, LK_RELEASE);
468 kfree(req->data, M_TWS);
470 lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
471 req->state = TWS_REQ_STATE_FREE;
472 lockmgr(&sc->gen_lock, LK_RELEASE);
474 if ( aen_code != TWS_AEN_QUEUE_EMPTY ) {
475 /* timeout(tws_fetch_aen, sc, 1);*/
476 sc->stats.num_aens++;
/*
 * tws_cmd_complete -- generic command completion: cancel the ccb timeout
 * and unmap the request's DMA resources.
 */
483 tws_cmd_complete(struct tws_request *req)
485 struct tws_softc *sc = req->sc;
487 callout_stop(&req->ccb_ptr->ccb_h.timeout_ch);
488 tws_unmap_request(sc, req);
/*
 * tws_err_complete -- handle a response that came back with an error MFA.
 *
 * Looks up the sense buffer for the message frame address, recovers the
 * owning request from the header's request_id, and dispatches to the
 * type-specific error completion.  Afterwards the sense frame is posted
 * back to the controller's out-bound queue (HOBQPH/HOBQPL registers,
 * under io_lock), checking the status register for OBFL overrun.
 */
493 tws_err_complete(struct tws_softc *sc, u_int64_t mfa)
496 struct tws_command_header *hdr;
497 struct tws_sense *sen;
498 struct tws_request *req;
500 u_int32_t reg, status;
503 TWS_TRACE_DEBUG(sc, "null mfa", 0, mfa);
506 /* lookup the sense */
507 sen = tws_find_sense_from_mfa(sc, mfa);
509 TWS_TRACE_DEBUG(sc, "found null req", 0, mfa);
513 TWS_TRACE_DEBUG(sc, "sen, hdr", sen, hdr);
514 req_id = hdr->header_desc.request_id;
515 req = &sc->reqs[req_id];
516 TWS_TRACE_DEBUG(sc, "req, id", req, req_id);
517 if ( req->error_code != TWS_REQ_SUBMIT_SUCCESS )
518 TWS_TRACE_DEBUG(sc, "submit failure?", 0, req->error_code);
/* Route to the per-request-type error completion. */
522 case TWS_PASSTHRU_REQ :
523 tws_passthru_err_complete(req, hdr);
525 case TWS_GETSET_PARAM_REQ :
526 tws_getset_param_complete(req);
528 case TWS_SCSI_IO_REQ :
529 tws_scsi_err_complete(req, hdr);
/* Return the message frame to the firmware's out-bound free list. */
534 lockmgr(&sc->io_lock, LK_EXCLUSIVE);
535 hdr->header_desc.size_header = 128;
536 reg = (u_int32_t)( mfa>>32);
537 tws_write_reg(sc, TWS_I2O0_HOBQPH, reg, 4);
538 reg = (u_int32_t)(mfa);
539 tws_write_reg(sc, TWS_I2O0_HOBQPL, reg, 4);
541 status = tws_read_reg(sc, TWS_I2O0_STATUS, 4);
542 if ( status & TWS_BIT13 ) {
543 TWS_TRACE_DEBUG(sc, "OBFL Overrun", status, TWS_I2O0_STATUS);
544 sc->obfl_q_overrun = true;
547 lockmgr(&sc->io_lock, LK_RELEASE);
/*
 * tws_scsi_err_complete -- error completion for a SCSI I/O request.
 *
 * Translates firmware status-block errors into CAM status bits (invalid
 * lun/target for offline units, SCSI status error + autosense otherwise),
 * copies the sense data into the ccb, completes the ccb under sim_lock,
 * and moves the request from the busy queue back to the free queue.
 */
552 tws_scsi_err_complete(struct tws_request *req, struct tws_command_header *hdr)
554 u_int8_t *sense_data;
555 struct tws_softc *sc = req->sc;
556 union ccb *ccb = req->ccb_ptr;
558 TWS_TRACE_DEBUG(sc, "sbe, cmd_status", hdr->status_block.error,
559 req->cmd_pkt->cmd.pkt_a.status);
560 if ( hdr->status_block.error == TWS_ERROR_LOGICAL_UNIT_NOT_SUPPORTED ||
561 hdr->status_block.error == TWS_ERROR_UNIT_OFFLINE ) {
563 if ( ccb->ccb_h.target_lun ) {
564 TWS_TRACE_DEBUG(sc, "invalid lun error",0,0);
565 ccb->ccb_h.status |= CAM_LUN_INVALID;
567 TWS_TRACE_DEBUG(sc, "invalid target error",0,0);
568 ccb->ccb_h.status |= CAM_TID_INVALID;
572 TWS_TRACE_DEBUG(sc, "scsi status error",0,0);
573 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
/* 0x1A == MODE SENSE(6); firmware may legitimately not support a page. */
574 if (((ccb->csio.cdb_io.cdb_bytes[0] == 0x1A) &&
575 (hdr->status_block.error == TWS_ERROR_NOT_SUPPORTED))) {
576 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
577 TWS_TRACE_DEBUG(sc, "page mode not supported",0,0);
581 /* if there were no error simply mark complete error */
582 if (ccb->ccb_h.status == 0)
583 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
585 sense_data = (u_int8_t *)&ccb->csio.sense_data;
587 memcpy(sense_data, hdr->sense_data, TWS_SENSE_DATA_LENGTH );
588 ccb->csio.sense_len = TWS_SENSE_DATA_LENGTH;
589 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
591 ccb->csio.scsi_status = req->cmd_pkt->cmd.pkt_a.status;
593 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
594 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
596 lockmgr(&sc->sim_lock, LK_RELEASE);
598 callout_stop(&req->ccb_ptr->ccb_h.timeout_ch);
599 tws_unmap_request(req->sc, req);
600 lockmgr(&sc->q_lock, LK_EXCLUSIVE);
601 tws_q_remove_request(sc, req, TWS_BUSY_Q);
602 tws_q_insert_tail(sc, req, TWS_FREE_Q);
603 lockmgr(&sc->q_lock, LK_RELEASE);
/*
 * tws_passthru_err_complete -- error completion for a passthru (ioctl)
 * request: record the firmware error code, copy the returned command
 * header back into the user-visible packet, then finish via the normal
 * passthru completion path.
 */
608 tws_passthru_err_complete(struct tws_request *req,
609 struct tws_command_header *hdr)
612 TWS_TRACE_DEBUG(req->sc, "entry", hdr, req->request_id);
613 req->error_code = hdr->status_block.error;
614 memcpy(&(req->cmd_pkt->hdr), hdr, sizeof(struct tws_command_header));
615 tws_passthru_complete(req);
/*
 * tws_drain_busy_queue -- during reset, fail back every in-flight SCSI
 * request: pop each request off the busy queue, cancel its timeout and
 * DMA mapping, complete its ccb with CAM_REQUEUE_REQ so CAM retries it
 * later, and return the request to the free queue.
 */
619 tws_drain_busy_queue(struct tws_softc *sc)
622 struct tws_request *req;
623 TWS_TRACE_DEBUG(sc, "entry", 0, 0);
625 lockmgr(&sc->q_lock, LK_EXCLUSIVE);
626 req = tws_q_remove_tail(sc, TWS_BUSY_Q);
627 lockmgr(&sc->q_lock, LK_RELEASE);
629 callout_stop(&req->ccb_ptr->ccb_h.timeout_ch);
630 tws_unmap_request(req->sc, req);
632 TWS_TRACE_DEBUG(sc, "drained", 0, req->request_id);
634 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
635 req->ccb_ptr->ccb_h.status = CAM_REQUEUE_REQ;
636 xpt_done(req->ccb_ptr);
637 lockmgr(&sc->sim_lock, LK_RELEASE);
639 lockmgr(&sc->q_lock, LK_EXCLUSIVE);
640 tws_q_insert_tail(sc, req, TWS_FREE_Q);
641 req = tws_q_remove_tail(sc, TWS_BUSY_Q);
642 lockmgr(&sc->q_lock, LK_RELEASE);
/*
 * tws_drain_reserved_reqs -- during reset, release any reserved requests
 * still outstanding (AEN fetch, passthru, get/set-param).  Each section
 * below checks a different reserved slot; r is rebound between checks
 * in lines not visible in this view.
 */
648 tws_drain_reserved_reqs(struct tws_softc *sc)
651 struct tws_request *r;
/* AEN fetch request: cancel, unmap, free its buffer and mark free. */
654 if ( r->state != TWS_REQ_STATE_FREE ) {
655 TWS_TRACE_DEBUG(sc, "drained aen req", 0, 0);
656 callout_stop(&r->thandle);
657 tws_unmap_request(sc, r);
658 kfree(r->data, M_TWS);
659 lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
660 r->state = TWS_REQ_STATE_FREE;
661 lockmgr(&sc->gen_lock, LK_RELEASE);
/* Passthru request: complete it with a requeue error. */
664 if ( r->state != TWS_REQ_STATE_FREE ) {
665 TWS_TRACE_DEBUG(sc, "drained passthru req", 0, 0);
666 r->error_code = TWS_REQ_REQUEUE;
667 tws_passthru_complete(r);
/* Get/set-param request: run its normal completion to clean up. */
670 if ( r->state != TWS_REQ_STATE_FREE ) {
671 TWS_TRACE_DEBUG(sc, "drained set param req", 0, 0);
672 tws_getset_param_complete(r);
/*
 * tws_drain_response_queue -- discard any responses still pending in the
 * controller's out-bound queue during reset (body not visible here).
 */
678 tws_drain_response_queue(struct tws_softc *sc)
/*
 * tws_execute_scsi -- build and submit a firmware EXECUTE_SCSI command
 * for an XPT_SCSI_IO ccb.
 *
 * Validates target/lun and CDB addressing flags, grabs a request from
 * the free pool (requeueing the ccb if none is available), fills in the
 * pkt_a command packet (opcode, unit, lun split across two nibbles,
 * CDB copy), records the data buffer, arms the per-ccb timeout, and
 * hands off to tws_map_request() which loads the DMA map and submits.
 * Caller must hold sim_lock (asserted below).
 */
685 tws_execute_scsi(struct tws_softc *sc, union ccb *ccb)
687 struct tws_command_packet *cmd_pkt;
688 struct tws_request *req;
689 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
690 struct ccb_scsiio *csio = &(ccb->csio);
694 KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0);
695 if (ccb_h->target_id >= TWS_MAX_NUM_UNITS) {
696 TWS_TRACE_DEBUG(sc, "traget id too big", ccb_h->target_id, ccb_h->target_lun);
697 ccb_h->status |= CAM_TID_INVALID;
701 if (ccb_h->target_lun >= TWS_MAX_NUM_LUNS) {
702 TWS_TRACE_DEBUG(sc, "target lun 2 big", ccb_h->target_id, ccb_h->target_lun);
703 ccb_h->status |= CAM_LUN_INVALID;
/* Physical CDB addresses are not supported by this driver. */
708 if(ccb_h->flags & CAM_CDB_PHYS) {
709 TWS_TRACE_DEBUG(sc, "cdb phy", ccb_h->target_id, ccb_h->target_lun);
710 ccb_h->status = CAM_REQ_CMP_ERR;
716 * We are going to work on this request. Mark it as enqueued (though
717 * we don't actually queue it...)
719 ccb_h->status |= CAM_SIM_QUEUED;
721 req = tws_get_request(sc, TWS_SCSI_IO_REQ);
723 TWS_TRACE_DEBUG(sc, "no reqs", ccb_h->target_id, ccb_h->target_lun);
724 /* tws_freeze_simq(sc); */
725 ccb_h->status |= CAM_REQUEUE_REQ;
/* Record the DMA direction for the later bus_dmamap_sync calls. */
730 if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
731 if(ccb_h->flags & CAM_DIR_IN)
732 req->flags = TWS_DIR_IN;
734 req->flags = TWS_DIR_OUT;
736 req->flags = TWS_DIR_NONE; /* no data */
739 req->type = TWS_SCSI_IO_REQ;
740 req->cb = tws_scsi_complete;
742 cmd_pkt = req->cmd_pkt;
743 /* cmd_pkt->hdr.header_desc.size_header = 128; */
744 cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
745 cmd_pkt->cmd.pkt_a.unit = ccb_h->target_id;
746 cmd_pkt->cmd.pkt_a.status = 0;
747 cmd_pkt->cmd.pkt_a.sgl_offset = 16;
/* Low lun nibble shares a field with the request id ... */
750 lun = ccb_h->target_lun & 0XF;
752 cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun | req->request_id;
/* ... and the high nibble shares a field with the SGL entry count. */
754 lun = ccb_h->target_lun & 0XF0;
756 cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries = lun;
759 if ( csio->cdb_len > 16 )
760 TWS_TRACE(sc, "cdb len too big", ccb_h->target_id, csio->cdb_len);
763 if(ccb_h->flags & CAM_CDB_POINTER)
764 bcopy(csio->cdb_io.cdb_ptr, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
766 bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
768 if (!(ccb_h->flags & CAM_DATA_PHYS)) {
769 /* Virtual data addresses. Need to convert them... */
770 if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
771 if (csio->dxfer_len > TWS_MAX_IO_SIZE) {
772 TWS_TRACE(sc, "I/O is big", csio->dxfer_len, 0);
773 tws_release_request(req);
774 ccb_h->status = CAM_REQ_TOO_BIG;
779 req->length = csio->dxfer_len;
781 req->data = csio->data_ptr;
782 /* there is 1 sgl_entrie */
783 /* cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= 1; */
/* Caller-supplied scatter/gather lists are not handled. */
786 TWS_TRACE_DEBUG(sc, "got sglist", ccb_h->target_id, ccb_h->target_lun);
787 tws_release_request(req);
788 ccb_h->status = CAM_REQ_CMP_ERR;
793 /* Data addresses are physical. */
794 TWS_TRACE_DEBUG(sc, "Phy data addr", ccb_h->target_id, ccb_h->target_lun);
795 tws_release_request(req);
796 ccb_h->status = CAM_REQ_CMP_ERR;
797 ccb_h->status |= CAM_RELEASE_SIMQ;
798 ccb_h->status &= ~CAM_SIM_QUEUED;
805 * tws_map_load_data_callback will fill in the SGL,
806 * and submit the I/O.
808 sc->stats.scsi_ios++;
/* Per-request timeout; tws_timeout fires if the firmware never responds. */
809 callout_reset(&ccb_h->timeout_ch, (ccb_h->timeout * hz)/1000, tws_timeout,
811 error = tws_map_request(sc, req);
/*
 * tws_send_scsi_cmd -- issue an internal one-byte SCSI command (used for
 * AEN fetches) through the reserved TWS_AEN_FETCH_REQ slot.
 *
 * Builds an EXECUTE_SCSI packet with the given opcode in cdb[0] and an
 * allocation length of 128 in cdb[4], allocates a sector-sized DMA-in
 * buffer, arms the request timeout and submits via tws_map_request().
 */
817 tws_send_scsi_cmd(struct tws_softc *sc, int cmd)
820 struct tws_request *req;
821 struct tws_command_packet *cmd_pkt;
824 TWS_TRACE_DEBUG(sc, "entry",sc, cmd);
825 req = tws_get_request(sc, TWS_AEN_FETCH_REQ);
830 req->type = TWS_AEN_FETCH_REQ;
831 req->cb = tws_aen_complete;
833 cmd_pkt = req->cmd_pkt;
834 cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
835 cmd_pkt->cmd.pkt_a.status = 0;
836 cmd_pkt->cmd.pkt_a.unit = 0;
837 cmd_pkt->cmd.pkt_a.sgl_offset = 16;
838 cmd_pkt->cmd.pkt_a.lun_l4__req_id = req->request_id;
840 cmd_pkt->cmd.pkt_a.cdb[0] = (u_int8_t)cmd;
841 cmd_pkt->cmd.pkt_a.cdb[4] = 128;
843 req->length = TWS_SECTOR_SIZE;
844 req->data = kmalloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT | M_ZERO);
845 if ( req->data == NULL )
847 req->flags = TWS_DIR_IN;
849 callout_reset(&req->thandle, (TWS_IO_TIMEOUT * hz), tws_timeout, req);
850 error = tws_map_request(sc, req);
/*
 * tws_set_param -- asynchronously set one firmware parameter.
 *
 * Allocates a sector-sized DMA-out buffer holding a tws_getset_param
 * descriptor (table id OR'd with the 9K descriptor flag, parameter id,
 * size and payload), builds a SET_PARAM "giga" command, arms the request
 * timeout and submits it; completion runs tws_getset_param_complete().
 */
856 tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
857 u_int32_t param_size, void *data)
859 struct tws_request *req;
860 struct tws_command_packet *cmd_pkt;
861 union tws_command_giga *cmd;
862 struct tws_getset_param *param;
865 req = tws_get_request(sc, TWS_GETSET_PARAM_REQ);
867 TWS_TRACE_DEBUG(sc, "null req", 0, 0);
871 req->length = TWS_SECTOR_SIZE;
872 req->data = kmalloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT | M_ZERO);
873 if ( req->data == NULL )
875 param = (struct tws_getset_param *)req->data;
877 req->cb = tws_getset_param_complete;
878 req->flags = TWS_DIR_OUT;
879 cmd_pkt = req->cmd_pkt;
881 cmd = &cmd_pkt->cmd.pkt_g;
882 cmd->param.sgl_off__opcode =
883 BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_SET_PARAM);
884 cmd->param.request_id = (u_int8_t)req->request_id;
885 cmd->param.host_id__unit = 0;
886 cmd->param.param_count = 1;
887 cmd->param.size = 2; /* map routine will add sgls */
889 /* Specify which parameter we want to set. */
890 param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
891 param->parameter_id = (u_int8_t)(param_id);
892 param->parameter_size_bytes = (u_int16_t)param_size;
893 memcpy(param->data, data, param_size);
895 callout_reset(&req->thandle, (TWS_IO_TIMEOUT * hz), tws_timeout, req);
896 error = tws_map_request(sc, req);
/*
 * tws_get_param -- synchronously read one firmware parameter.
 *
 * Mirrors tws_set_param() but issues GET_PARAM with a DMA-in buffer and,
 * unlike the set path, busy-polls the controller for the response with
 * tws_poll4_response() instead of using the interrupt completion path.
 * On a matching response the parameter payload is copied into *data.
 * Cleanup (buffer free, request marked free) happens in all cases.
 */
902 tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
903 u_int32_t param_size, void *data)
905 struct tws_request *req;
906 struct tws_command_packet *cmd_pkt;
907 union tws_command_giga *cmd;
908 struct tws_getset_param *param;
914 req = tws_get_request(sc, TWS_GETSET_PARAM_REQ);
916 TWS_TRACE_DEBUG(sc, "null req", 0, 0);
920 req->length = TWS_SECTOR_SIZE;
921 req->data = kmalloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT | M_ZERO);
922 if ( req->data == NULL )
924 param = (struct tws_getset_param *)req->data;
927 req->flags = TWS_DIR_IN;
928 cmd_pkt = req->cmd_pkt;
930 cmd = &cmd_pkt->cmd.pkt_g;
931 cmd->param.sgl_off__opcode =
932 BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_GET_PARAM);
933 cmd->param.request_id = (u_int8_t)req->request_id;
934 cmd->param.host_id__unit = 0;
935 cmd->param.param_count = 1;
936 cmd->param.size = 2; /* map routine will add sgls */
938 /* Specify which parameter we want to set. */
939 param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
940 param->parameter_id = (u_int8_t)(param_id);
941 param->parameter_size_bytes = (u_int16_t)param_size;
/* Submit, then spin for the response rather than waiting on an interrupt. */
943 tws_map_request(sc, req);
944 reqid = tws_poll4_response(sc, &mfa);
945 tws_unmap_request(sc, req);
947 if ( reqid == TWS_GETSET_PARAM_REQ ) {
948 memcpy(data, param->data, param_size);
954 kfree(req->data, M_TWS);
955 lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
956 req->state = TWS_REQ_STATE_FREE;
957 lockmgr(&sc->gen_lock, LK_RELEASE);
/*
 * tws_unmap_request -- tear down the DMA mapping for a completed request:
 * post-sync in the direction(s) the request used, then unload the map
 * under io_lock.  No-op if the request carried no data.
 */
963 tws_unmap_request(struct tws_softc *sc, struct tws_request *req)
966 if (req->data != NULL) {
967 if ( req->flags & TWS_DIR_IN )
968 bus_dmamap_sync(sc->data_tag, req->dma_map,
969 BUS_DMASYNC_POSTREAD);
970 if ( req->flags & TWS_DIR_OUT )
971 bus_dmamap_sync(sc->data_tag, req->dma_map,
972 BUS_DMASYNC_POSTWRITE);
973 lockmgr(&sc->io_lock, LK_EXCLUSIVE);
974 bus_dmamap_unload(sc->data_tag, req->dma_map);
975 lockmgr(&sc->io_lock, LK_RELEASE);
/*
 * tws_map_request -- map a request's data buffer for DMA and submit it.
 *
 * With data present, bus_dmamap_load() is called under io_lock with
 * tws_dmamap_data_load_cbfn as the callback, which fills the SG list and
 * submits the command; EINPROGRESS (deferred callback) is translated to
 * TWS_REQ_ERR_INPROGRESS.  Data-less requests are submitted directly.
 * The outcome is recorded in req->error_code.
 */
980 tws_map_request(struct tws_softc *sc, struct tws_request *req)
985 /* If the command involves data, map that too. */
986 if (req->data != NULL) {
988 * Map the data buffer into bus space and build the SG list.
990 lockmgr(&sc->io_lock, LK_EXCLUSIVE);
991 error = bus_dmamap_load(sc->data_tag, req->dma_map,
992 req->data, req->length,
993 tws_dmamap_data_load_cbfn, req,
995 lockmgr(&sc->io_lock, LK_RELEASE);
997 if (error == EINPROGRESS) {
998 TWS_TRACE(sc, "in progress", 0, error);
999 /* tws_freeze_simq(sc); */
1000 error = TWS_REQ_ERR_INPROGRESS;
1002 } else { /* no data involved */
1003 error = tws_submit_command(sc, req);
1005 req->error_code = error;
/*
 * tws_dmamap_data_load_cbfn -- bus_dma load callback: pre-sync the map,
 * append the segment list to the command packet, and submit to firmware.
 *
 * Non-SCSI passthru and get/set-param commands use the "giga" generic
 * packet, where the SGL is appended after the current command size
 * (entries are 4 dwords for 64-bit SGLs, 2 for 32-bit); SCSI commands
 * place the SGL in pkt_a.sg_list and OR the entry count into the
 * lun_h4__sgl_entries field.
 */
1011 tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
1012 int nseg, int error)
1015 struct tws_request *req = (struct tws_request *)arg;
1016 struct tws_softc *sc = req->sc;
1017 u_int16_t sgls = nseg;
1019 struct tws_cmd_generic *gcmd;
1021 if ( error == EFBIG )
1022 TWS_TRACE(sc, "not enough data segs", 0, nseg);
1025 if ( req->flags & TWS_DIR_IN )
1026 bus_dmamap_sync(req->sc->data_tag, req->dma_map,
1027 BUS_DMASYNC_PREREAD);
1028 if ( req->flags & TWS_DIR_OUT )
1029 bus_dmamap_sync(req->sc->data_tag, req->dma_map,
1030 BUS_DMASYNC_PREWRITE);
1032 if ( (req->type == TWS_PASSTHRU_REQ &&
1033 GET_OPCODE(req->cmd_pkt->cmd.pkt_a.res__opcode) !=
1034 TWS_FW_CMD_EXECUTE_SCSI) ||
1035 req->type == TWS_GETSET_PARAM_REQ) {
1036 gcmd = &req->cmd_pkt->cmd.pkt_g.generic;
/* gcmd->size is in 32-bit words; the SGL goes right after the command. */
1037 sgl_ptr = (u_int32_t *)(gcmd) + gcmd->size;
1038 gcmd->size += sgls *
1039 ((req->sc->is64bit && !tws_use_32bit_sgls) ? 4 :2 );
1040 tws_fill_sg_list(req->sc, segs, sgl_ptr, sgls);
1043 tws_fill_sg_list(req->sc, segs,
1044 (void *)req->cmd_pkt->cmd.pkt_a.sg_list, sgls);
1045 req->cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= sgls ;
1050 req->error_code = tws_submit_command(req->sc, req);
/*
 * tws_fill_sg_list -- copy bus_dma segments into the firmware SG format.
 *
 * On 64-bit-capable controllers the source segments are stepped by
 * sizeof(bus_dma_segment_t) (the host and firmware descriptor layouts
 * differ), writing either 64-bit or 32-bit firmware descriptors per the
 * tws_use_32bit_sgls tunable; on 32-bit controllers a straight
 * element-wise copy suffices.  Overflow past the per-format element
 * limit is only traced, not truncated.
 */
1056 tws_fill_sg_list(struct tws_softc *sc, void *sgl_src, void *sgl_dest,
1057 u_int16_t num_sgl_entries)
1061 if ( sc->is64bit ) {
1062 struct tws_sg_desc64 *sgl_s = (struct tws_sg_desc64 *)sgl_src;
1064 if ( !tws_use_32bit_sgls ) {
1065 struct tws_sg_desc64 *sgl_d = (struct tws_sg_desc64 *)sgl_dest;
1066 if ( num_sgl_entries > TWS_MAX_64BIT_SG_ELEMENTS )
1067 TWS_TRACE(sc, "64bit sg overflow", num_sgl_entries, 0);
1068 for (i = 0; i < num_sgl_entries; i++) {
1069 sgl_d[i].address = sgl_s->address;
1070 sgl_d[i].length = sgl_s->length;
1072 sgl_d[i].reserved = 0;
1073 sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) +
1074 sizeof(bus_dma_segment_t));
1077 struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;
1078 if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
1079 TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);
1080 for (i = 0; i < num_sgl_entries; i++) {
1081 sgl_d[i].address = sgl_s->address;
1082 sgl_d[i].length = sgl_s->length;
1084 sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) +
1085 sizeof(bus_dma_segment_t));
1089 struct tws_sg_desc32 *sgl_s = (struct tws_sg_desc32 *)sgl_src;
1090 struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;
1092 if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
1093 TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);
1096 for (i = 0; i < num_sgl_entries; i++) {
1097 sgl_d[i].address = sgl_s[i].address;
1098 sgl_d[i].length = sgl_s[i].length;
/*
 * Body of tws_intr -- the primary interrupt handler.
 *
 * Reads the I2O host interrupt status register: TWS_BIT2 indicates a
 * doorbell interrupt, whose doorbell register bits select micro-
 * controller error (BIT21) or AEN-pending (BIT18) handling; TWS_BIT3
 * indicates response-queue activity (serviced by tws_intr_resp).
 */
1108 struct tws_softc *sc = (struct tws_softc *)arg;
1109 u_int32_t histat=0, db=0;
1111 KASSERT(sc, ("null softc"));
1113 sc->stats.num_intrs++;
1114 histat = tws_read_reg(sc, TWS_I2O0_HISTAT, 4);
1115 if ( histat & TWS_BIT2 ) {
1116 TWS_TRACE_DEBUG(sc, "door bell :)", histat, TWS_I2O0_HISTAT);
1117 db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
1118 if ( db & TWS_BIT21 ) {
1119 tws_intr_attn_error(sc);
1122 if ( db & TWS_BIT18 ) {
1123 tws_intr_attn_aen(sc);
1127 if ( histat & TWS_BIT3 ) {
/*
 * tws_intr_attn_aen -- acknowledge an AEN-attention doorbell: clear
 * BIT18 in the doorbell-clear register and re-read the doorbell.
 */
1133 tws_intr_attn_aen(struct tws_softc *sc)
1137 /* maskoff db intrs untill all the aens are fetched */
1138 /* tws_disable_db_intr(sc); */
1140 tws_write_reg(sc, TWS_I2O0_HOBDBC, TWS_BIT18, 4);
1141 db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
/*
 * tws_intr_attn_error -- micro-controller error doorbell: clear all
 * doorbell bits, re-read the doorbell and report the error on console.
 */
1146 tws_intr_attn_error(struct tws_softc *sc)
1150 TWS_TRACE(sc, "attn error", 0, 0);
1151 tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
1152 db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
1153 device_printf(sc->tws_dev, "Micro controller error.\n");
/*
 * tws_intr_resp -- drain the controller response queue: for each
 * response, either run the owning request's completion callback or,
 * for TWS_INVALID_REQID, route the message frame through the error
 * completion path.
 */
1158 tws_intr_resp(struct tws_softc *sc)
1163 while ( tws_get_response(sc, &req_id, &mfa) ) {
1164 sc->stats.reqs_out++;
1165 if ( req_id == TWS_INVALID_REQID ) {
1166 TWS_TRACE_DEBUG(sc, "invalid req_id", mfa, req_id);
1167 sc->stats.reqs_errored++;
1168 tws_err_complete(sc, mfa);
1172 sc->reqs[req_id].cb(&sc->reqs[req_id]);
/*
 * tws_poll -- CAM SIM poll entry point (used when interrupts are not
 * available); only traces entry in this implementation.
 */
1179 tws_poll(struct cam_sim *sim)
1181 struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim);
1182 TWS_TRACE_DEBUG(sc, "entry", 0, 0);
/*
 * tws_timeout -- callout fired when a request exceeds its deadline;
 * reports the timeout unless a controller reset is already in progress.
 */
1187 tws_timeout(void *arg)
1189 struct tws_request *req = (struct tws_request *)arg;
1190 struct tws_softc *sc = req->sc;
1193 if ( tws_get_state(sc) != TWS_RESET ) {
1194 device_printf(sc->tws_dev, "Request timed out.\n");
/*
 * tws_reset -- begin a controller reset sequence.
 *
 * Bails out if a reset is already underway; otherwise raises the
 * TWS_RESET_START event, masks interrupts, freezes the CAM SIM queue,
 * asserts soft reset and schedules tws_reset_cb() to poll for the
 * controller coming back (hz/10 later).
 */
1200 tws_reset(void *arg)
1203 struct tws_softc *sc = (struct tws_softc *)arg;
1205 if ( tws_get_state(sc) == TWS_RESET ) {
1208 device_printf(sc->tws_dev, "Resetting controller\n");
1209 lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
1210 tws_send_event(sc, TWS_RESET_START);
1211 lockmgr(&sc->gen_lock, LK_RELEASE);
1213 tws_turn_off_interrupts(sc);
1214 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
1215 tws_freeze_simq(sc);
1216 lockmgr(&sc->sim_lock, LK_RELEASE);
1218 tws_assert_soft_reset(sc);
1219 callout_reset(&sc->reset_cb_handle, hz/10, tws_reset_cb, sc);
/*
 * tws_reset_cb -- polling stage of the reset sequence.
 *
 * Re-arms itself every hz/10 until the controller's scratchpad register
 * reports soft reset complete (TWS_BIT13 in SCRPD3), then drains the
 * response queue, busy queue and reserved requests and schedules
 * tws_reinit() 5 seconds out.
 */
1223 tws_reset_cb(void *arg)
1226 struct tws_softc *sc = (struct tws_softc *)arg;
1229 if ( tws_get_state(sc) != TWS_RESET ) {
1232 reg = tws_read_reg(sc, TWS_I2O0_SCRPD3, 4);
1233 if (!( reg & TWS_BIT13 )) {
1234 callout_reset(&sc->reset_cb_handle, hz/10, tws_reset_cb, sc);
1237 tws_drain_response_queue(sc);
1238 tws_drain_busy_queue(sc);
1239 tws_drain_reserved_reqs(sc);
1240 callout_reset(&sc->reinit_handle, 5*hz, tws_reinit, sc);
/*
 * tws_reinit -- final stage of the reset sequence.
 *
 * Waits (rescheduling itself every 5s, re-asserting soft reset after
 * TWS_RESET_TIMEOUT with a bounded retry count) for the controller to
 * become ready, then re-establishes the init connection and OBFL queue,
 * thaws the SIM queue, re-enables interrupts, raises TWS_RESET_COMPLETE
 * and wakes anyone sleeping on sc->chan.
 * NOTE(review): timeout_val/try are function-static, so retry state
 * persists across reset cycles.
 */
1244 tws_reinit(void *arg)
1247 struct tws_softc *sc = (struct tws_softc *)arg;
1248 static int timeout_val=0, try=2 ;
1250 if ( !tws_ctlr_ready(sc) ) {
1252 if ( timeout_val >= TWS_RESET_TIMEOUT ) {
1255 tws_assert_soft_reset(sc);
1258 callout_reset(&sc->reinit_handle, 5*hz, tws_reinit, sc);
1264 sc->obfl_q_overrun = false;
1265 if ( tws_init_connect(sc, tws_queue_depth) ) {
1266 TWS_TRACE_DEBUG(sc, "initConnect failed", 0, sc->is64bit);
1268 tws_init_obfl_q(sc);
1270 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
1271 tws_release_simq(sc);
1272 lockmgr(&sc->sim_lock, LK_RELEASE);
1273 tws_turn_on_interrupts(sc);
1275 lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
1276 tws_send_event(sc, TWS_RESET_COMPLETE);
1277 lockmgr(&sc->gen_lock, LK_RELEASE);
1280 wakeup((void *)&sc->chan);
/*
 * tws_freeze_simq -- freeze the CAM SIM queue so no new I/O is delivered
 * while the controller is being reset.  Caller must hold sim_lock.
 */
1287 tws_freeze_simq(struct tws_softc *sc)
1290 TWS_TRACE_DEBUG(sc, "freezeing", 0, 0);
1291 KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0);
1292 xpt_freeze_simq(sc->sim, 1);
/*
 * tws_release_simq -- drop one freeze count on the SIM queue, resuming
 * I/O delivery after reset.  Caller must hold sim_lock.
 */
1296 tws_release_simq(struct tws_softc *sc)
1299 TWS_TRACE_DEBUG(sc, "unfreezeing", 0, 0);
1300 KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0);
1301 xpt_release_simq(sc->sim, 1);
/* Boot-time tunable overriding the default CAM queue depth (tws_cam_depth). */
1306 TUNABLE_INT("hw.tws.cam_depth", &tws_cam_depth);