Remove bogus (void *) casts.
[dragonfly.git] / sys / dev / raid / tws / tws_cam.c
CommitLineData
33190b70
SW
1/*
2 * Copyright (c) 2010, LSI Corp.
3 * All rights reserved.
4 * Author : Manjunath Ranganathaiah
5 * Support: freebsdraid@lsi.com
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of the <ORGANIZATION> nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 *
34 * $FreeBSD: src/sys/dev/tws/tws_cam.c,v 1.3 2007/05/09 04:16:32 mrangana Exp $
35 */
36
37#include <dev/raid/tws/tws.h>
38#include <dev/raid/tws/tws_services.h>
39#include <dev/raid/tws/tws_hdm.h>
40#include <dev/raid/tws/tws_user.h>
41#include <bus/cam/cam.h>
42#include <bus/cam/cam_ccb.h>
43#include <bus/cam/cam_sim.h>
44#include <bus/cam/cam_xpt_sim.h>
45#include <bus/cam/cam_debug.h>
46#include <bus/cam/cam_periph.h>
47
48#include <bus/cam/scsi/scsi_all.h>
49#include <bus/cam/scsi/scsi_message.h>
50
51static int tws_cam_depth=(TWS_MAX_REQS - TWS_RESERVED_REQS);
52static char tws_sev_str[5][8]={"","ERROR","WARNING","INFO","DEBUG"};
53
54static void tws_action(struct cam_sim *sim, union ccb *ccb);
55static void tws_poll(struct cam_sim *sim);
56static void tws_bus_scan_cb(struct cam_periph *periph, union ccb *ccb);
57static void tws_scsi_complete(struct tws_request *req);
58
59
60
61void tws_unmap_request(struct tws_softc *sc, struct tws_request *req);
62int32_t tws_map_request(struct tws_softc *sc, struct tws_request *req);
63int tws_bus_scan(struct tws_softc *sc);
64int tws_cam_attach(struct tws_softc *sc);
65void tws_cam_detach(struct tws_softc *sc);
66void tws_reset(void *arg);
67
68static void tws_reset_cb(void *arg);
69static void tws_reinit(void *arg);
70static int32_t tws_execute_scsi(struct tws_softc *sc, union ccb *ccb);
71static void tws_freeze_simq(struct tws_softc *sc);
72static void tws_release_simq(struct tws_softc *sc);
73static void tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
74 int nseg, int error);
75static void tws_fill_sg_list(struct tws_softc *sc, void *sgl_src,
76 void *sgl_dest, u_int16_t num_sgl_entries);
77static void tws_err_complete(struct tws_softc *sc, u_int64_t mfa);
78static void tws_scsi_err_complete(struct tws_request *req,
79 struct tws_command_header *hdr);
80static void tws_passthru_err_complete(struct tws_request *req,
81 struct tws_command_header *hdr);
82
83
84static void tws_timeout(void *arg);
85static void tws_intr_attn_aen(struct tws_softc *sc);
86static void tws_intr_attn_error(struct tws_softc *sc);
87static void tws_intr_resp(struct tws_softc *sc);
88void tws_intr(void *arg);
89void tws_cmd_complete(struct tws_request *req);
90void tws_aen_complete(struct tws_request *req);
91int tws_send_scsi_cmd(struct tws_softc *sc, int cmd);
92void tws_getset_param_complete(struct tws_request *req);
93int tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
94 u_int32_t param_size, void *data);
95int tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
96 u_int32_t param_size, void *data);
97
98
99extern struct tws_request *tws_get_request(struct tws_softc *sc,
100 u_int16_t type);
101extern void *tws_release_request(struct tws_request *req);
102extern int tws_submit_command(struct tws_softc *sc, struct tws_request *req);
103extern boolean tws_get_response(struct tws_softc *sc,
104 u_int16_t *req_id, u_int64_t *mfa);
105extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req,
106 u_int8_t q_type );
107extern struct tws_request * tws_q_remove_request(struct tws_softc *sc,
108 struct tws_request *req, u_int8_t q_type );
109extern void tws_send_event(struct tws_softc *sc, u_int8_t event);
110
111extern struct tws_sense *
112tws_find_sense_from_mfa(struct tws_softc *sc, u_int64_t mfa);
113
114extern void tws_fetch_aen(void *arg);
115extern void tws_disable_db_intr(struct tws_softc *sc);
116extern void tws_enable_db_intr(struct tws_softc *sc);
117extern void tws_passthru_complete(struct tws_request *req);
118extern void tws_aen_synctime_with_host(struct tws_softc *sc);
119extern void tws_circular_aenq_insert(struct tws_softc *sc,
120 struct tws_circular_q *cq, struct tws_event_packet *aen);
121extern int tws_use_32bit_sgls;
122extern boolean tws_ctlr_reset(struct tws_softc *sc);
123extern struct tws_request * tws_q_remove_tail(struct tws_softc *sc,
124 u_int8_t q_type );
125extern void tws_turn_off_interrupts(struct tws_softc *sc);
126extern void tws_turn_on_interrupts(struct tws_softc *sc);
127extern int tws_init_connect(struct tws_softc *sc, u_int16_t mc);
128extern void tws_init_obfl_q(struct tws_softc *sc);
129extern uint8_t tws_get_state(struct tws_softc *sc);
130extern void tws_assert_soft_reset(struct tws_softc *sc);
131extern boolean tws_ctlr_ready(struct tws_softc *sc);
132extern u_int16_t tws_poll4_response(struct tws_softc *sc, u_int64_t *mfa);
133
134
135
136int
137tws_cam_attach(struct tws_softc *sc)
138{
139 struct cam_devq *devq;
140 int error;
141
142 TWS_TRACE_DEBUG(sc, "entry", 0, sc);
143 /* Create a device queue for sim */
144
145 /*
146 * if the user sets cam depth to less than 1
147 * cam may get confused
148 */
149 if ( tws_cam_depth < 1 )
150 tws_cam_depth = 1;
151 if ( tws_cam_depth > (tws_queue_depth - TWS_RESERVED_REQS) )
152 tws_cam_depth = tws_queue_depth - TWS_RESERVED_REQS;
153
154 TWS_TRACE_DEBUG(sc, "depths,ctlr,cam", tws_queue_depth, tws_cam_depth);
155
156 if ((devq = cam_simq_alloc(tws_cam_depth)) == NULL) {
157 tws_log(sc, CAM_SIMQ_ALLOC);
158 return(ENOMEM);
159 }
160
161 /*
162 * Create a SIM entry. Though we can support tws_cam_depth
163 * simultaneous requests, we claim to be able to handle only
164 * (tws_cam_depth), so that we always have reserved requests
165 * packet available to service ioctls and internal commands.
166 */
167 sc->sim = cam_sim_alloc(tws_action, tws_poll, "tws", sc,
168 device_get_unit(sc->tws_dev),
169 &sc->sim_lock,
170 tws_cam_depth, 1, devq);
171 /* 1, 1, devq); */
172 cam_simq_release(devq);
173 if (sc->sim == NULL) {
174 tws_log(sc, CAM_SIM_ALLOC);
175 }
176 /* Register the bus. */
177 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
178 if (xpt_bus_register(sc->sim, 0) != CAM_SUCCESS) {
179 cam_sim_free(sc->sim);
180 sc->sim = NULL; /* so cam_detach will not try to free it */
181 lockmgr(&sc->sim_lock, LK_RELEASE);
182 tws_log(sc, TWS_XPT_BUS_REGISTER);
183 return(ENXIO);
184 }
185 if (xpt_create_path(&sc->path, NULL, cam_sim_path(sc->sim),
186 CAM_TARGET_WILDCARD,
187 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
188 xpt_bus_deregister(cam_sim_path(sc->sim));
189 cam_sim_free(sc->sim);
190 tws_log(sc, TWS_XPT_CREATE_PATH);
191 lockmgr(&sc->sim_lock, LK_RELEASE);
192 return(ENXIO);
193 }
194 if ((error = tws_bus_scan(sc))) {
195 tws_log(sc, TWS_BUS_SCAN_REQ);
196 lockmgr(&sc->sim_lock, LK_RELEASE);
197 return(error);
198 }
199 lockmgr(&sc->sim_lock, LK_RELEASE);
200
201 return(0);
202}
203
204void
205tws_cam_detach(struct tws_softc *sc)
206{
207 TWS_TRACE_DEBUG(sc, "entry", 0, 0);
208 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
209 if (sc->path)
210 xpt_free_path(sc->path);
211 if (sc->sim) {
212 xpt_bus_deregister(cam_sim_path(sc->sim));
213 cam_sim_free(sc->sim);
214 }
215 lockmgr(&sc->sim_lock, LK_RELEASE);
216}
217
218int
219tws_bus_scan(struct tws_softc *sc)
220{
221 struct cam_path *path;
222 union ccb *ccb;
223
224 TWS_TRACE_DEBUG(sc, "entry", sc, 0);
225 KASSERT(sc->sim, ("sim not allocated"));
226 KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0);
227
228 ccb = sc->scan_ccb;
229
230 bzero(ccb, sizeof(union ccb));
231 if (xpt_create_path(&path, xpt_periph, cam_sim_path(sc->sim),
232 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
233 kfree(ccb, M_TEMP);
234 /* lockmgr(&sc->sim_lock, LK_RELEASE); */
235 return(EIO);
236 }
237 xpt_setup_ccb(&ccb->ccb_h, path, 5);
238 ccb->ccb_h.func_code = XPT_SCAN_BUS;
239 ccb->ccb_h.cbfcnp = tws_bus_scan_cb;
240 ccb->crcn.flags = CAM_FLAG_NONE;
241 xpt_action(ccb);
242
243 return(0);
244}
245
246static void
247tws_bus_scan_cb(struct cam_periph *periph, union ccb *ccb)
248{
249 struct tws_softc *sc = periph->softc;
250
251 /* calling trace results in non-sleepable lock head panic
252 using printf to debug */
253
254 if (ccb->ccb_h.status != CAM_REQ_CMP) {
255 kprintf("cam_scan failure\n");
256
257 lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
258 tws_send_event(sc, TWS_SCAN_FAILURE);
259 lockmgr(&sc->gen_lock, LK_RELEASE);
260 }
261
262 xpt_free_path(ccb->ccb_h.path);
263}
264
/*
 * Main CAM action entry point for the SIM: dispatch on the ccb function
 * code. SCSI I/O is handed to tws_execute_scsi(); most other requests
 * are completed immediately with a static answer.
 */
static void
tws_action(struct cam_sim *sim, union ccb *ccb)
{
    struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim);

    switch( ccb->ccb_h.func_code ) {
        case XPT_SCSI_IO:
        {
            /* tws_execute_scsi() calls xpt_done() itself on all paths. */
            if ( tws_execute_scsi(sc, ccb) )
                TWS_TRACE_DEBUG(sc, "execute scsi failed", 0, 0);
            break;
        }
        case XPT_ABORT:
        {
            /* Abort is not supported; fail the request. */
            TWS_TRACE_DEBUG(sc, "abort i/o", 0, 0);
            ccb->ccb_h.status = CAM_UA_ABORT;
            xpt_done(ccb);
            break;
        }
        case XPT_RESET_BUS:
        {
            /* NOTE(review): traced only — the ccb is never completed here. */
            TWS_TRACE_DEBUG(sc, "reset bus", sim, ccb);
            break;
        }
        case XPT_SET_TRAN_SETTINGS:
        {
            /* Transfer settings are fixed; refuse to change them. */
            TWS_TRACE_DEBUG(sc, "set tran settings", sim, ccb);
            ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
            xpt_done(ccb);

            break;
        }
        case XPT_GET_TRAN_SETTINGS:
        {
            TWS_TRACE_DEBUG(sc, "get tran settings", sim, ccb);

            /* Report a fixed SPI/SCSI-2 transport with disconnect+tags. */
            ccb->cts.protocol = PROTO_SCSI;
            ccb->cts.protocol_version = SCSI_REV_2;
            ccb->cts.transport = XPORT_SPI;
            ccb->cts.transport_version = 2;

            ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
            ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
            ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
            ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
            ccb->ccb_h.status = CAM_REQ_CMP;
            xpt_done(ccb);

            break;
        }
        case XPT_CALC_GEOMETRY:
        {
            TWS_TRACE_DEBUG(sc, "calc geometry(ccb,block-size)", ccb,
                                  ccb->ccg.block_size);
            /* Let CAM compute extended-translation CHS geometry. */
            cam_calc_geometry(&ccb->ccg, 1/* extended */);
            xpt_done(ccb);

            break;
        }
        case XPT_PATH_INQ:
        {
            TWS_TRACE_DEBUG(sc, "path inquiry", sim, ccb);
            ccb->cpi.version_num = 1;
            ccb->cpi.hba_inquiry = 0;
            ccb->cpi.target_sprt = 0;
            ccb->cpi.hba_misc = 0;
            ccb->cpi.hba_eng_cnt = 0;
            ccb->cpi.max_target = TWS_MAX_NUM_UNITS;
            ccb->cpi.max_lun = TWS_MAX_NUM_LUNS - 1;
            ccb->cpi.unit_number = cam_sim_unit(sim);
            ccb->cpi.bus_id = cam_sim_bus(sim);
            ccb->cpi.initiator_id = TWS_SCSI_INITIATOR_ID;
            /* 3 Gb/s SAS link speed, expressed in KB/s. */
            ccb->cpi.base_transfer_speed = 300000;
            strncpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
            strncpy(ccb->cpi.hba_vid, "3ware", HBA_IDLEN);
            strncpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
            ccb->cpi.transport = XPORT_SPI;
            ccb->cpi.transport_version = 2;
            ccb->cpi.protocol = PROTO_SCSI;
            ccb->cpi.protocol_version = SCSI_REV_2;
            ccb->ccb_h.status = CAM_REQ_CMP;
            xpt_done(ccb);

            break;
        }
        default:
            TWS_TRACE_DEBUG(sc, "default", sim, ccb);
            ccb->ccb_h.status = CAM_REQ_INVALID;
            xpt_done(ccb);
            break;
    }
}
357
358static void
359tws_scsi_complete(struct tws_request *req)
360{
361 struct tws_softc *sc = req->sc;
362
363 lockmgr(&sc->q_lock, LK_EXCLUSIVE);
364 tws_q_remove_request(sc, req, TWS_BUSY_Q);
365 lockmgr(&sc->q_lock, LK_RELEASE);
366
367 callout_stop(&req->ccb_ptr->ccb_h.timeout_ch);
368 tws_unmap_request(req->sc, req);
369
370
371 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
372 req->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
373 xpt_done(req->ccb_ptr);
374 lockmgr(&sc->sim_lock, LK_RELEASE);
375
376 lockmgr(&sc->q_lock, LK_EXCLUSIVE);
377 tws_q_insert_tail(sc, req, TWS_FREE_Q);
378 lockmgr(&sc->q_lock, LK_RELEASE);
379
380}
381
382void
383tws_getset_param_complete(struct tws_request *req)
384{
385 struct tws_softc *sc = req->sc;
386
387 TWS_TRACE_DEBUG(sc, "getset complete", req, req->request_id);
388
389 callout_stop(&req->thandle);
390 tws_unmap_request(sc, req);
391
392 kfree(req->data, M_TWS);
393
394 lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
395 req->state = TWS_REQ_STATE_FREE;
396 lockmgr(&sc->gen_lock, LK_RELEASE);
397
398}
399
/*
 * Completion handler for an AEN (asynchronous event notification) fetch.
 * Decodes the firmware sense buffer into a tws_event_packet, queues it on
 * the circular AEN queue, and refetches if the firmware queue isn't empty.
 */
void
tws_aen_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;
    struct tws_command_header *sense;
    struct tws_event_packet event;
    u_int16_t aen_code=0;

    TWS_TRACE_DEBUG(sc, "aen complete", 0, req->request_id);

    callout_stop(&req->thandle);
    tws_unmap_request(sc, req);

    /* The DMA buffer holds a command header with embedded sense data. */
    sense = (struct tws_command_header *)req->data;

    TWS_TRACE_DEBUG(sc,"sense code, key",sense->sense_data[0],
                               sense->sense_data[2]);
    TWS_TRACE_DEBUG(sc,"sense rid, seve",sense->header_desc.request_id,
                               sense->status_block.res__severity);
    TWS_TRACE_DEBUG(sc,"sense srcnum, error",sense->status_block.srcnum,
                               sense->status_block.error);
    TWS_TRACE_DEBUG(sc,"sense shdr, ssense",sense->header_desc.size_header,
                               sense->header_desc.size_sense);

    aen_code = sense->status_block.error;

    switch ( aen_code ) {
        case TWS_AEN_SYNC_TIME_WITH_HOST :
            /* Firmware asked for a clock sync; no event is queued. */
            tws_aen_synctime_with_host(sc);
            break;
        case TWS_AEN_QUEUE_EMPTY :
            /* Nothing pending; don't refetch (checked again below). */
            break;
        default :
            bzero(&event, sizeof(struct tws_event_packet));
            event.sequence_id = sc->seq_id;
            event.time_stamp_sec = (u_int32_t)TWS_LOCAL_TIME;
            event.aen_code = sense->status_block.error;
            /* Severity is the low 3 bits; indexes tws_sev_str[]. */
            event.severity = sense->status_block.res__severity & 0x7;
            event.event_src = TWS_SRC_CTRL_EVENT;
            strcpy(event.severity_str, tws_sev_str[event.severity]);
            event.retrieved = TWS_AEN_NOT_RETRIEVED;

            bcopy(sense->err_specific_desc, event.parameter_data,
                              TWS_ERROR_SPECIFIC_DESC_LEN);
            event.parameter_data[TWS_ERROR_SPECIFIC_DESC_LEN - 1] = '\0';
            event.parameter_len = (u_int8_t)strlen(event.parameter_data)+1;

            /*
             * The descriptor carries two consecutive NUL-terminated
             * strings; extend parameter_len to cover the second one
             * when it fits.
             */
            if ( event.parameter_len < TWS_ERROR_SPECIFIC_DESC_LEN ) {
                event.parameter_len += ((u_int8_t)strlen(event.parameter_data +
                                                event.parameter_len) + 1);
            }

            /* Print the second string first, then the first (event text). */
            device_printf(sc->tws_dev, "%s: (0x%02X: 0x%04X): %s: %s\n",
                       event.severity_str,
                       event.event_src,
                       event.aen_code,
                       event.parameter_data +
                       (strlen(event.parameter_data) + 1),
                       event.parameter_data);

            lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
            tws_circular_aenq_insert(sc, &sc->aen_q, &event);
            sc->seq_id++;
            lockmgr(&sc->gen_lock, LK_RELEASE);
            break;

    }

    kfree(req->data, M_TWS);

    /* Release the reserved AEN request slot. */
    lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
    req->state = TWS_REQ_STATE_FREE;
    lockmgr(&sc->gen_lock, LK_RELEASE);

    if ( aen_code != TWS_AEN_QUEUE_EMPTY ) {
        /* timeout(tws_fetch_aen, sc, 1);*/
        sc->stats.num_aens++;
        /* More events pending: fetch the next one immediately. */
        tws_fetch_aen(sc);
    }

}
481
482void
483tws_cmd_complete(struct tws_request *req)
484{
485 struct tws_softc *sc = req->sc;
486
487 callout_stop(&req->ccb_ptr->ccb_h.timeout_ch);
488 tws_unmap_request(sc, req);
489
490}
491
/*
 * Error completion path: the firmware returned a message frame address
 * (mfa) on the outbound error queue. Look up the associated sense/header,
 * complete the owning request by type, then post the mfa back to the
 * controller's high/low outbound queue pointer registers.
 */
static void
tws_err_complete(struct tws_softc *sc, u_int64_t mfa)
{

    struct tws_command_header *hdr;
    struct tws_sense *sen;
    struct tws_request *req;
    u_int16_t req_id;
    u_int32_t reg, status;

    if ( !mfa ) {
        TWS_TRACE_DEBUG(sc, "null mfa", 0, mfa);
        return;
    } else {
        /* lookup the sense */
        sen = tws_find_sense_from_mfa(sc, mfa);
        if ( sen == NULL ) {
            TWS_TRACE_DEBUG(sc, "found null req", 0, mfa);
            return;
        }
        hdr = sen->hdr;
        TWS_TRACE_DEBUG(sc, "sen, hdr", sen, hdr);
        /* The header's request id indexes straight into sc->reqs[]. */
        req_id = hdr->header_desc.request_id;
        req = &sc->reqs[req_id];
        TWS_TRACE_DEBUG(sc, "req, id", req, req_id);
        if ( req->error_code != TWS_REQ_SUBMIT_SUCCESS )
            TWS_TRACE_DEBUG(sc, "submit failure?", 0, req->error_code);
    }

    /* Dispatch to the type-specific error completion. */
    switch (req->type) {
        case TWS_PASSTHRU_REQ :
            tws_passthru_err_complete(req, hdr);
            break;
        case TWS_GETSET_PARAM_REQ :
            tws_getset_param_complete(req);
            break;
        case TWS_SCSI_IO_REQ :
            tws_scsi_err_complete(req, hdr);
            break;

    }

    /*
     * Return the message frame to the firmware: write the high dword
     * first, then the low dword (the low write posts the frame).
     */
    lockmgr(&sc->io_lock, LK_EXCLUSIVE);
    hdr->header_desc.size_header = 128;
    reg = (u_int32_t)( mfa>>32);
    tws_write_reg(sc, TWS_I2O0_HOBQPH, reg, 4);
    reg = (u_int32_t)(mfa);
    tws_write_reg(sc, TWS_I2O0_HOBQPL, reg, 4);

    /* Bit 13 of the status register signals outbound-queue overrun. */
    status = tws_read_reg(sc, TWS_I2O0_STATUS, 4);
    if ( status & TWS_BIT13 ) {
        TWS_TRACE_DEBUG(sc, "OBFL Overrun", status, TWS_I2O0_STATUS);
        sc->obfl_q_overrun = true;
        sen->posted = false;
    }
    lockmgr(&sc->io_lock, LK_RELEASE);

}
550
551static void
552tws_scsi_err_complete(struct tws_request *req, struct tws_command_header *hdr)
553{
554 u_int8_t *sense_data;
555 struct tws_softc *sc = req->sc;
556 union ccb *ccb = req->ccb_ptr;
557
558 TWS_TRACE_DEBUG(sc, "sbe, cmd_status", hdr->status_block.error,
559 req->cmd_pkt->cmd.pkt_a.status);
560 if ( hdr->status_block.error == TWS_ERROR_LOGICAL_UNIT_NOT_SUPPORTED ||
561 hdr->status_block.error == TWS_ERROR_UNIT_OFFLINE ) {
562
563 if ( ccb->ccb_h.target_lun ) {
564 TWS_TRACE_DEBUG(sc, "invalid lun error",0,0);
565 ccb->ccb_h.status |= CAM_LUN_INVALID;
566 } else {
567 TWS_TRACE_DEBUG(sc, "invalid target error",0,0);
568 ccb->ccb_h.status |= CAM_TID_INVALID;
569 }
570
571 } else {
572 TWS_TRACE_DEBUG(sc, "scsi status error",0,0);
573 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
574 if (((ccb->csio.cdb_io.cdb_bytes[0] == 0x1A) &&
575 (hdr->status_block.error == TWS_ERROR_NOT_SUPPORTED))) {
576 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
577 TWS_TRACE_DEBUG(sc, "page mode not supported",0,0);
578 }
579 }
580
581 /* if there were no error simply mark complete error */
582 if (ccb->ccb_h.status == 0)
583 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
584
585 sense_data = (u_int8_t *)&ccb->csio.sense_data;
586 if (sense_data) {
587 memcpy(sense_data, hdr->sense_data, TWS_SENSE_DATA_LENGTH );
588 ccb->csio.sense_len = TWS_SENSE_DATA_LENGTH;
589 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
590 }
591 ccb->csio.scsi_status = req->cmd_pkt->cmd.pkt_a.status;
592
593 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
594 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
595 xpt_done(ccb);
596 lockmgr(&sc->sim_lock, LK_RELEASE);
597
598 callout_stop(&req->ccb_ptr->ccb_h.timeout_ch);
599 tws_unmap_request(req->sc, req);
600 lockmgr(&sc->q_lock, LK_EXCLUSIVE);
601 tws_q_remove_request(sc, req, TWS_BUSY_Q);
602 tws_q_insert_tail(sc, req, TWS_FREE_Q);
603 lockmgr(&sc->q_lock, LK_RELEASE);
604
605}
606
607static void
608tws_passthru_err_complete(struct tws_request *req,
609 struct tws_command_header *hdr)
610{
611
612 TWS_TRACE_DEBUG(req->sc, "entry", hdr, req->request_id);
613 req->error_code = hdr->status_block.error;
614 memcpy(&(req->cmd_pkt->hdr), hdr, sizeof(struct tws_command_header));
615 tws_passthru_complete(req);
616}
617
618static void
619tws_drain_busy_queue(struct tws_softc *sc)
620{
621
622 struct tws_request *req;
623 TWS_TRACE_DEBUG(sc, "entry", 0, 0);
624
625 lockmgr(&sc->q_lock, LK_EXCLUSIVE);
626 req = tws_q_remove_tail(sc, TWS_BUSY_Q);
627 lockmgr(&sc->q_lock, LK_RELEASE);
628 while ( req ) {
629 callout_stop(&req->ccb_ptr->ccb_h.timeout_ch);
630 tws_unmap_request(req->sc, req);
631
632 TWS_TRACE_DEBUG(sc, "drained", 0, req->request_id);
633
634 lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
635 req->ccb_ptr->ccb_h.status = CAM_REQUEUE_REQ;
636 xpt_done(req->ccb_ptr);
637 lockmgr(&sc->sim_lock, LK_RELEASE);
638
639 lockmgr(&sc->q_lock, LK_EXCLUSIVE);
640 tws_q_insert_tail(sc, req, TWS_FREE_Q);
641 req = tws_q_remove_tail(sc, TWS_BUSY_Q);
642 lockmgr(&sc->q_lock, LK_RELEASE);
643 }
644
645}
646
/*
 * Drain the three reserved request slots (fixed indices in sc->reqs[]):
 *   reqs[1] - AEN fetch, reqs[2] - passthru, reqs[3] - get/set param.
 * Each in-flight slot is completed/released by its own mechanism.
 */
static void
tws_drain_reserved_reqs(struct tws_softc *sc)
{

    struct tws_request *r;

    /* Slot 1: AEN fetch request. */
    r = &sc->reqs[1];
    if ( r->state != TWS_REQ_STATE_FREE ) {
        TWS_TRACE_DEBUG(sc, "drained aen req", 0, 0);
        callout_stop(&r->thandle);
        tws_unmap_request(sc, r);
        kfree(r->data, M_TWS);
        lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
        r->state = TWS_REQ_STATE_FREE;
        lockmgr(&sc->gen_lock, LK_RELEASE);
    }
    /* Slot 2: passthru request — mark for requeue and let the
     * passthru completion path release it. */
    r = &sc->reqs[2];
    if ( r->state != TWS_REQ_STATE_FREE ) {
        TWS_TRACE_DEBUG(sc, "drained passthru req", 0, 0);
        r->error_code = TWS_REQ_REQUEUE;
        tws_passthru_complete(r);
    }
    /* Slot 3: get/set-param request — its completion handler frees
     * the data buffer and the slot. */
    r = &sc->reqs[3];
    if ( r->state != TWS_REQ_STATE_FREE ) {
        TWS_TRACE_DEBUG(sc, "drained set param req", 0, 0);
        tws_getset_param_complete(r);
    }

}
676
/*
 * Drain any responses still pending on the controller's outbound queue
 * by running the normal response-interrupt handler synchronously.
 */
static void
tws_drain_response_queue(struct tws_softc *sc)
{
    tws_intr_resp(sc);
}
682
683
/*
 * Build and submit a firmware EXECUTE_SCSI command for an XPT_SCSI_IO
 * ccb. Validates target/lun, allocates a request, packs the lun nibbles
 * and CDB, then maps the data buffer (the DMA callback submits the
 * command). All failure paths complete the ccb via xpt_done() and
 * return 0; only the map step can return a nonzero error.
 * Caller must hold sc->sim_lock.
 */
static int32_t
tws_execute_scsi(struct tws_softc *sc, union ccb *ccb)
{
    struct tws_command_packet *cmd_pkt;
    struct tws_request *req;
    struct ccb_hdr *ccb_h = &(ccb->ccb_h);
    struct ccb_scsiio *csio = &(ccb->csio);
    int error;
    u_int16_t lun;

    KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0);
    if (ccb_h->target_id >= TWS_MAX_NUM_UNITS) {
        TWS_TRACE_DEBUG(sc, "traget id too big", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_TID_INVALID;
        xpt_done(ccb);
        return(0);
    }
    if (ccb_h->target_lun >= TWS_MAX_NUM_LUNS) {
        TWS_TRACE_DEBUG(sc, "target lun 2 big", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status |= CAM_LUN_INVALID;
        xpt_done(ccb);
        return(0);
    }

    /* Physical CDB addresses are not supported. */
    if(ccb_h->flags & CAM_CDB_PHYS) {
        TWS_TRACE_DEBUG(sc, "cdb phy", ccb_h->target_id, ccb_h->target_lun);
        ccb_h->status = CAM_REQ_CMP_ERR;
        xpt_done(ccb);
        return(0);
    }

    /*
     * We are going to work on this request. Mark it as enqueued (though
     * we don't actually queue it...)
     */
    ccb_h->status |= CAM_SIM_QUEUED;

    req = tws_get_request(sc, TWS_SCSI_IO_REQ);
    if ( !req ) {
        TWS_TRACE_DEBUG(sc, "no reqs", ccb_h->target_id, ccb_h->target_lun);
        /* tws_freeze_simq(sc); */
        ccb_h->status |= CAM_REQUEUE_REQ;
        xpt_done(ccb);
        return(0);
    }

    /* Record transfer direction for the DMA sync in map/unmap. */
    if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
        if(ccb_h->flags & CAM_DIR_IN)
            req->flags = TWS_DIR_IN;
        else
            req->flags = TWS_DIR_OUT;
    } else {
        req->flags = TWS_DIR_NONE; /* no data */
    }

    req->type = TWS_SCSI_IO_REQ;
    req->cb = tws_scsi_complete;

    cmd_pkt = req->cmd_pkt;
    /* cmd_pkt->hdr.header_desc.size_header = 128; */
    cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
    cmd_pkt->cmd.pkt_a.unit = ccb_h->target_id;
    cmd_pkt->cmd.pkt_a.status = 0;
    cmd_pkt->cmd.pkt_a.sgl_offset = 16;

    /* lower nibble */
    lun = ccb_h->target_lun & 0XF;
    lun = lun << 12;
    cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun | req->request_id;
    /* upper nibble */
    lun = ccb_h->target_lun & 0XF0;
    lun = lun << 8;
    cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries = lun;

#ifdef TWS_DEBUG
    if ( csio->cdb_len > 16 )
         TWS_TRACE(sc, "cdb len too big", ccb_h->target_id, csio->cdb_len);
#endif

    /* Copy the CDB; CAM_CDB_POINTER means the bytes live behind a pointer. */
    if(ccb_h->flags & CAM_CDB_POINTER)
        bcopy(csio->cdb_io.cdb_ptr, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
    else
        bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);

    if (!(ccb_h->flags & CAM_DATA_PHYS)) {
         /* Virtual data addresses.  Need to convert them... */
         if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
             if (csio->dxfer_len > TWS_MAX_IO_SIZE) {
                 TWS_TRACE(sc, "I/O is big", csio->dxfer_len, 0);
                 tws_release_request(req);
                 ccb_h->status = CAM_REQ_TOO_BIG;
                 xpt_done(ccb);
                 return(0);
             }

             req->length = csio->dxfer_len;
             if (req->length) {
                 req->data = csio->data_ptr;
                 /* there is 1 sgl_entrie */
                 /* cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= 1; */
             }
         } else {
             /* Caller-supplied S/G lists are not supported. */
             TWS_TRACE_DEBUG(sc, "got sglist", ccb_h->target_id, ccb_h->target_lun);
             tws_release_request(req);
             ccb_h->status = CAM_REQ_CMP_ERR;
             xpt_done(ccb);
             return(0);
         }
    } else {
         /* Data addresses are physical. */
         TWS_TRACE_DEBUG(sc, "Phy data addr", ccb_h->target_id, ccb_h->target_lun);
         tws_release_request(req);
         ccb_h->status = CAM_REQ_CMP_ERR;
         ccb_h->status |= CAM_RELEASE_SIMQ;
         ccb_h->status &= ~CAM_SIM_QUEUED;
         xpt_done(ccb);
         return(0);
    }
    /* save ccb ptr */
    req->ccb_ptr = ccb;
    /*
     * tws_map_load_data_callback will fill in the SGL,
     * and submit the I/O.
     */
    sc->stats.scsi_ios++;
    /* Arm the per-command timeout before submission. */
    callout_reset(&ccb_h->timeout_ch, (ccb_h->timeout * hz)/1000, tws_timeout,
                  req);
    error = tws_map_request(sc, req);
    return(error);
}
814
815
816int
817tws_send_scsi_cmd(struct tws_softc *sc, int cmd)
818{
819
820 struct tws_request *req;
821 struct tws_command_packet *cmd_pkt;
822 int error;
823
824 TWS_TRACE_DEBUG(sc, "entry",sc, cmd);
825 req = tws_get_request(sc, TWS_AEN_FETCH_REQ);
826
827 if ( req == NULL )
828 return(ENOMEM);
829
830 req->type = TWS_AEN_FETCH_REQ;
831 req->cb = tws_aen_complete;
832
833 cmd_pkt = req->cmd_pkt;
834 cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
835 cmd_pkt->cmd.pkt_a.status = 0;
836 cmd_pkt->cmd.pkt_a.unit = 0;
837 cmd_pkt->cmd.pkt_a.sgl_offset = 16;
838 cmd_pkt->cmd.pkt_a.lun_l4__req_id = req->request_id;
839
840 cmd_pkt->cmd.pkt_a.cdb[0] = (u_int8_t)cmd;
841 cmd_pkt->cmd.pkt_a.cdb[4] = 128;
842
843 req->length = TWS_SECTOR_SIZE;
0c374e73 844 req->data = kmalloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT | M_ZERO);
33190b70
SW
845 if ( req->data == NULL )
846 return(ENOMEM);
33190b70
SW
847 req->flags = TWS_DIR_IN;
848
849 callout_reset(&req->thandle, (TWS_IO_TIMEOUT * hz), tws_timeout, req);
850 error = tws_map_request(sc, req);
851 return(error);
852
853}
854
855int
856tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
857 u_int32_t param_size, void *data)
858{
859 struct tws_request *req;
860 struct tws_command_packet *cmd_pkt;
861 union tws_command_giga *cmd;
862 struct tws_getset_param *param;
863 int error;
864
865 req = tws_get_request(sc, TWS_GETSET_PARAM_REQ);
866 if ( req == NULL ) {
867 TWS_TRACE_DEBUG(sc, "null req", 0, 0);
868 return(ENOMEM);
869 }
870
871 req->length = TWS_SECTOR_SIZE;
0c374e73 872 req->data = kmalloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT | M_ZERO);
33190b70
SW
873 if ( req->data == NULL )
874 return(ENOMEM);
33190b70
SW
875 param = (struct tws_getset_param *)req->data;
876
877 req->cb = tws_getset_param_complete;
878 req->flags = TWS_DIR_OUT;
879 cmd_pkt = req->cmd_pkt;
880
881 cmd = &cmd_pkt->cmd.pkt_g;
882 cmd->param.sgl_off__opcode =
883 BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_SET_PARAM);
884 cmd->param.request_id = (u_int8_t)req->request_id;
885 cmd->param.host_id__unit = 0;
886 cmd->param.param_count = 1;
887 cmd->param.size = 2; /* map routine will add sgls */
888
889 /* Specify which parameter we want to set. */
890 param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
891 param->parameter_id = (u_int8_t)(param_id);
892 param->parameter_size_bytes = (u_int16_t)param_size;
893 memcpy(param->data, data, param_size);
894
895 callout_reset(&req->thandle, (TWS_IO_TIMEOUT * hz), tws_timeout, req);
896 error = tws_map_request(sc, req);
897 return(error);
898
899}
900
901int
902tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
903 u_int32_t param_size, void *data)
904{
905 struct tws_request *req;
906 struct tws_command_packet *cmd_pkt;
907 union tws_command_giga *cmd;
908 struct tws_getset_param *param;
909 u_int16_t reqid;
910 u_int64_t mfa;
911 int error = SUCCESS;
912
913
914 req = tws_get_request(sc, TWS_GETSET_PARAM_REQ);
915 if ( req == NULL ) {
916 TWS_TRACE_DEBUG(sc, "null req", 0, 0);
917 return(FAILURE);
918 }
919
920 req->length = TWS_SECTOR_SIZE;
0c374e73 921 req->data = kmalloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT | M_ZERO);
33190b70
SW
922 if ( req->data == NULL )
923 return(FAILURE);
33190b70
SW
924 param = (struct tws_getset_param *)req->data;
925
926 req->cb = NULL;
927 req->flags = TWS_DIR_IN;
928 cmd_pkt = req->cmd_pkt;
929
930 cmd = &cmd_pkt->cmd.pkt_g;
931 cmd->param.sgl_off__opcode =
932 BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_GET_PARAM);
933 cmd->param.request_id = (u_int8_t)req->request_id;
934 cmd->param.host_id__unit = 0;
935 cmd->param.param_count = 1;
936 cmd->param.size = 2; /* map routine will add sgls */
937
938 /* Specify which parameter we want to set. */
939 param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
940 param->parameter_id = (u_int8_t)(param_id);
941 param->parameter_size_bytes = (u_int16_t)param_size;
942
943 tws_map_request(sc, req);
944 reqid = tws_poll4_response(sc, &mfa);
945 tws_unmap_request(sc, req);
946
947 if ( reqid == TWS_GETSET_PARAM_REQ ) {
948 memcpy(data, param->data, param_size);
949 } else {
950 error = FAILURE;
951
952 }
953
954 kfree(req->data, M_TWS);
955 lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
956 req->state = TWS_REQ_STATE_FREE;
957 lockmgr(&sc->gen_lock, LK_RELEASE);
958 return(error);
959
960}
961
962void
963tws_unmap_request(struct tws_softc *sc, struct tws_request *req)
964{
965
966 if (req->data != NULL) {
967 if ( req->flags & TWS_DIR_IN )
968 bus_dmamap_sync(sc->data_tag, req->dma_map,
969 BUS_DMASYNC_POSTREAD);
970 if ( req->flags & TWS_DIR_OUT )
971 bus_dmamap_sync(sc->data_tag, req->dma_map,
972 BUS_DMASYNC_POSTWRITE);
973 lockmgr(&sc->io_lock, LK_EXCLUSIVE);
974 bus_dmamap_unload(sc->data_tag, req->dma_map);
975 lockmgr(&sc->io_lock, LK_RELEASE);
976 }
977}
978
979int32_t
980tws_map_request(struct tws_softc *sc, struct tws_request *req)
981{
982 int32_t error = 0;
983
984
985 /* If the command involves data, map that too. */
986 if (req->data != NULL) {
987 /*
988 * Map the data buffer into bus space and build the SG list.
989 */
990 lockmgr(&sc->io_lock, LK_EXCLUSIVE);
991 error = bus_dmamap_load(sc->data_tag, req->dma_map,
992 req->data, req->length,
993 tws_dmamap_data_load_cbfn, req,
994 BUS_DMA_WAITOK);
995 lockmgr(&sc->io_lock, LK_RELEASE);
996
997 if (error == EINPROGRESS) {
998 TWS_TRACE(sc, "in progress", 0, error);
999 /* tws_freeze_simq(sc); */
1000 error = TWS_REQ_ERR_INPROGRESS;
1001 }
1002 } else { /* no data involved */
1003 error = tws_submit_command(sc, req);
1004 }
1005 req->error_code = error;
1006 return(error);
1007}
1008
1009
/*
 * bus_dmamap_load() callback: sync the map for the transfer direction,
 * copy the DMA segments into the command's SG list (giga-format commands
 * get the SGL appended after the generic header; apache-format commands
 * use the fixed sg_list field), then submit the command to the firmware.
 */
static void
tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
                            int nseg, int error)
{

    struct tws_request *req = (struct tws_request *)arg;
    struct tws_softc *sc = req->sc;
    u_int16_t sgls = nseg;
    void *sgl_ptr;
    struct tws_cmd_generic *gcmd;

    /* NOTE(review): EFBIG is only traced; submission proceeds regardless. */
    if ( error == EFBIG )
        TWS_TRACE(sc, "not enough data segs", 0, nseg);


    if ( req->flags & TWS_DIR_IN )
        bus_dmamap_sync(req->sc->data_tag, req->dma_map,
                                BUS_DMASYNC_PREREAD);
    if ( req->flags & TWS_DIR_OUT )
        bus_dmamap_sync(req->sc->data_tag, req->dma_map,
                                BUS_DMASYNC_PREWRITE);
    if ( segs ) {
        /*
         * Non-SCSI passthru and get/set-param commands use the giga
         * packet format: the SGL is appended right after the generic
         * command, at an offset of gcmd->size 32-bit words.
         */
        if ( (req->type == TWS_PASSTHRU_REQ &&
             GET_OPCODE(req->cmd_pkt->cmd.pkt_a.res__opcode) !=
                            TWS_FW_CMD_EXECUTE_SCSI) ||
              req->type == TWS_GETSET_PARAM_REQ) {
            gcmd = &req->cmd_pkt->cmd.pkt_g.generic;
            sgl_ptr = (u_int32_t *)(gcmd) + gcmd->size;
            /* Each entry is 4 dwords (64-bit SGLs) or 2 (32-bit SGLs). */
            gcmd->size += sgls *
                     ((req->sc->is64bit && !tws_use_32bit_sgls) ? 4 :2 );
            tws_fill_sg_list(req->sc, segs, sgl_ptr, sgls);

        } else {
            /* EXECUTE_SCSI (apache) format: fixed SGL area in the packet. */
            tws_fill_sg_list(req->sc, segs,
                      (void *)req->cmd_pkt->cmd.pkt_a.sg_list, sgls);
            req->cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= sgls ;
        }
    }


    req->error_code = tws_submit_command(req->sc, req);

}
1053
1054
/*
 * Copy a busdma segment array (sgl_src) into the firmware's SG descriptor
 * format at sgl_dest, in one of three layouts:
 *
 *   - 64-bit controller, 64-bit SGLs: tws_sg_desc64 entries
 *   - 64-bit controller, 32-bit SGLs: tws_sg_desc32 entries
 *   - 32-bit controller:              tws_sg_desc32 entries
 *
 * In the first two branches the source pointer is advanced by
 * sizeof(bus_dma_segment_t) per iteration — the tws_sg_desc64 cast is
 * only a convenient overlay for reading address/length out of each
 * bus_dma_segment_t (NOTE(review): assumes those fields line up; depends
 * on struct layouts not visible here — TODO confirm).
 *
 * Overflow of the per-format element limits is only traced, not
 * rejected.
 */
static void
tws_fill_sg_list(struct tws_softc *sc, void *sgl_src, void *sgl_dest,
                          u_int16_t num_sgl_entries)
{
    int i;

    if ( sc->is64bit ) {
        struct tws_sg_desc64 *sgl_s = (struct tws_sg_desc64 *)sgl_src;

        if ( !tws_use_32bit_sgls ) {
            struct tws_sg_desc64 *sgl_d = (struct tws_sg_desc64 *)sgl_dest;
            if ( num_sgl_entries > TWS_MAX_64BIT_SG_ELEMENTS )
                TWS_TRACE(sc, "64bit sg overflow", num_sgl_entries, 0);
            for (i = 0; i < num_sgl_entries; i++) {
                sgl_d[i].address = sgl_s->address;
                sgl_d[i].length = sgl_s->length;
                sgl_d[i].flag = 0;
                sgl_d[i].reserved = 0;
                /* Step by the busdma segment size, not the overlay size. */
                sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) +
                                               sizeof(bus_dma_segment_t));
            }
        } else {
            /*
             * NOTE(review): 64-bit source addresses are narrowed into
             * 32-bit descriptors here — presumably the DMA tag restricts
             * segments to the low 4GB in this mode; TODO confirm.
             */
            struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;
            if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
                TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);
            for (i = 0; i < num_sgl_entries; i++) {
                sgl_d[i].address = sgl_s->address;
                sgl_d[i].length = sgl_s->length;
                sgl_d[i].flag = 0;
                sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) +
                                               sizeof(bus_dma_segment_t));
            }
        }
    } else {
        /*
         * 32-bit controller: plain element-wise copy.  NOTE(review): this
         * branch indexes the source by tws_sg_desc32 stride, unlike the
         * branches above — only correct if that matches
         * sizeof(bus_dma_segment_t) on 32-bit builds; TODO confirm.
         */
        struct tws_sg_desc32 *sgl_s = (struct tws_sg_desc32 *)sgl_src;
        struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;

        if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
            TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);


        for (i = 0; i < num_sgl_entries; i++) {
            sgl_d[i].address = sgl_s[i].address;
            sgl_d[i].length = sgl_s[i].length;
            sgl_d[i].flag = 0;
        }
    }
}
1103
1104
1105void
1106tws_intr(void *arg)
1107{
1108 struct tws_softc *sc = (struct tws_softc *)arg;
1109 u_int32_t histat=0, db=0;
1110
1111 KASSERT(sc, ("null softc"));
1112
1113 sc->stats.num_intrs++;
1114 histat = tws_read_reg(sc, TWS_I2O0_HISTAT, 4);
1115 if ( histat & TWS_BIT2 ) {
1116 TWS_TRACE_DEBUG(sc, "door bell :)", histat, TWS_I2O0_HISTAT);
1117 db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
1118 if ( db & TWS_BIT21 ) {
1119 tws_intr_attn_error(sc);
1120 return;
1121 }
1122 if ( db & TWS_BIT18 ) {
1123 tws_intr_attn_aen(sc);
1124 }
1125 }
1126
1127 if ( histat & TWS_BIT3 ) {
1128 tws_intr_resp(sc);
1129 }
1130}
1131
1132static void
1133tws_intr_attn_aen(struct tws_softc *sc)
1134{
1135 u_int32_t db=0;
1136
1137 /* maskoff db intrs untill all the aens are fetched */
1138 /* tws_disable_db_intr(sc); */
4e1af74f 1139 tws_fetch_aen(sc);
33190b70
SW
1140 tws_write_reg(sc, TWS_I2O0_HOBDBC, TWS_BIT18, 4);
1141 db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
1142
1143}
1144
1145static void
1146tws_intr_attn_error(struct tws_softc *sc)
1147{
1148 u_int32_t db=0;
1149
1150 TWS_TRACE(sc, "attn error", 0, 0);
1151 tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
1152 db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
1153 device_printf(sc->tws_dev, "Micro controller error.\n");
1154 tws_reset(sc);
1155}
1156
1157static void
1158tws_intr_resp(struct tws_softc *sc)
1159{
1160 u_int16_t req_id;
1161 u_int64_t mfa;
1162
1163 while ( tws_get_response(sc, &req_id, &mfa) ) {
1164 sc->stats.reqs_out++;
1165 if ( req_id == TWS_INVALID_REQID ) {
1166 TWS_TRACE_DEBUG(sc, "invalid req_id", mfa, req_id);
1167 sc->stats.reqs_errored++;
1168 tws_err_complete(sc, mfa);
1169 continue;
1170 }
1171
1172 sc->reqs[req_id].cb(&sc->reqs[req_id]);
1173 }
1174
1175}
1176
1177
/*
 * CAM polling entry point: service the controller exactly as the
 * interrupt handler would.
 */
static void
tws_poll(struct cam_sim *sim)
{
    struct tws_softc *sc;

    sc = (struct tws_softc *)cam_sim_softc(sim);
    TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    tws_intr(sc);
}
1185
1186void
1187tws_timeout(void *arg)
1188{
1189 struct tws_request *req = (struct tws_request *)arg;
1190 struct tws_softc *sc = req->sc;
1191
1192
1193 if ( tws_get_state(sc) != TWS_RESET ) {
1194 device_printf(sc->tws_dev, "Request timed out.\n");
4e1af74f 1195 tws_reset(sc);
33190b70
SW
1196 }
1197}
1198
/*
 * Begin an asynchronous controller reset.
 *
 * Marks the adapter as resetting (TWS_RESET_START under gen_lock),
 * quiesces interrupts and the CAM SIM queue, asserts the soft reset and
 * schedules tws_reset_cb() to poll for reset completion.  A no-op if a
 * reset is already under way.
 */
void
tws_reset(void *arg)
{

    struct tws_softc *sc = (struct tws_softc *)arg;

    /* Another reset is already in progress. */
    if ( tws_get_state(sc) == TWS_RESET ) {
        return;
    }
    device_printf(sc->tws_dev, "Resetting controller\n");
    lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
    tws_send_event(sc, TWS_RESET_START);
    lockmgr(&sc->gen_lock, LK_RELEASE);

    tws_turn_off_interrupts(sc);
    /* tws_freeze_simq() asserts sim_lock is held. */
    lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
    tws_freeze_simq(sc);
    lockmgr(&sc->sim_lock, LK_RELEASE);

    tws_assert_soft_reset(sc);
    /* Poll for completion in hz/10 ticks (~100ms). */
    callout_reset(&sc->reset_cb_handle, hz/10, tws_reset_cb, sc);
}
1221
1222static void
1223tws_reset_cb(void *arg)
1224{
1225
1226 struct tws_softc *sc = (struct tws_softc *)arg;
1227 u_int32_t reg;
1228
1229 if ( tws_get_state(sc) != TWS_RESET ) {
1230 return;
1231 }
1232 reg = tws_read_reg(sc, TWS_I2O0_SCRPD3, 4);
1233 if (!( reg & TWS_BIT13 )) {
1234 callout_reset(&sc->reset_cb_handle, hz/10, tws_reset_cb, sc);
1235 return;
1236 }
1237 tws_drain_response_queue(sc);
1238 tws_drain_busy_queue(sc);
1239 tws_drain_reserved_reqs(sc);
1240 callout_reset(&sc->reinit_handle, 5*hz, tws_reinit, sc);
1241}
1242
/*
 * Post-reset re-initialization callout.
 *
 * Waits (in 5-second steps) for the controller to come ready; if it
 * stays down for TWS_RESET_TIMEOUT, re-asserts the soft reset up to two
 * more times.  Once ready, re-establishes the firmware connection,
 * rebuilds the outbound free list, unfreezes the SIM queue, re-enables
 * interrupts and signals TWS_RESET_COMPLETE.
 *
 * NOTE(review): timeout_val/try are function-static, so this state is
 * shared across all controllers driven by this module — presumably only
 * one reset is ever in flight at a time; TODO confirm.
 */
static void
tws_reinit(void *arg)
{

    struct tws_softc *sc = (struct tws_softc *)arg;
    static int timeout_val=0, try=2 ;

    if ( !tws_ctlr_ready(sc) ) {
        /* Each poll is 5*hz apart, so count elapsed time in 5s units. */
        timeout_val += 5;
        if ( timeout_val >= TWS_RESET_TIMEOUT ) {
            timeout_val = 0;
            if ( try )
                tws_assert_soft_reset(sc);
            try--;
        }
        callout_reset(&sc->reinit_handle, 5*hz, tws_reinit, sc);
        return;
    }

    /* Controller is ready: reset the retry bookkeeping. */
    timeout_val=0;
    try = 2;
    sc->obfl_q_overrun = false;
    if ( tws_init_connect(sc, tws_queue_depth) ) {
        TWS_TRACE_DEBUG(sc, "initConnect failed", 0, sc->is64bit);
    }
    tws_init_obfl_q(sc);

    /* tws_release_simq() asserts sim_lock is held. */
    lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
    tws_release_simq(sc);
    lockmgr(&sc->sim_lock, LK_RELEASE);
    tws_turn_on_interrupts(sc);

    lockmgr(&sc->gen_lock, LK_EXCLUSIVE);
    tws_send_event(sc, TWS_RESET_COMPLETE);
    lockmgr(&sc->gen_lock, LK_RELEASE);
    /* Wake anyone sleeping on sc->chan for reset completion. */
    if ( sc->chan ) {
        sc->chan = 0;
        wakeup((void *)&sc->chan);
    }

}
1284
1285
1286static void
1287tws_freeze_simq(struct tws_softc *sc)
1288{
1289
1290 TWS_TRACE_DEBUG(sc, "freezeing", 0, 0);
1291 KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0);
1292 xpt_freeze_simq(sc->sim, 1);
1293
1294}
1295static void
1296tws_release_simq(struct tws_softc *sc)
1297{
1298
1299 TWS_TRACE_DEBUG(sc, "unfreezeing", 0, 0);
1300 KKASSERT(lockstatus(&sc->sim_lock, curthread) != 0);
1301 xpt_release_simq(sc->sim, 1);
1302
1303}
1304
1305
/* Allow hw.tws.cam_depth to be overridden from the loader environment. */
TUNABLE_INT("hw.tws.cam_depth", &tws_cam_depth);