Rip out bad spl manipulating junk from mpt and clean it up.
[dragonfly.git] / sys / dev / disk / mpt / mpt_freebsd.c
... / ...
CommitLineData
1/* $FreeBSD: src/sys/dev/mpt/mpt_freebsd.c,v 1.3.2.3 2002/09/24 21:37:25 mjacob Exp $ */
2/* $DragonFly: src/sys/dev/disk/mpt/mpt_freebsd.c,v 1.8 2005/06/16 15:48:59 dillon Exp $ */
3/*
4 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
5 * FreeBSD Version.
6 *
7 * Copyright (c) 2000, 2001 by Greg Ansley
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice immediately at the beginning of the file, without modification,
14 * this list of conditions, and the following disclaimer.
15 * 2. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30/*
31 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
32 */
33
34#include "mpt_freebsd.h"
35
36static void mpt_poll(struct cam_sim *);
37static timeout_t mpttimeout;
38static timeout_t mpttimeout2;
39static void mpt_action(struct cam_sim *, union ccb *);
40static int mpt_setwidth(mpt_softc_t *, int, int);
41static int mpt_setsync(mpt_softc_t *, int, int, int);
42
43void
44mpt_cam_attach(mpt_softc_t *mpt)
45{
46 struct cam_sim *sim;
47 int maxq;
48
49 mpt->bus = 0;
50 maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt))?
51 mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);
52
53
54 /*
55 * Construct our SIM entry.
56 */
57 sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
58 mpt->unit, 1, maxq, NULL);
59 if (sim == NULL)
60 return;
61
62 /*
63 * Register exactly the bus.
64 */
65
66 if (xpt_bus_register(sim, 0) != CAM_SUCCESS) {
67 cam_sim_free(sim);
68 return;
69 }
70
71 if (xpt_create_path(&mpt->path, NULL, cam_sim_path(sim),
72 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
73 xpt_bus_deregister(cam_sim_path(sim));
74 cam_sim_free(sim);
75 return;
76 }
77 mpt->sim = sim;
78}
79
80void
81mpt_cam_detach(mpt_softc_t *mpt)
82{
83 if (mpt->sim != NULL) {
84 xpt_free_path(mpt->path);
85 xpt_bus_deregister(cam_sim_path(mpt->sim));
86 cam_sim_free(mpt->sim);
87 mpt->sim = NULL;
88 }
89}
90
91/* This routine is used after a system crash to dump core onto the
92 * swap device.
93 */
94static void
95mpt_poll(struct cam_sim *sim)
96{
97 mpt_softc_t *mpt = (mpt_softc_t *) cam_sim_softc(sim);
98 MPT_LOCK(mpt);
99 mpt_intr(mpt);
100 MPT_UNLOCK(mpt);
101}
102
/*
 * This routine is called if the 9x9 does not return completion status
 * for a command after a CAM specified time.
 */
static void
mpttimeout(void *arg)
{
	request_t *req;
	union ccb *ccb = arg;
	u_int32_t oseq;
	mpt_softc_t *mpt;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	MPT_LOCK(mpt);
	req = ccb->ccb_h.ccb_req_ptr;
	/* Snapshot the sequence number so a racing completion is detectable. */
	oseq = req->sequence;
	mpt->timeouts++;
	/*
	 * Run the interrupt handler once: the completion may already be
	 * sitting in the reply queue.  If the request's sequence number
	 * changed, mpt_intr() completed this very request and we're done.
	 */
	if (mpt_intr(mpt)) {
		if (req->sequence != oseq) {
			device_printf(mpt->dev, "bullet missed in timeout\n");
			MPT_UNLOCK(mpt);
			return;
		}
		device_printf(mpt->dev, "bullet U-turned in timeout: got us\n");
	}
	/* Genuine timeout: dump diagnostic state for the stuck request. */
	device_printf(mpt->dev,
	    "time out on request index = 0x%02x sequence = 0x%08x\n",
	    req->index, req->sequence);
	mpt_check_doorbell(mpt);
	device_printf(mpt->dev, "Status %08X; Mask %08X; Doorbell %08X\n",
	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
	    mpt_read(mpt, MPT_OFFSET_DOORBELL) );
	printf("request state %s\n", mpt_req_state(req->debug));
	if (ccb != req->ccb) {
		printf("time out: ccb %p != req->ccb %p\n",
		    ccb,req->ccb);
	}
	mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);
	/*
	 * Detach the request from the CCB and defer reclaiming it to
	 * mpttimeout2() a tick later, giving the IOC one last chance.
	 * The softc pointer is stashed in the (currently unused)
	 * free-list linkage so mpttimeout2() can find the adapter.
	 */
	req->debug = REQ_TIMEOUT;
	req->ccb = NULL;
	req->link.sle_next = (void *) mpt;
	callout_reset(&req->timeout, hz / 10, mpttimeout2, req);
	/* Fail the CCB back to CAM and release the SIM queue. */
	ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	mpt->outofbeer = 0;
	xpt_done(ccb);
	MPT_UNLOCK(mpt);
}
152
153static void
154mpttimeout2(void *arg)
155{
156 request_t *req = arg;
157 if (req->debug == REQ_TIMEOUT) {
158 mpt_softc_t *mpt = (mpt_softc_t *) req->link.sle_next;
159
160 MPT_LOCK(mpt);
161 mpt_free_request(mpt, req);
162 MPT_UNLOCK(mpt);
163 }
164}
165
/*
 * Callback routine from "bus_dmamap_load" or in simple case called directly.
 *
 * Takes a list of physical segments and builds the SGL for SCSI IO command
 * and forwards the command to the IOC after one last check that CAM has not
 * aborted the transaction.
 */
static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req;
	union ccb *ccb;
	mpt_softc_t *mpt;
	MSG_SCSI_IO_REQUEST *mpt_req;
	SGE_SIMPLE32 *se;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_req = req->req_vbuf;

	MPT_LOCK(mpt);

	/* More segments than we can ever chain together is a hard error. */
	if (error == 0 && nseg > MPT_SGL_MAX) {
		error = EFBIG;
	}

	if (error != 0) {
		/*
		 * Map failed: complete the CCB with an error, freezing the
		 * device queue so CAM notices, and recycle the request.
		 */
		if (error != EFBIG)
			device_printf(mpt->dev, "bus_dmamap_load returned %d\n",
			    error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, 1);
			ccb->ccb_h.status = CAM_DEV_QFRZN;
			if (error == EFBIG)
				ccb->ccb_h.status |= CAM_REQ_TOO_BIG;
			else
				ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		MPT_UNLOCK(mpt);
		return;
	}

	if (nseg > MPT_NSGL_FIRST(mpt)) {
		/*
		 * Too many segments for the space in the request frame:
		 * build the first (NSGL_FIRST - 1) simple elements in the
		 * frame, then chain the rest in the remainder of the
		 * request buffer via SGE_CHAIN32 elements.
		 */
		int i, nleft = nseg;
		u_int32_t flags;
		bus_dmasync_op_t op;
		SGE_CHAIN32 *ce;

		mpt_req->DataLength = ccb->csio.dxfer_len;
		flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

		se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1; i++, se++, dm_segs++) {
			u_int32_t tf;

			bzero(se, sizeof (*se));
			se->Address = dm_segs->ds_addr;
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			/* Last simple element before the chain element. */
			if (i == MPT_NSGL_FIRST(mpt) - 2) {
				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			nleft -= 1;
		}

		/*
		 * Tell the IOC where to find the first chain element
		 */
		mpt_req->ChainOffset = ((char *)se - (char *)mpt_req) >> 2;

		/*
		 * Until we're finished with all segments...
		 */
		while (nleft) {
			int ntodo;
			/*
			 * Construct the chain element that point to the
			 * next segment.
			 */
			ce = (SGE_CHAIN32 *) se++;
			if (nleft > MPT_NSGL(mpt)) {
				/*
				 * More chains will follow: reserve the last
				 * slot of this chunk for the next chain
				 * element.
				 */
				ntodo = MPT_NSGL(mpt) - 1;
				ce->NextChainOffset = (MPT_RQSL(mpt) -
				    sizeof (SGE_SIMPLE32)) >> 2;
				ce->Length = MPT_NSGL(mpt)
				    * sizeof(SGE_SIMPLE32);
			} else {
				/* Final chunk: everything left fits here. */
				ntodo = nleft;
				ce->NextChainOffset = 0;
				ce->Length = ntodo * sizeof (SGE_SIMPLE32);
			}
			/*
			 * Physical address of the chunk = request buffer
			 * base + offset of 'se' within the request frame.
			 */
			ce->Address = req->req_pbuf +
			    ((char *)se - (char *)mpt_req);
			ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
			for (i = 0; i < ntodo; i++, se++, dm_segs++) {
				u_int32_t tf;

				bzero(se, sizeof (*se));
				se->Address = dm_segs->ds_addr;
				MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
				tf = flags;
				if (i == ntodo - 1) {
					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
					/* Very last element of the SGL. */
					if (ce->NextChainOffset == 0) {
						tf |=
						    MPI_SGE_FLAGS_END_OF_LIST |
						    MPI_SGE_FLAGS_END_OF_BUFFER;
					}
				}
				MPI_pSGE_SET_FLAGS(se, tf);
				nleft -= 1;
			}

		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;
		if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
			bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
		}
	} else if (nseg > 0) {
		/* All segments fit in the request frame: simple SGL only. */
		int i;
		u_int32_t flags;
		bus_dmasync_op_t op;

		mpt_req->DataLength = ccb->csio.dxfer_len;
		flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

		/* Copy the segments into our SG list */
		se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		for (i = 0; i < nseg; i++, se++, dm_segs++) {
			u_int32_t tf;

			bzero(se, sizeof (*se));
			se->Address = dm_segs->ds_addr;
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			tf = flags;
			if (i == nseg - 1) {
				tf |=
				    MPI_SGE_FLAGS_LAST_ELEMENT |
				    MPI_SGE_FLAGS_END_OF_BUFFER |
				    MPI_SGE_FLAGS_END_OF_LIST;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;
		if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
			bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
		}
	} else {
		se = (SGE_SIMPLE32 *) &mpt_req->SGL;
		/*
		 * No data to transfer so we just make a single simple SGL
		 * with zero length.
		 */
		MPI_pSGE_SET_FLAGS(se,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
	}

	/*
	 * Last time we need to check if this CCB needs to be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0)
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		mpt_free_request(mpt, req);
		xpt_done(ccb);
		MPT_UNLOCK(mpt);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	/* Arm the CAM timeout (milliseconds) before handing off to the IOC. */
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		callout_reset(&ccb->ccb_h.timeout_ch,
		    (ccb->ccb_h.timeout * hz) / 1000, mpttimeout, ccb);
	}
	if (mpt->verbose > 1)
		mpt_print_scsi_io_request(mpt_req);
	mpt_send_cmd(mpt, req);
	MPT_UNLOCK(mpt);
}
365
366static void
367mpt_start(union ccb *ccb)
368{
369 request_t *req;
370 struct mpt_softc *mpt;
371 MSG_SCSI_IO_REQUEST *mpt_req;
372 struct ccb_scsiio *csio = &ccb->csio;
373 struct ccb_hdr *ccbh = &ccb->ccb_h;
374
375 /* Get the pointer for the physical addapter */
376 mpt = ccb->ccb_h.ccb_mpt_ptr;
377 MPT_LOCK(mpt);
378
379 /* Get a request structure off the free list */
380 if ((req = mpt_get_request(mpt)) == NULL) {
381 if (mpt->outofbeer == 0) {
382 mpt->outofbeer = 1;
383 xpt_freeze_simq(mpt->sim, 1);
384 if (mpt->verbose > 1) {
385 device_printf(mpt->dev, "FREEZEQ\n");
386 }
387 }
388 ccb->ccb_h.status = CAM_REQUEUE_REQ;
389 xpt_done(ccb);
390 MPT_UNLOCK(mpt);
391 return;
392 }
393
394 /* Link the ccb and the request structure so we can find */
395 /* the other knowing either the request or the ccb */
396 req->ccb = ccb;
397 ccb->ccb_h.ccb_req_ptr = req;
398
399 /* Now we build the command for the IOC */
400 mpt_req = req->req_vbuf;
401 bzero(mpt_req, sizeof *mpt_req);
402
403 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
404 mpt_req->Bus = mpt->bus;
405
406 mpt_req->SenseBufferLength =
407 (csio->sense_len < MPT_SENSE_SIZE) ?
408 csio->sense_len : MPT_SENSE_SIZE;
409
410 /* We use the message context to find the request structure when we */
411 /* Get the command competion interrupt from the FC IOC. */
412 mpt_req->MsgContext = req->index;
413
414 /* Which physical device to do the I/O on */
415 mpt_req->TargetID = ccb->ccb_h.target_id;
416 mpt_req->LUN[1] = ccb->ccb_h.target_lun;
417
418 /* Set the direction of the transfer */
419 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
420 mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
421 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
422 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
423 else
424 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
425
426 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
427 switch(ccb->csio.tag_action) {
428 case MSG_HEAD_OF_Q_TAG:
429 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
430 break;
431 case MSG_ACA_TASK:
432 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
433 break;
434 case MSG_ORDERED_Q_TAG:
435 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
436 break;
437 case MSG_SIMPLE_Q_TAG:
438 default:
439 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
440 break;
441 }
442 } else {
443 if (mpt->is_fc)
444 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
445 else
446 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
447 }
448
449 if (mpt->is_fc == 0) {
450 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
451 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
452 }
453 }
454
455 /* Copy the scsi command block into place */
456 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0)
457 bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
458 else
459 bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
460
461 mpt_req->CDBLength = csio->cdb_len;
462 mpt_req->DataLength = csio->dxfer_len;
463 mpt_req->SenseBufferLowAddr = req->sense_pbuf;
464
465 /*
466 * If we have any data to send with this command,
467 * map it into bus space.
468 */
469
470 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
471 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
472 /*
473 * We've been given a pointer to a single buffer.
474 */
475 if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
476 /*
477 * Virtual address that needs to translated into
478 * one or more physical pages.
479 */
480 int error;
481
482 error = bus_dmamap_load(mpt->buffer_dmat,
483 req->dmap, csio->data_ptr, csio->dxfer_len,
484 mpt_execute_req, req, 0);
485 if (error == EINPROGRESS) {
486 /*
487 * So as to maintain ordering,
488 * freeze the controller queue
489 * until our mapping is
490 * returned.
491 */
492 xpt_freeze_simq(mpt->sim, 1);
493 ccbh->status |= CAM_RELEASE_SIMQ;
494 }
495 } else {
496 /*
497 * We have been given a pointer to single
498 * physical buffer.
499 */
500 struct bus_dma_segment seg;
501 seg.ds_addr = (bus_addr_t)csio->data_ptr;
502 seg.ds_len = csio->dxfer_len;
503 mpt_execute_req(req, &seg, 1, 0);
504 }
505 } else {
506 /*
507 * We have been given a list of addresses.
508 * These case could be easily done but they are not
509 * currently generated by the CAM subsystem so there
510 * is no point in wasting the time right now.
511 */
512 struct bus_dma_segment *segs;
513 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
514 mpt_execute_req(req, NULL, 0, EFAULT);
515 } else {
516 /* Just use the segments provided */
517 segs = (struct bus_dma_segment *)csio->data_ptr;
518 mpt_execute_req(req, segs, csio->sglist_cnt,
519 (csio->sglist_cnt < MPT_SGL_MAX)?
520 0 : EFBIG);
521 }
522 }
523 } else {
524 mpt_execute_req(req, NULL, 0, 0);
525 }
526 MPT_UNLOCK(mpt);
527}
528
529static int
530mpt_bus_reset(union ccb *ccb)
531{
532 int error;
533 request_t *req;
534 mpt_softc_t *mpt;
535 MSG_SCSI_TASK_MGMT *reset_req;
536
537 /* Get the pointer for the physical adapter */
538 mpt = ccb->ccb_h.ccb_mpt_ptr;
539
540 /* Get a request structure off the free list */
541 if ((req = mpt_get_request(mpt)) == NULL) {
542 return (CAM_REQUEUE_REQ);
543 }
544
545 /* Link the ccb and the request structure so we can find */
546 /* the other knowing either the request or the ccb */
547 req->ccb = ccb;
548 ccb->ccb_h.ccb_req_ptr = req;
549
550 reset_req = req->req_vbuf;
551 bzero(reset_req, sizeof *reset_req);
552
553 reset_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
554 reset_req->MsgContext = req->index;
555 reset_req->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
556 if (mpt->is_fc) {
557 /*
558 * Should really be TARGET_RESET_OPTION
559 */
560 reset_req->MsgFlags =
561 MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION;
562 }
563 /* Which physical device Reset */
564 reset_req->TargetID = ccb->ccb_h.target_id;
565 reset_req->LUN[1] = ccb->ccb_h.target_lun;
566
567 ccb->ccb_h.status |= CAM_SIM_QUEUED;
568
569 error = mpt_send_handshake_cmd(mpt,
570 sizeof (MSG_SCSI_TASK_MGMT), reset_req);
571 if (error) {
572 device_printf(mpt->dev,
573 "mpt_bus_reset: mpt_send_handshake return %d\n", error);
574 return (CAM_REQ_CMP_ERR);
575 } else {
576 return (CAM_REQ_CMP);
577 }
578}
579
580/*
581 * Process an asynchronous event from the IOC.
582 */
583static void mpt_ctlop(mpt_softc_t *, void *, u_int32_t);
584static void mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *);
585
586void
587mpt_ctlop(mpt_softc_t *mpt, void *vmsg, u_int32_t reply)
588{
589 MSG_DEFAULT_REPLY *dmsg = vmsg;
590
591 if (dmsg->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
592 mpt_event_notify_reply(mpt, vmsg);
593 mpt_free_reply(mpt, (reply << 1));
594 } else if (dmsg->Function == MPI_FUNCTION_EVENT_ACK) {
595 mpt_free_reply(mpt, (reply << 1));
596 } else if (dmsg->Function == MPI_FUNCTION_PORT_ENABLE) {
597 MSG_PORT_ENABLE_REPLY *msg = vmsg;
598 int index = msg->MsgContext & ~0x80000000;
599 if (mpt->verbose > 1) {
600 device_printf(mpt->dev, "enable port reply idx %d\n",
601 index);
602 }
603 if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
604 request_t *req = &mpt->request_pool[index];
605 req->debug = REQ_DONE;
606 }
607 mpt_free_reply(mpt, (reply << 1));
608 } else if (dmsg->Function == MPI_FUNCTION_CONFIG) {
609 MSG_CONFIG_REPLY *msg = vmsg;
610 int index = msg->MsgContext & ~0x80000000;
611 if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
612 request_t *req = &mpt->request_pool[index];
613 req->debug = REQ_DONE;
614 req->sequence = reply;
615 } else {
616 mpt_free_reply(mpt, (reply << 1));
617 }
618 } else {
619 device_printf(mpt->dev, "unknown mpt_ctlop: %x\n",
620 dmsg->Function);
621 }
622}
623
/*
 * Decode and log an MPI event notification, then acknowledge it if the
 * IOC asked for an ack.
 */
static void
mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
{
	switch(msg->Event) {
	case MPI_EVENT_LOG_DATA:
		/* Some error occurred that LSI wants logged */
		device_printf(mpt->dev,
		    "\tEvtLogData: IOCLogInfo: 0x%08x\n",
		    msg->IOCLogInfo);
		device_printf(mpt->dev, "\tEvtLogData: Event Data:");
		{
			int i;
			for (i = 0; i < msg->EventDataLength; i++) {
				device_printf(mpt->dev,
				    " %08X", msg->Data[i]);
			}
		}
		device_printf(mpt->dev, "\n");
		break;

	case MPI_EVENT_UNIT_ATTENTION:
		device_printf(mpt->dev,
		    "Bus: 0x%02x TargetID: 0x%02x\n",
		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
		break;

	case MPI_EVENT_IOC_BUS_RESET:
		/* We generated a bus reset */
		device_printf(mpt->dev, "IOC Bus Reset Port: %d\n",
		    (msg->Data[0] >> 8) & 0xff);
		break;

	case MPI_EVENT_EXT_BUS_RESET:
		/* Someone else generated a bus reset */
		device_printf(mpt->dev, "Ext Bus Reset\n");
		/*
		 * These replies don't return EventData like the MPI
		 * spec says they do
		 */
/*		xpt_async(AC_BUS_RESET, path, NULL);  */
		break;

	case MPI_EVENT_RESCAN:
		/*
		 * In general this means a device has been added
		 * to the loop.
		 */
		device_printf(mpt->dev,
		    "Rescan Port: %d\n", (msg->Data[0] >> 8) & 0xff);
/*		xpt_async(AC_FOUND_DEVICE, path, NULL);  */
		break;

	case MPI_EVENT_LINK_STATUS_CHANGE:
		device_printf(mpt->dev, "Port %d: LinkState: %s\n",
		    (msg->Data[1] >> 8) & 0xff,
		    ((msg->Data[0] & 0xff) == 0)? "Failed" : "Active");
		break;

	case MPI_EVENT_LOOP_STATE_CHANGE:
		/* Sub-decode by FC loop primitive type in Data[0] bits 16-23. */
		switch ((msg->Data[0] >> 16) & 0xff) {
		case 0x01:
			device_printf(mpt->dev,
			    "Port 0x%x: FC LinkEvent: LIP(%02X,%02X) (Loop Initialization)\n",
			    (msg->Data[1] >> 8) & 0xff,
			    (msg->Data[0] >> 8) & 0xff,
			    (msg->Data[0] ) & 0xff);
			switch ((msg->Data[0] >> 8) & 0xff) {
			case 0xF7:
				if ((msg->Data[0] & 0xff) == 0xF7) {
					printf("Device needs AL_PA\n");
				} else {
					printf("Device %02X doesn't like FC performance\n",
					    msg->Data[0] & 0xFF);
				}
				break;
			case 0xF8:
				if ((msg->Data[0] & 0xff) == 0xF7) {
					printf("Device had loop failure at its receiver prior to acquiring AL_PA\n");
				} else {
					printf("Device %02X detected loop failure at its receiver\n",
					    msg->Data[0] & 0xFF);
				}
				break;
			default:
				printf("Device %02X requests that device %02X reset itself\n",
				    msg->Data[0] & 0xFF,
				    (msg->Data[0] >> 8) & 0xFF);
				break;
			}
			break;
		case 0x02:
			device_printf(mpt->dev, "Port 0x%x: FC LinkEvent: LPE(%02X,%02X) (Loop Port Enable)\n",
			    (msg->Data[1] >> 8) & 0xff, /* Port */
			    (msg->Data[0] >> 8) & 0xff, /* Character 3 */
			    (msg->Data[0] ) & 0xff /* Character 4 */
			    );
			break;
		case 0x03:
			device_printf(mpt->dev, "Port 0x%x: FC LinkEvent: LPB(%02X,%02X) (Loop Port Bypass)\n",
			    (msg->Data[1] >> 8) & 0xff, /* Port */
			    (msg->Data[0] >> 8) & 0xff, /* Character 3 */
			    (msg->Data[0] ) & 0xff /* Character 4 */
			    );
			break;
		default:
			device_printf(mpt->dev, "Port 0x%x: FC LinkEvent: Unknown FC event (%02X %02X %02X)\n",
			    (msg->Data[1] >> 8) & 0xff, /* Port */
			    (msg->Data[0] >> 16) & 0xff, /* Event */
			    (msg->Data[0] >> 8) & 0xff, /* Character 3 */
			    (msg->Data[0] ) & 0xff /* Character 4 */
			    );
		}
		break;

	case MPI_EVENT_LOGOUT:
		device_printf(mpt->dev, "FC Logout Port: %d N_PortID: %02X\n",
		    (msg->Data[1] >> 8) & 0xff,
		    msg->Data[0]);
		break;
	case MPI_EVENT_EVENT_CHANGE:
		/* This is just an acknowledgement of our
		   mpt_send_event_request */
		break;
	default:
		device_printf(mpt->dev, "Unknown event %X\n", msg->Event);
	}
	if (msg->AckRequired) {
		MSG_EVENT_ACK *ackp;
		request_t *req;
		if ((req = mpt_get_request(mpt)) == NULL) {
			panic("unable to get request to acknowledge notify");
		}
		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
		bzero(ackp, sizeof *ackp);
		ackp->Function = MPI_FUNCTION_EVENT_ACK;
		ackp->Event = msg->Event;
		ackp->EventContext = msg->EventContext;
		/*
		 * The high bit marks this as a control command so mpt_done()
		 * routes the completion to mpt_ctlop().
		 */
		ackp->MsgContext = req->index | 0x80000000;
		mpt_check_doorbell(mpt);
		mpt_send_cmd(mpt, req);
	}
}
766
/*
 * Command-completion handler, called from the interrupt path with a raw
 * reply-queue value.  The value is either a "context reply" (just the
 * request index, command succeeded) or an "address reply" (offset of a
 * full reply frame, used for errors and control operations).
 */
void
mpt_done(mpt_softc_t *mpt, u_int32_t reply)
{
	int index;
	request_t *req;
	union ccb *ccb;
	MSG_REQUEST_HEADER *mpt_req;
	MSG_SCSI_IO_REPLY *mpt_reply;

	index = -1; /* Shut up the compiler */

	/*
	 * NOTE(review): MPT_CONTEXT_REPLY clear appears to mean "context
	 * reply" here, so the flag bit itself marks an address reply.
	 */
	if ((reply & MPT_CONTEXT_REPLY) == 0) {
		/* context reply */
		mpt_reply = NULL;
		index = reply & MPT_CONTEXT_MASK;
	} else {
		unsigned *pReply;

		bus_dmamap_sync(mpt->reply_dmat, mpt->reply_dmap,
		    BUS_DMASYNC_POSTREAD);
		/* address reply (Error) */
		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
		if (mpt->verbose > 1) {
			pReply = (unsigned *) mpt_reply;
			device_printf(mpt->dev, "Address Reply (index %u)\n",
			    mpt_reply->MsgContext & 0xffff);
			device_printf(mpt->dev, "%08X %08X %08X %08X\n",
			    pReply[0], pReply[1], pReply[2], pReply[3]);
			device_printf(mpt->dev, "%08X %08X %08X %08X\n",
			    pReply[4], pReply[5], pReply[6], pReply[7]);
			device_printf(mpt->dev, "%08X %08X %08X %08X\n\n",
			    pReply[8], pReply[9], pReply[10], pReply[11]);
		}
		index = mpt_reply->MsgContext;
	}

	/*
	 * Address reply with MessageContext high bit set
	 * This is most likely a notify message so we try
	 * to process it then free it
	 */
	if ((index & 0x80000000) != 0) {
		if (mpt_reply != NULL) {
			mpt_ctlop(mpt, mpt_reply, reply);
		} else {
			device_printf(mpt->dev,
			    "mpt_done: index 0x%x, NULL reply\n", index);
		}
		return;
	}

	/* Did we end up with a valid index into the table? */
	if (index < 0 || index >= MPT_MAX_REQUESTS(mpt)) {
		printf("mpt_done: invalid index (%x) in reply\n", index);
		return;
	}

	req = &mpt->request_pool[index];

	/* Make sure memory hasn't been trashed */
	if (req->index != index) {
		printf("mpt_done: corrupted request struct");
		return;
	}

	/* Short cut for task management replies; nothing more for us to do */
	mpt_req = req->req_vbuf;
	if (mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT) {
		if (mpt->verbose > 1) {
			device_printf(mpt->dev, "mpt_done: TASK MGMT\n");
		}
		goto done;
	}

	if (mpt_req->Function == MPI_FUNCTION_PORT_ENABLE) {
		goto done;
	}

	/*
	 * At this point it better be a SCSI IO command, but don't
	 * crash if it isn't
	 */
	if (mpt_req->Function != MPI_FUNCTION_SCSI_IO_REQUEST) {
		goto done;
	}

	/* Recover the CAM control block from the request structure */
	ccb = req->ccb;

	/* Can't have had a SCSI command without a CAM control block */
	if (ccb == NULL || (ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
		device_printf(mpt->dev,
		    "mpt_done: corrupted ccb, index = 0x%02x seq = 0x%08x",
		    req->index, req->sequence);
		printf(" request state %s\nmpt_request:\n",
		    mpt_req_state(req->debug));
		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

		if (mpt_reply != NULL) {
			printf("\nmpt_done: reply:\n");
			mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
		} else {
			printf("\nmpt_done: context reply: 0x%08x\n", reply);
		}
		goto done;
	}

	/* Disarm the CAM timeout; the command did complete. */
	callout_stop(&ccb->ccb_h.timeout_ch);

	/* Post-DMA sync and unmap of the data buffer, if there was one. */
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			op = BUS_DMASYNC_POSTREAD;
		} else {
			op = BUS_DMASYNC_POSTWRITE;
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
	}
	ccb->csio.resid = 0;

	if (mpt_reply == NULL) {
		/* Context reply; report that the command was successful */
		ccb->ccb_h.status = CAM_REQ_CMP;
		ccb->csio.scsi_status = SCSI_STATUS_OK;
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		/* Thaw the SIM queue if mpt_start() had frozen it. */
		if (mpt->outofbeer) {
			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
			mpt->outofbeer = 0;
			if (mpt->verbose > 1) {
				device_printf(mpt->dev, "THAWQ\n");
			}
		}
		MPT_LOCK(mpt);
		xpt_done(ccb);
		MPT_UNLOCK(mpt);
		goto done;
	}

	/* Address reply: translate the IOC status into a CAM status. */
	ccb->csio.scsi_status = mpt_reply->SCSIStatus;
	switch(mpt_reply->IOCStatus) {
	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
		break;

	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/*
		 * Yikes, Tagged queue full comes through this path!
		 *
		 * So we'll change it to a status error and anything
		 * that returns status should probably be a status
		 * error as well.
		 */
		ccb->csio.resid =
		    ccb->csio.dxfer_len - mpt_reply->TransferCount;
		if (mpt_reply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS) {
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		}
#if 0
device_printf(mpt->dev, "underrun, scsi status is %x\n", ccb->csio.scsi_status);
		ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
#endif
		/* Fall through */
	case MPI_IOCSTATUS_SUCCESS:
	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (ccb->csio.scsi_status) {
		case SCSI_STATUS_OK:
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		default:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		}
		break;
	case MPI_IOCSTATUS_BUSY:
	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
		ccb->ccb_h.status = CAM_BUSY;
		break;

	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		break;

	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
		break;

	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
		ccb->ccb_h.status = CAM_UNCOR_PARITY;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;

	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		ccb->ccb_h.status = CAM_UA_TERMIO;
		break;

	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
		ccb->ccb_h.status = CAM_REQ_TERMIO;
		break;

	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
		break;

	default:
		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
		break;
	}

	/* Copy out autosense data, if any, to the CCB. */
	if ((mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0) {
		if (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) {
			ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
		} else {
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
			ccb->csio.sense_resid = mpt_reply->SenseCount;
			bcopy(req->sense_vbuf, &ccb->csio.sense_data,
			    ccb->csio.sense_len);
		}
	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
	}

	/* On any error, freeze the device queue so CAM can recover. */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
			ccb->ccb_h.status |= CAM_DEV_QFRZN;
			xpt_freeze_devq(ccb->ccb_h.path, 1);
		}
	}


	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	/* Thaw the SIM queue if mpt_start() had frozen it. */
	if (mpt->outofbeer) {
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		mpt->outofbeer = 0;
		if (mpt->verbose > 1) {
			device_printf(mpt->dev, "THAWQ\n");
		}
	}
	MPT_LOCK(mpt);
	xpt_done(ccb);
	MPT_UNLOCK(mpt);

done:
	/* If IOC done with this request free it up */
	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
		mpt_free_request(mpt, req);

	/* If address reply; give the buffer back to the IOC */
	if (mpt_reply != NULL)
		mpt_free_reply(mpt, (reply << 1));
}
1027
1028static void
mpt_action(struct cam_sim *sim, union ccb *ccb)
{
	int tgt, error;
	mpt_softc_t *mpt;
	struct ccb_trans_settings *cts;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));

	mpt = (mpt_softc_t *)cam_sim_softc(sim);

	/* Stash the softc in the CCB so completion paths can recover it. */
	ccb->ccb_h.ccb_mpt_ptr = mpt;

	switch (ccb->ccb_h.func_code) {
	case XPT_RESET_BUS:
		if (mpt->verbose > 1)
			device_printf(mpt->dev, "XPT_RESET_BUS\n");
		error = mpt_bus_reset(ccb);
		switch (error) {
		case CAM_REQ_INPROG:
			/* Reset in flight; completion will finish the CCB. */
			break;
		case CAM_REQUEUE_REQ:
			/*
			 * Out of resources: freeze the SIM queue once
			 * ("outofbeer") and ask CAM to requeue the request.
			 */
			if (mpt->outofbeer == 0) {
				mpt->outofbeer = 1;
				xpt_freeze_simq(sim, 1);
				if (mpt->verbose > 1) {
					device_printf(mpt->dev, "FREEZEQ\n");
				}
			}
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			MPT_LOCK(mpt);
			xpt_done(ccb);
			MPT_UNLOCK(mpt);
			break;

		case CAM_REQ_CMP:
			/*
			 * Reset completed synchronously; release the SIM
			 * queue if we had frozen it earlier.
			 */
			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
			ccb->ccb_h.status |= CAM_REQ_CMP;
			if (mpt->outofbeer) {
				ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
				mpt->outofbeer = 0;
				if (mpt->verbose > 1) {
					device_printf(mpt->dev, "THAWQ\n");
				}
			}
			MPT_LOCK(mpt);
			xpt_done(ccb);
			MPT_UNLOCK(mpt);
			break;

		default:
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			MPT_LOCK(mpt);
			xpt_done(ccb);
			MPT_UNLOCK(mpt);
		}
		break;

	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
		/*
		 * Do a couple of preliminary checks...
		 */
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			/* Physical-address CDB pointers are not supported. */
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
				ccb->ccb_h.status = CAM_REQ_INVALID;
				MPT_LOCK(mpt);
				xpt_done(ccb);
				MPT_UNLOCK(mpt);
				break;
			}
		}
		/* Max supported CDB length is 16 bytes */
		if (ccb->csio.cdb_len >
		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			MPT_LOCK(mpt);
			xpt_done(ccb);
			MPT_UNLOCK(mpt);
			/* NB: returns (not break) — nothing more to do. */
			return;
		}
		ccb->csio.scsi_status = SCSI_STATUS_OK;
		mpt_start(ccb);
		break;

	case XPT_ABORT:
		/*
		 * XXX: Need to implement
		 */
		ccb->ccb_h.status = CAM_UA_ABORT;
		MPT_LOCK(mpt);
		xpt_done(ccb);
		MPT_UNLOCK(mpt);
		break;

#ifdef	CAM_NEW_TRAN_CODE
#define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
#else
#define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
#endif
	/* dval bit flags: accumulated requested parameter changes. */
#define	DP_DISC_ENABLE	0x1
#define	DP_DISC_DISABL	0x2
#define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)

#define	DP_TQING_ENABLE	0x4
#define	DP_TQING_DISABL	0x8
#define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)

#define	DP_WIDE		0x10
#define	DP_NARROW	0x20
#define	DP_WIDTH	(DP_WIDE|DP_NARROW)

#define	DP_SYNC		0x40

	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
		cts = &ccb->cts;
		/* Only current settings may be set; goal/user is invalid. */
		if (!IS_CURRENT_SETTINGS(cts)) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			MPT_LOCK(mpt);
			xpt_done(ccb);
			MPT_UNLOCK(mpt);
			break;
		}
		tgt = cts->ccb_h.target_id;
		/* Transfer settings only make sense on parallel SCSI. */
		if (mpt->is_fc == 0) {
			u_int8_t dval = 0;
			u_int period = 0, offset = 0;
#ifndef	CAM_NEW_TRAN_CODE
			if (cts->valid & CCB_TRANS_DISC_VALID) {
				dval |= DP_DISC_ENABLE;
			}
			if (cts->valid & CCB_TRANS_TQ_VALID) {
				dval |= DP_TQING_ENABLE;
			}
			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
				if (cts->bus_width)
					dval |= DP_WIDE;
				else
					dval |= DP_NARROW;
			}
			/*
			 * Any SYNC RATE of nonzero and SYNC_OFFSET
			 * of nonzero will cause us to go to the
			 * selected (from NVRAM) maximum value for
			 * this device. At a later point, we'll
			 * allow finer control.
			 */
			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
				dval |= DP_SYNC;
				period = cts->sync_period;
				offset = cts->sync_offset;
			}
#else
			struct ccb_trans_settings_scsi *scsi =
			    &cts->proto_specific.scsi;
			struct ccb_trans_settings_spi *spi =
			    &cts->xport_specific.spi;

			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
					dval |= DP_DISC_ENABLE;
				else
					dval |= DP_DISC_DISABL;
			}

			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
					dval |= DP_TQING_ENABLE;
				else
					dval |= DP_TQING_DISABL;
			}

			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
				if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
					dval |= DP_WIDE;
				else
					dval |= DP_NARROW;
			}

			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
			    (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
			    (spi->sync_period && spi->sync_offset)) {
				dval |= DP_SYNC;
				period = spi->sync_period;
				offset = spi->sync_offset;
			}
#endif
			/* Apply accumulated dval flags to soft state/HW. */
			if (dval & DP_DISC_ENABLE) {
				mpt->mpt_disc_enable |= (1 << tgt);
			} else if (dval & DP_DISC_DISABL) {
				mpt->mpt_disc_enable &= ~(1 << tgt);
			}
			if (dval & DP_TQING_ENABLE) {
				mpt->mpt_tag_enable |= (1 << tgt);
			} else if (dval & DP_TQING_DISABL) {
				mpt->mpt_tag_enable &= ~(1 << tgt);
			}
			if (dval & DP_WIDTH) {
				if (mpt_setwidth(mpt, tgt, dval & DP_WIDE)) {
					ccb->ccb_h.status = CAM_REQ_CMP_ERR;
					MPT_LOCK(mpt);
					xpt_done(ccb);
					MPT_UNLOCK(mpt);
					break;
				}
			}
			if (dval & DP_SYNC) {
				if (mpt_setsync(mpt, tgt, period, offset)) {
					ccb->ccb_h.status = CAM_REQ_CMP_ERR;
					MPT_LOCK(mpt);
					xpt_done(ccb);
					MPT_UNLOCK(mpt);
					break;
				}
			}
			if (mpt->verbose > 1) {
				device_printf(mpt->dev,
				    "SET tgt %d flags %x period %x off %x\n",
				    tgt, dval, period, offset);
			}
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		MPT_LOCK(mpt);
		xpt_done(ccb);
		MPT_UNLOCK(mpt);
		break;

	case XPT_GET_TRAN_SETTINGS:
		cts = &ccb->cts;
		tgt = cts->ccb_h.target_id;
		if (mpt->is_fc) {
#ifndef	CAM_NEW_TRAN_CODE
			/*
			 * a lot of normal SCSI things don't make sense.
			 */
			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
			/*
			 * How do you measure the width of a high
			 * speed serial bus? Well, in bytes.
			 *
			 * Offset and period make no sense, though, so we set
			 * (above) a 'base' transfer speed to be gigabit.
			 */
			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
#else
			struct ccb_trans_settings_fc *fc =
			    &cts->xport_specific.fc;

			cts->protocol = PROTO_SCSI;
			cts->protocol_version = SCSI_REV_2;
			cts->transport = XPORT_FC;
			cts->transport_version = 0;

			fc->valid = CTS_FC_VALID_SPEED;
			fc->bitrate = 100000;	/* XXX: Need for 2Gb/s */
			/* XXX: need a port database for each target */
#endif
		} else {
#ifdef	CAM_NEW_TRAN_CODE
			struct ccb_trans_settings_scsi *scsi =
			    &cts->proto_specific.scsi;
			struct ccb_trans_settings_spi *spi =
			    &cts->xport_specific.spi;
#endif
			u_int8_t dval, pval, oval;

			/*
			 * We aren't going off of Port PAGE2 params for
			 * tagged queuing or disconnect capabilities
			 * for current settings. For goal settings,
			 * we assert all capabilities- we've had some
			 * problems with reading NVRAM data.
			 */
			if (IS_CURRENT_SETTINGS(cts)) {
				fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
				dval = 0;

				/*
				 * Read the live Device Page 0 from the
				 * chip; fall back to the cached copy on
				 * failure (tmp retains it).
				 */
				tmp = mpt->mpt_dev_page0[tgt];
				if (mpt_read_cfg_page(mpt, tgt, &tmp.Header)) {
					device_printf(mpt->dev,
					    "cannot get target %d DP0\n", tgt);
				} else  {
					if (mpt->verbose > 1) {
						device_printf(mpt->dev,
			    "SPI Tgt %d Page 0: NParms %x Information %x\n",
						    tgt,
						    tmp.NegotiatedParameters,
						    tmp.Information);
					}
				}

				if (tmp.NegotiatedParameters &
				    MPI_SCSIDEVPAGE0_NP_WIDE)
					dval |= DP_WIDE;

				if (mpt->mpt_disc_enable & (1 << tgt)) {
					dval |= DP_DISC_ENABLE;
				}
				if (mpt->mpt_tag_enable & (1 << tgt)) {
					dval |= DP_TQING_ENABLE;
				}
				/* Offset in byte 2, period factor in byte 1. */
				oval = (tmp.NegotiatedParameters >> 16) & 0xff;
				pval = (tmp.NegotiatedParameters >> 8) & 0xff;
			} else {
				/*
				 * XXX: Fix wrt NVRAM someday. Attempts
				 * XXX: to read port page2 device data
				 * XXX: just returns zero in these areas.
				 */
				dval = DP_WIDE|DP_DISC|DP_TQING;
				oval = (mpt->mpt_port_page0.Capabilities >> 16);
				pval = (mpt->mpt_port_page0.Capabilities >> 8);
			}
#ifndef	CAM_NEW_TRAN_CODE
			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
			if (dval & DP_DISC_ENABLE) {
				cts->flags |= CCB_TRANS_DISC_ENB;
			}
			if (dval & DP_TQING_ENABLE) {
				cts->flags |= CCB_TRANS_TAG_ENB;
			}
			if (dval & DP_WIDE) {
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			} else {
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
			}
			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
			if (oval) {
				cts->sync_period = pval;
				cts->sync_offset = oval;
				cts->valid |=
				    CCB_TRANS_SYNC_RATE_VALID |
				    CCB_TRANS_SYNC_OFFSET_VALID;
			}
#else
			cts->protocol = PROTO_SCSI;
			cts->protocol_version = SCSI_REV_2;
			cts->transport = XPORT_SPI;
			cts->transport_version = 2;

			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
			if (dval & DP_DISC_ENABLE) {
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
			}
			if (dval & DP_TQING_ENABLE) {
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
			}
			if (oval && pval) {
				spi->sync_offset = oval;
				spi->sync_period = pval;
				spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
				spi->valid |= CTS_SPI_VALID_SYNC_RATE;
			}
			spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
			if (dval & DP_WIDE) {
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			} else {
				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
			}
			if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				scsi->valid = CTS_SCSI_VALID_TQ;
				spi->valid |= CTS_SPI_VALID_DISC;
			} else {
				scsi->valid = 0;
			}
#endif
			if (mpt->verbose > 1) {
				device_printf(mpt->dev,
				    "GET %s tgt %d flags %x period %x off %x\n",
				    IS_CURRENT_SETTINGS(cts)? "ACTIVE" :
				    "NVRAM", tgt, dval, pval, oval);
			}
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		MPT_LOCK(mpt);
		xpt_done(ccb);
		MPT_UNLOCK(mpt);
		break;

	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t secs_per_cylinder;
		u_int32_t size_mb;

		ccg = &ccb->ccg;
		if (ccg->block_size == 0) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			MPT_LOCK(mpt);
			xpt_done(ccb);
			MPT_UNLOCK(mpt);
			break;
		}

		/* Classic BIOS-style geometry: 255/63 above 1GB, else 64/32. */
		size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
		if (size_mb > 1024) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		MPT_LOCK(mpt);
		xpt_done(ccb);
		MPT_UNLOCK(mpt);
		break;
	}
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->target_sprt = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_lun = 7;
		cpi->bus_id = cam_sim_bus(sim);
		/* FC and SPI report very different capabilities. */
		if (mpt->is_fc) {
			cpi->max_target = 255;
			cpi->hba_misc = PIM_NOBUSRESET;
			cpi->initiator_id = cpi->max_target + 1;
			cpi->base_transfer_speed = 100000;
			cpi->hba_inquiry = PI_TAG_ABLE;
		} else {
			cpi->initiator_id = mpt->mpt_ini_id;
			cpi->base_transfer_speed = 3300;
			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
			cpi->hba_misc = 0;
			cpi->max_target = 15;
		}

		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		MPT_LOCK(mpt);
		xpt_done(ccb);
		MPT_UNLOCK(mpt);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		MPT_LOCK(mpt);
		xpt_done(ccb);
		MPT_UNLOCK(mpt);
		break;
	}
}
1482
1483static int
1484mpt_setwidth(mpt_softc_t *mpt, int tgt, int onoff)
1485{
1486 fCONFIG_PAGE_SCSI_DEVICE_1 tmp;
1487 tmp = mpt->mpt_dev_page1[tgt];
1488 if (onoff) {
1489 tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
1490 } else {
1491 tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
1492 }
1493 if (mpt_write_cfg_page(mpt, tgt, &tmp.Header)) {
1494 return (-1);
1495 }
1496 if (mpt_read_cfg_page(mpt, tgt, &tmp.Header)) {
1497 return (-1);
1498 }
1499 mpt->mpt_dev_page1[tgt] = tmp;
1500 if (mpt->verbose > 1) {
1501 device_printf(mpt->dev,
1502 "SPI Target %d Page 1: RequestedParameters %x Config %x\n",
1503 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters,
1504 mpt->mpt_dev_page1[tgt].Configuration);
1505 }
1506 return (0);
1507}
1508
1509static int
1510mpt_setsync(mpt_softc_t *mpt, int tgt, int period, int offset)
1511{
1512 fCONFIG_PAGE_SCSI_DEVICE_1 tmp;
1513 tmp = mpt->mpt_dev_page1[tgt];
1514 tmp.RequestedParameters &=
1515 ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
1516 tmp.RequestedParameters &=
1517 ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
1518 tmp.RequestedParameters &=
1519 ~MPI_SCSIDEVPAGE1_RP_DT;
1520 tmp.RequestedParameters &=
1521 ~MPI_SCSIDEVPAGE1_RP_QAS;
1522 tmp.RequestedParameters &=
1523 ~MPI_SCSIDEVPAGE1_RP_IU;
1524 /*
1525 * XXX: For now, we're ignoring specific settings
1526 */
1527 if (period && offset) {
1528 int factor, offset, np;
1529 factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
1530 offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
1531 np = 0;
1532 if (factor < 0x9) {
1533 np |= MPI_SCSIDEVPAGE1_RP_QAS;
1534 np |= MPI_SCSIDEVPAGE1_RP_IU;
1535 }
1536 if (factor < 0xa) {
1537 np |= MPI_SCSIDEVPAGE1_RP_DT;
1538 }
1539 np |= (factor << 8) | (offset << 16);
1540 tmp.RequestedParameters |= np;
1541 }
1542 if (mpt_write_cfg_page(mpt, tgt, &tmp.Header)) {
1543 return (-1);
1544 }
1545 if (mpt_read_cfg_page(mpt, tgt, &tmp.Header)) {
1546 return (-1);
1547 }
1548 mpt->mpt_dev_page1[tgt] = tmp;
1549 if (mpt->verbose > 1) {
1550 device_printf(mpt->dev,
1551 "SPI Target %d Page 1: RParams %x Config %x\n",
1552 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters,
1553 mpt->mpt_dev_page1[tgt].Configuration);
1554 }
1555 return (0);
1556}