1 /* $FreeBSD: src/sys/dev/mpt/mpt_freebsd.c,v 1.3.2.3 2002/09/24 21:37:25 mjacob Exp $ */
2 /* $DragonFly: src/sys/dev/disk/mpt/mpt_freebsd.c,v 1.6 2004/09/19 00:25:57 joerg Exp $ */
4 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
7 * Copyright (c) 2000, 2001 by Greg Ansley
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice immediately at the beginning of the file, without modification,
14 * this list of conditions, and the following disclaimer.
15 * 2. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
34 #include "mpt_freebsd.h"
36 static void mpt_poll(struct cam_sim *);
37 static timeout_t mpttimeout;
38 static timeout_t mpttimeout2;
39 static void mpt_action(struct cam_sim *, union ccb *);
40 static int mpt_setwidth(mpt_softc_t *, int, int);
41 static int mpt_setsync(mpt_softc_t *, int, int, int);
/*
 * mpt_cam_attach: register this adapter instance with the CAM layer.
 * Allocates a SIM, registers the bus, and builds a wildcard path for
 * async callbacks.  Returns through the (not fully visible) error paths
 * on failure.
 * NOTE(review): this excerpt is sampled -- intermediate source lines
 * (declarations, error handling, closing braces) are missing between
 * the statements below.
 */
44 mpt_cam_attach(mpt_softc_t *mpt)
/*
 * Cap the SIM queue depth at the smaller of the IOC's advertised
 * global credits and the driver's own request-pool size.
 */
50 	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt))?
51 	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);
55 	 * Construct our SIM entry.
57 	sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
58 	    mpt->unit, 1, maxq, NULL);
63 	 * Register exactly the bus.
66 	if (xpt_bus_register(sim, 0) != CAM_SUCCESS) {
/* Wildcard path: used for bus-wide async notifications. */
71 	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(sim),
72 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
/* Path creation failed: undo the bus registration before bailing. */
73 		xpt_bus_deregister(cam_sim_path(sim));
/*
 * mpt_cam_detach: tear down the CAM registration made in mpt_cam_attach.
 * Frees the wildcard path, deregisters the bus, and releases the SIM.
 * NOTE(review): sampled excerpt -- some lines are missing from view.
 */
81 mpt_cam_detach(mpt_softc_t *mpt)
83 	if (mpt->sim != NULL) {
84 		xpt_free_path(mpt->path);
85 		xpt_bus_deregister(cam_sim_path(mpt->sim));
86 		cam_sim_free(mpt->sim);
91 /* This routine is used after a system crash to dump core onto the
/*
 * mpt_poll: CAM polling entry point (interrupts may be unavailable,
 * e.g. during crash dumps).  Body not fully visible in this excerpt;
 * presumably it calls the driver's interrupt handler directly -- TODO
 * confirm against the full source.
 */
95 mpt_poll(struct cam_sim *sim)
97 	mpt_softc_t *mpt = (mpt_softc_t *) cam_sim_softc(sim);
104  * This routine is called if the 9x9 does not return completion status
105  * for a command after a CAM specified time.
/*
 * mpttimeout: CAM command timeout handler (callout context).
 * Dumps diagnostic state for the stuck request, then defers the actual
 * request free to mpttimeout2 (hz/10 later) so that a late completion
 * from the IOC has a window to land first.
 * NOTE(review): sampled excerpt -- lines are missing between statements.
 */
108 mpttimeout(void *arg)
111 	union ccb *ccb = arg;
115 	mpt = ccb->ccb_h.ccb_mpt_ptr;
117 	req = ccb->ccb_h.ccb_req_ptr;
118 	oseq = req->sequence;
/* Sequence changed while we raced in: the command actually completed. */
121 	if (req->sequence != oseq) {
122 		device_printf(mpt->dev, "bullet missed in timeout\n");
126 		device_printf(mpt->dev, "bullet U-turned in timeout: got us\n");
128 	device_printf(mpt->dev,
129 	    "time out on request index = 0x%02x sequence = 0x%08x\n",
130 	    req->index, req->sequence);
131 	mpt_check_doorbell(mpt);
/* Dump raw interrupt/doorbell registers for post-mortem analysis. */
132 	device_printf(mpt->dev, "Status %08X; Mask %08X; Doorbell %08X\n",
133 	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
134 	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
135 	    mpt_read(mpt, MPT_OFFSET_DOORBELL) );
136 	printf("request state %s\n", mpt_req_state(req->debug));
137 	if (ccb != req->ccb) {
138 		printf("time out: ccb %p != req->ccb %p\n",
141 	mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);
142 	req->debug = REQ_TIMEOUT;
/*
 * The request is off any list at this point, so its sle_next link is
 * reused to stash the softc pointer for mpttimeout2 to recover.
 */
144 	req->link.sle_next = (void *) mpt;
145 	callout_reset(&req->timeout, hz / 10, mpttimeout2, req);
146 	ccb->ccb_h.status = CAM_CMD_TIMEOUT;
147 	ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
/* Drop the driver lock for the CAM completion callback, then retake. */
149 	MPTLOCK_2_CAMLOCK(mpt);
151 	CAMLOCK_2_MPTLOCK(mpt);
/*
 * mpttimeout2: second-stage timeout.  If the request is still marked
 * REQ_TIMEOUT (no late completion arrived since mpttimeout ran), free
 * it back to the pool.  The softc pointer was stashed in the request's
 * sle_next link by mpttimeout.
 */
156 mpttimeout2(void *arg)
158 	request_t *req = arg;
159 	if (req->debug == REQ_TIMEOUT) {
160 		mpt_softc_t *mpt = (mpt_softc_t *) req->link.sle_next;
162 		mpt_free_request(mpt, req);
168  * Callback routine from "bus_dmamap_load" or in simple case called directly.
170  * Takes a list of physical segments and builds the SGL for SCSI IO command
171  * and forwards the command to the IOC after one last check that CAM has not
172  * aborted the transaction.
/*
 * NOTE(review): sampled excerpt -- declarations, else-arms and closing
 * braces are missing between the numbered statements below.
 * Three SGL shapes are built here:
 *   1. nseg > MPT_NSGL_FIRST: first-chunk simple elements + chained SGEs,
 *   2. 0 < nseg <= MPT_NSGL_FIRST: a single run of simple elements,
 *   3. nseg == 0: one empty simple element terminating the list.
 */
175 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
180 	MSG_SCSI_IO_REQUEST *mpt_req;
183 	req = (request_t *)arg;
186 	mpt = ccb->ccb_h.ccb_mpt_ptr;
187 	req = ccb->ccb_h.ccb_req_ptr;
188 	mpt_req = req->req_vbuf;
/* DMA load failed or produced more segments than the SGL can hold. */
190 	if (error == 0 && nseg > MPT_SGL_MAX) {
196 		device_printf(mpt->dev, "bus_dmamap_load returned %d\n",
198 		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
199 			xpt_freeze_devq(ccb->ccb_h.path, 1);
200 			ccb->ccb_h.status = CAM_DEV_QFRZN;
202 			ccb->ccb_h.status |= CAM_REQ_TOO_BIG;
204 			ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
206 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
208 		CAMLOCK_2_MPTLOCK(mpt);
209 		mpt_free_request(mpt, req);
210 		MPTLOCK_2_CAMLOCK(mpt);
/* Case 1: too many segments for the request frame -- chain needed. */
214 	if (nseg > MPT_NSGL_FIRST(mpt)) {
220 		mpt_req->DataLength = ccb->csio.dxfer_len;
221 		flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
222 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
223 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
/* Fill the in-frame simple elements, leaving room for the chain SGE. */
225 		se = (SGE_SIMPLE32 *) &mpt_req->SGL;
226 		for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1; i++, se++, dm_segs++) {
229 			bzero(se, sizeof (*se));
230 			se->Address = dm_segs->ds_addr;
231 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
233 			if (i == MPT_NSGL_FIRST(mpt) - 2) {
234 				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
236 			MPI_pSGE_SET_FLAGS(se, tf);
241 		 * Tell the IOC where to find the first chain element
/* ChainOffset is in 32-bit words from the start of the request frame. */
243 		mpt_req->ChainOffset = ((char *)se - (char *)mpt_req) >> 2;
246 		 * Until we're finished with all segments...
251 			 * Construct the chain element that point to the
254 			ce = (SGE_CHAIN32 *) se++;
255 			if (nleft > MPT_NSGL(mpt)) {
256 				ntodo = MPT_NSGL(mpt) - 1;
257 				ce->NextChainOffset = (MPT_RQSL(mpt) -
258 				    sizeof (SGE_SIMPLE32)) >> 2;
261 				ce->NextChainOffset = 0;
263 			ce->Length = ntodo * sizeof (SGE_SIMPLE32);
264 			ce->Address = req->req_pbuf +
265 			    ((char *)se - (char *)mpt_req);
266 			ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
267 			for (i = 0; i < ntodo; i++, se++, dm_segs++) {
270 				bzero(se, sizeof (*se));
271 				se->Address = dm_segs->ds_addr;
272 				MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
274 				if (i == ntodo - 1) {
275 					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
/* Very last segment of the whole transfer terminates the list. */
276 					if (ce->NextChainOffset == 0) {
278 						    MPI_SGE_FLAGS_END_OF_LIST |
279 						    MPI_SGE_FLAGS_END_OF_BUFFER;
282 				MPI_pSGE_SET_FLAGS(se, tf);
288 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
289 			op = BUS_DMASYNC_PREREAD;
291 			op = BUS_DMASYNC_PREWRITE;
/* Only sync maps we loaded ourselves (not caller-supplied phys lists). */
292 		if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
293 			bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
/* Case 2: everything fits in the request frame's own SGL. */
295 	} else if (nseg > 0) {
300 		mpt_req->DataLength = ccb->csio.dxfer_len;
301 		flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
302 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
303 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
305 		/* Copy the segments into our SG list */
306 		se = (SGE_SIMPLE32 *) &mpt_req->SGL;
307 		for (i = 0; i < nseg; i++, se++, dm_segs++) {
310 			bzero(se, sizeof (*se));
311 			se->Address = dm_segs->ds_addr;
312 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
316 				    MPI_SGE_FLAGS_LAST_ELEMENT |
317 				    MPI_SGE_FLAGS_END_OF_BUFFER |
318 				    MPI_SGE_FLAGS_END_OF_LIST;
320 			MPI_pSGE_SET_FLAGS(se, tf);
323 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
324 			op = BUS_DMASYNC_PREREAD;
326 			op = BUS_DMASYNC_PREWRITE;
327 		if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
328 			bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
/* Case 3: no data phase. */
331 		se = (SGE_SIMPLE32 *) &mpt_req->SGL;
333 		 * No data to transfer so we just make a single simple SGL
336 		MPI_pSGE_SET_FLAGS(se,
337 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
338 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
342 	 * Last time we need to check if this CCB needs to be aborted.
344 	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
345 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0)
346 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
347 		CAMLOCK_2_MPTLOCK(mpt);
348 		mpt_free_request(mpt, req);
349 		MPTLOCK_2_CAMLOCK(mpt);
354 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
355 	MPTLOCK_2_CAMLOCK(mpt);
/* Arm the per-CCB timeout (CAM timeout is in milliseconds). */
356 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
357 		callout_reset(&ccb->ccb_h.timeout_ch,
358 		    (ccb->ccb_h.timeout * hz) / 1000, mpttimeout, ccb);
360 	if (mpt->verbose > 1)
361 		mpt_print_scsi_io_request(mpt_req);
362 	mpt_send_cmd(mpt, req);
363 	MPTLOCK_2_CAMLOCK(mpt);
/*
 * mpt_start: translate an XPT_SCSI_IO CCB into an MPI SCSI IO request
 * and hand it to mpt_execute_req (directly, or via bus_dmamap_load for
 * virtually-addressed data buffers).
 * NOTE(review): sampled excerpt -- lines are missing between statements.
 */
367 mpt_start(union ccb *ccb)
370 	struct mpt_softc *mpt;
371 	MSG_SCSI_IO_REQUEST *mpt_req;
372 	struct ccb_scsiio *csio = &ccb->csio;
373 	struct ccb_hdr *ccbh = &ccb->ccb_h;
375 	/* Get the pointer for the physical adapter */
376 	mpt = ccb->ccb_h.ccb_mpt_ptr;
378 	CAMLOCK_2_MPTLOCK(mpt);
379 	/* Get a request structure off the free list */
380 	if ((req = mpt_get_request(mpt)) == NULL) {
/* Pool exhausted: freeze the SIM queue once and ask CAM to requeue. */
381 		if (mpt->outofbeer == 0) {
383 			xpt_freeze_simq(mpt->sim, 1);
384 			if (mpt->verbose > 1) {
385 				device_printf(mpt->dev, "FREEZEQ\n");
388 		MPTLOCK_2_CAMLOCK(mpt);
389 		ccb->ccb_h.status = CAM_REQUEUE_REQ;
393 	MPTLOCK_2_CAMLOCK(mpt);
395 	/* Link the ccb and the request structure so we can find */
396 	/* the other knowing either the request or the ccb */
398 	ccb->ccb_h.ccb_req_ptr = req;
400 	/* Now we build the command for the IOC */
401 	mpt_req = req->req_vbuf;
402 	bzero(mpt_req, sizeof *mpt_req);
404 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
405 	mpt_req->Bus = mpt->bus;
407 	mpt_req->SenseBufferLength =
408 	    (csio->sense_len < MPT_SENSE_SIZE) ?
409 	    csio->sense_len : MPT_SENSE_SIZE;
411 	/* We use the message context to find the request structure when we */
412 	/* Get the command completion interrupt from the FC IOC. */
413 	mpt_req->MsgContext = req->index;
415 	/* Which physical device to do the I/O on */
416 	mpt_req->TargetID = ccb->ccb_h.target_id;
417 	mpt_req->LUN[1] = ccb->ccb_h.target_lun;
419 	/* Set the direction of the transfer */
420 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
421 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
422 	else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
423 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
425 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
/* Map CAM tag actions onto MPI queueing attributes. */
427 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
428 		switch(ccb->csio.tag_action) {
429 		case MSG_HEAD_OF_Q_TAG:
430 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
433 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
435 		case MSG_ORDERED_Q_TAG:
436 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
438 		case MSG_SIMPLE_Q_TAG:
440 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
445 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
447 		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
/* Parallel SCSI only: honor CAM's disconnect-disable hint. */
450 	if (mpt->is_fc == 0) {
451 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
452 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
456 	/* Copy the scsi command block into place */
457 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0)
458 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
460 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
462 	mpt_req->CDBLength = csio->cdb_len;
463 	mpt_req->DataLength = csio->dxfer_len;
464 	mpt_req->SenseBufferLowAddr = req->sense_pbuf;
467 	 * If we have any data to send with this command,
468 	 * map it into bus space.
471 	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
472 		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
474 			 * We've been given a pointer to a single buffer.
476 			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
478 				 * Virtual address that needs to translated into
479 				 * one or more physical pages.
483 				error = bus_dmamap_load(mpt->buffer_dmat,
484 				    req->dmap, csio->data_ptr, csio->dxfer_len,
485 				    mpt_execute_req, req, 0);
486 				if (error == EINPROGRESS) {
488 					 * So as to maintain ordering,
489 					 * freeze the controller queue
490 					 * until our mapping is
493 					xpt_freeze_simq(mpt->sim, 1);
494 					ccbh->status |= CAM_RELEASE_SIMQ;
498 				 * We have been given a pointer to single
501 				struct bus_dma_segment seg;
502 				seg.ds_addr = (bus_addr_t)csio->data_ptr;
503 				seg.ds_len = csio->dxfer_len;
504 				mpt_execute_req(req, &seg, 1, 0);
508 			 * We have been given a list of addresses.
509 			 * These case could be easily done but they are not
510 			 * currently generated by the CAM subsystem so there
511 			 * is no point in wasting the time right now.
513 			struct bus_dma_segment *segs;
514 			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
515 				mpt_execute_req(req, NULL, 0, EFAULT);
517 				/* Just use the segments provided */
518 				segs = (struct bus_dma_segment *)csio->data_ptr;
519 				mpt_execute_req(req, segs, csio->sglist_cnt,
520 				    (csio->sglist_cnt < MPT_SGL_MAX)?
/* No data phase: call the completion path with an empty segment list. */
525 		mpt_execute_req(req, NULL, 0, 0);
/*
 * mpt_bus_reset: issue an MPI task-management bus reset via the slow
 * doorbell handshake.  Returns a CAM status code (CAM_REQ_CMP on
 * success, CAM_REQUEUE_REQ if no request is free, CAM_REQ_CMP_ERR on a
 * handshake failure).
 * NOTE(review): sampled excerpt -- lines are missing between statements.
 */
530 mpt_bus_reset(union ccb *ccb)
535 	MSG_SCSI_TASK_MGMT *reset_req;
537 	/* Get the pointer for the physical adapter */
538 	mpt = ccb->ccb_h.ccb_mpt_ptr;
540 	/* Get a request structure off the free list */
541 	if ((req = mpt_get_request(mpt)) == NULL) {
542 		return (CAM_REQUEUE_REQ);
545 	/* Link the ccb and the request structure so we can find */
546 	/* the other knowing either the request or the ccb */
548 	ccb->ccb_h.ccb_req_ptr = req;
550 	reset_req = req->req_vbuf;
551 	bzero(reset_req, sizeof *reset_req);
553 	reset_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
554 	reset_req->MsgContext = req->index;
555 	reset_req->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
558 	 * Should really be TARGET_RESET_OPTION
560 	reset_req->MsgFlags =
561 	    MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION;
563 	/* Which physical device Reset */
564 	reset_req->TargetID = ccb->ccb_h.target_id;
565 	reset_req->LUN[1] = ccb->ccb_h.target_lun;
567 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
/* Task management goes through the doorbell handshake, not the queues. */
569 	error = mpt_send_handshake_cmd(mpt,
570 	    sizeof (MSG_SCSI_TASK_MGMT), reset_req);
572 		device_printf(mpt->dev,
573 		    "mpt_bus_reset: mpt_send_handshake return %d\n", error);
574 		return (CAM_REQ_CMP_ERR);
576 	return (CAM_REQ_CMP);
581 * Process an asynchronous event from the IOC.
583 static void mpt_ctlop(mpt_softc_t *, void *, u_int32_t);
584 static void mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *);
/*
 * mpt_ctlop: dispatch a control-path (non-SCSI-IO) reply from the IOC:
 * event notifications, event acks, port-enable and config replies.
 * The reply frame is returned to the IOC via mpt_free_reply in every
 * handled branch; 'reply << 1' reconstructs the frame's physical offset
 * (the low bit was consumed as the address-reply flag).
 * NOTE(review): sampled excerpt -- lines are missing between statements.
 */
587 mpt_ctlop(mpt_softc_t *mpt, void *vmsg, u_int32_t reply)
589 	MSG_DEFAULT_REPLY *dmsg = vmsg;
591 	if (dmsg->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
592 		mpt_event_notify_reply(mpt, vmsg);
593 		mpt_free_reply(mpt, (reply << 1));
594 	} else if (dmsg->Function == MPI_FUNCTION_EVENT_ACK) {
595 		mpt_free_reply(mpt, (reply << 1));
596 	} else if (dmsg->Function == MPI_FUNCTION_PORT_ENABLE) {
597 		MSG_PORT_ENABLE_REPLY *msg = vmsg;
/* Strip the control-command marker bit to recover the request index. */
598 		int index = msg->MsgContext & ~0x80000000;
599 		if (mpt->verbose > 1) {
600 			device_printf(mpt->dev, "enable port reply idx %d\n",
603 		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
604 			request_t *req = &mpt->request_pool[index];
/* Mark done; presumably a waiter polls req->debug -- confirm in full source. */
605 			req->debug = REQ_DONE;
607 		mpt_free_reply(mpt, (reply << 1));
608 	} else if (dmsg->Function == MPI_FUNCTION_CONFIG) {
609 		MSG_CONFIG_REPLY *msg = vmsg;
610 		int index = msg->MsgContext & ~0x80000000;
611 		if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
612 			request_t *req = &mpt->request_pool[index];
613 			req->debug = REQ_DONE;
/* Stash the reply offset so the config-page reader can find the frame. */
614 			req->sequence = reply;
616 			mpt_free_reply(mpt, (reply << 1));
619 		device_printf(mpt->dev, "unknown mpt_ctlop: %x\n",
/*
 * mpt_event_notify_reply: decode and log an asynchronous event from the
 * IOC; if the IOC requested an acknowledgement, queue an EVENT_ACK
 * message back.  Mostly informational printing.
 * NOTE(review): sampled excerpt -- case labels, braces and some lines
 * are missing between the numbered statements below.
 */
625 mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
628 	case MPI_EVENT_LOG_DATA:
629 		/* Some error occurred that LSI wants logged */
630 		device_printf(mpt->dev,
631 		    "\tEvtLogData: IOCLogInfo: 0x%08x\n",
633 		device_printf(mpt->dev, "\tEvtLogData: Event Data:");
636 		for (i = 0; i < msg->EventDataLength; i++) {
637 			device_printf(mpt->dev,
638 			    " %08X", msg->Data[i]);
641 		device_printf(mpt->dev, "\n");
644 	case MPI_EVENT_UNIT_ATTENTION:
645 		device_printf(mpt->dev,
646 		    "Bus: 0x%02x TargetID: 0x%02x\n",
647 		    (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
650 	case MPI_EVENT_IOC_BUS_RESET:
651 		/* We generated a bus reset */
652 		device_printf(mpt->dev, "IOC Bus Reset Port: %d\n",
653 		    (msg->Data[0] >> 8) & 0xff);
656 	case MPI_EVENT_EXT_BUS_RESET:
657 		/* Someone else generated a bus reset */
658 		device_printf(mpt->dev, "Ext Bus Reset\n");
660 		 * These replies don't return EventData like the MPI
663 		/* xpt_async(AC_BUS_RESET, path, NULL); */
666 	case MPI_EVENT_RESCAN:
668 		 * In general this means a device has been added
671 		device_printf(mpt->dev,
672 		    "Rescan Port: %d\n", (msg->Data[0] >> 8) & 0xff);
673 		/* xpt_async(AC_FOUND_DEVICE, path, NULL); */
676 	case MPI_EVENT_LINK_STATUS_CHANGE:
677 		device_printf(mpt->dev, "Port %d: LinkState: %s\n",
678 		    (msg->Data[1] >> 8) & 0xff,
679 		    ((msg->Data[0] & 0xff) == 0)? "Failed" : "Active");
682 	case MPI_EVENT_LOOP_STATE_CHANGE:
/* FC loop events: Data[0] bits 16-23 = event, 8-15/0-7 = LIP chars. */
683 		switch ((msg->Data[0] >> 16) & 0xff) {
685 			device_printf(mpt->dev,
686 			    "Port 0x%x: FC LinkEvent: LIP(%02X,%02X) (Loop Initialization)\n",
687 			    (msg->Data[1] >> 8) & 0xff,
688 			    (msg->Data[0] >> 8) & 0xff,
689 			    (msg->Data[0] ) & 0xff);
690 			switch ((msg->Data[0] >> 8) & 0xff) {
692 				if ((msg->Data[0] & 0xff) == 0xF7) {
693 					printf("Device needs AL_PA\n");
695 					printf("Device %02X doesn't like FC performance\n",
696 					    msg->Data[0] & 0xFF);
700 				if ((msg->Data[0] & 0xff) == 0xF7) {
701 					printf("Device had loop failure at its receiver prior to acquiring AL_PA\n");
703 					printf("Device %02X detected loop failure at its receiver\n",
704 					    msg->Data[0] & 0xFF);
708 				printf("Device %02X requests that device %02X reset itself\n",
710 				    (msg->Data[0] >> 8) & 0xFF);
715 			device_printf(mpt->dev, "Port 0x%x: FC LinkEvent: LPE(%02X,%02X) (Loop Port Enable)\n",
716 			    (msg->Data[1] >> 8) & 0xff, /* Port */
717 			    (msg->Data[0] >> 8) & 0xff, /* Character 3 */
718 			    (msg->Data[0] ) & 0xff /* Character 4 */
722 			device_printf(mpt->dev, "Port 0x%x: FC LinkEvent: LPB(%02X,%02X) (Loop Port Bypass)\n",
723 			    (msg->Data[1] >> 8) & 0xff, /* Port */
724 			    (msg->Data[0] >> 8) & 0xff, /* Character 3 */
725 			    (msg->Data[0] ) & 0xff /* Character 4 */
729 			device_printf(mpt->dev, "Port 0x%x: FC LinkEvent: Unknown FC event (%02X %02X %02X)\n",
730 			    (msg->Data[1] >> 8) & 0xff, /* Port */
731 			    (msg->Data[0] >> 16) & 0xff, /* Event */
732 			    (msg->Data[0] >> 8) & 0xff, /* Character 3 */
733 			    (msg->Data[0] ) & 0xff /* Character 4 */
738 	case MPI_EVENT_LOGOUT:
739 		device_printf(mpt->dev, "FC Logout Port: %d N_PortID: %02X\n",
740 		    (msg->Data[1] >> 8) & 0xff,
743 	case MPI_EVENT_EVENT_CHANGE:
744 		/* This is just an acknowledgement of our
745 		   mpt_send_event_request */
748 		device_printf(mpt->dev, "Unknown event %X\n", msg->Event);
/* IOC asked for an explicit ack: build and queue an EVENT_ACK message. */
750 	if (msg->AckRequired) {
753 		if ((req = mpt_get_request(mpt)) == NULL) {
754 			panic("unable to get request to acknowledge notify");
756 		ackp = (MSG_EVENT_ACK *) req->req_vbuf;
757 		bzero(ackp, sizeof *ackp);
758 		ackp->Function = MPI_FUNCTION_EVENT_ACK;
759 		ackp->Event = msg->Event;
760 		ackp->EventContext = msg->EventContext;
/* High bit marks this as a control command (see mpt_ctlop/mpt_done). */
761 		ackp->MsgContext = req->index | 0x80000000;
762 		mpt_check_doorbell(mpt);
763 		mpt_send_cmd(mpt, req);
/*
 * mpt_done: process a single reply from the IOC's reply queue.
 * 'reply' is either a context reply (request index, fast-path success)
 * or an address reply (offset of a full reply frame, used for errors
 * and control operations).  Translates MPI status into CAM status,
 * copies autosense data, and completes the CCB back to CAM.
 * NOTE(review): sampled excerpt -- declarations, else-arms and braces
 * are missing between the numbered statements below.
 */
768 mpt_done(mpt_softc_t *mpt, u_int32_t reply)
773 	MSG_REQUEST_HEADER *mpt_req;
774 	MSG_SCSI_IO_REPLY *mpt_reply;
776 	index = -1; /* Shut up the compiler */
/* Context reply: low bits are the request index directly; no frame. */
778 	if ((reply & MPT_CONTEXT_REPLY) == 0) {
781 		index = reply & MPT_CONTEXT_MASK;
785 		bus_dmamap_sync(mpt->reply_dmat, mpt->reply_dmap,
786 		    BUS_DMASYNC_POSTREAD);
787 		/* address reply (Error) */
788 		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
789 		if (mpt->verbose > 1) {
790 			pReply = (unsigned *) mpt_reply;
791 			device_printf(mpt->dev, "Address Reply (index %u)\n",
792 			    mpt_reply->MsgContext & 0xffff);
793 			device_printf(mpt->dev, "%08X %08X %08X %08X\n",
794 			    pReply[0], pReply[1], pReply[2], pReply[3]);
795 			device_printf(mpt->dev, "%08X %08X %08X %08X\n",
796 			    pReply[4], pReply[5], pReply[6], pReply[7]);
797 			device_printf(mpt->dev, "%08X %08X %08X %08X\n\n",
798 			    pReply[8], pReply[9], pReply[10], pReply[11]);
800 		index = mpt_reply->MsgContext;
804 	 * Address reply with MessageContext high bit set
805 	 * This is most likely a notify message so we try
806 	 * to process it then free it
808 	if ((index & 0x80000000) != 0) {
809 		if (mpt_reply != NULL) {
810 			mpt_ctlop(mpt, mpt_reply, reply);
812 			device_printf(mpt->dev,
813 			    "mpt_done: index 0x%x, NULL reply\n", index);
818 	/* Did we end up with a valid index into the table? */
819 	if (index < 0 || index >= MPT_MAX_REQUESTS(mpt)) {
820 		printf("mpt_done: invalid index (%x) in reply\n", index);
824 	req = &mpt->request_pool[index];
826 	/* Make sure memory hasn't been trashed */
827 	if (req->index != index) {
828 		printf("mpt_done: corrupted request struct");
832 	/* Short cut for task management replies; nothing more for us to do */
833 	mpt_req = req->req_vbuf;
834 	if (mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT) {
835 		if (mpt->verbose > 1) {
836 			device_printf(mpt->dev, "mpt_done: TASK MGMT\n");
841 	if (mpt_req->Function == MPI_FUNCTION_PORT_ENABLE) {
846 	 * At this point it better be a SCSI IO command, but don't
849 	if (mpt_req->Function != MPI_FUNCTION_SCSI_IO_REQUEST) {
853 	/* Recover the CAM control block from the request structure */
856 	/* Can't have had a SCSI command without a CAM control block */
857 	if (ccb == NULL || (ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
858 		device_printf(mpt->dev,
859 		    "mpt_done: corrupted ccb, index = 0x%02x seq = 0x%08x",
860 		    req->index, req->sequence);
861 		printf(" request state %s\nmpt_request:\n",
862 		    mpt_req_state(req->debug));
863 		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);
865 		if (mpt_reply != NULL) {
866 			printf("\nmpt_done: reply:\n");
867 			mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
869 			printf("\nmpt_done: context reply: 0x%08x\n", reply);
/* The command completed; cancel the timeout armed in mpt_execute_req. */
874 	callout_stop(&ccb->ccb_h.timeout_ch);
876 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
879 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
880 			op = BUS_DMASYNC_POSTREAD;
882 			op = BUS_DMASYNC_POSTWRITE;
884 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
885 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
889 	if (mpt_reply == NULL) {
890 		/* Context reply; report that the command was successful */
891 		ccb->ccb_h.status = CAM_REQ_CMP;
892 		ccb->csio.scsi_status = SCSI_STATUS_OK;
893 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
/* If we froze the SIM queue on resource exhaustion, release it now. */
894 		if (mpt->outofbeer) {
895 			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
897 			if (mpt->verbose > 1) {
898 				device_printf(mpt->dev, "THAWQ\n");
901 		MPTLOCK_2_CAMLOCK(mpt);
903 		CAMLOCK_2_MPTLOCK(mpt);
/* Address reply: translate MPI IOCStatus into a CAM status code. */
907 	ccb->csio.scsi_status = mpt_reply->SCSIStatus;
908 	switch(mpt_reply->IOCStatus) {
909 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
910 		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
913 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
915 		 * Yikes, Tagged queue full comes through this path!
917 		 * So we'll change it to a status error and anything
918 		 * that returns status should probably be a status
922 		    ccb->csio.dxfer_len - mpt_reply->TransferCount;
923 		if (mpt_reply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS) {
924 			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
928 			device_printf(mpt->dev, "underrun, scsi status is %x\n", ccb->csio.scsi_status);
929 			ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
932 	case MPI_IOCSTATUS_SUCCESS:
933 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
934 		switch (ccb->csio.scsi_status) {
936 			ccb->ccb_h.status = CAM_REQ_CMP;
939 			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
943 	case MPI_IOCSTATUS_BUSY:
944 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
945 		ccb->ccb_h.status = CAM_BUSY;
948 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
949 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
950 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
951 		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
954 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
955 		ccb->ccb_h.status = CAM_DATA_RUN_ERR;
958 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
959 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
960 		ccb->ccb_h.status = CAM_UNCOR_PARITY;
963 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
964 		ccb->ccb_h.status = CAM_REQ_CMP;
967 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
968 		ccb->ccb_h.status = CAM_UA_TERMIO;
971 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
972 		ccb->ccb_h.status = CAM_REQ_TERMIO;
975 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
976 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
980 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
/* Copy autosense data gathered by the IOC into the CCB, if valid. */
984 	if ((mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0) {
985 		if (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) {
986 			ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
988 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
989 			ccb->csio.sense_resid = mpt_reply->SenseCount;
990 			bcopy(req->sense_vbuf, &ccb->csio.sense_data,
991 			    ccb->csio.sense_len);
993 	} else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
994 		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
995 		ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
/* On any error, freeze the device queue exactly once for CAM recovery. */
998 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
999 		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
1000 			ccb->ccb_h.status |= CAM_DEV_QFRZN;
1001 			xpt_freeze_devq(ccb->ccb_h.path, 1);
1006 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1007 	if (mpt->outofbeer) {
1008 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1010 		if (mpt->verbose > 1) {
1011 			device_printf(mpt->dev, "THAWQ\n");
1014 	MPTLOCK_2_CAMLOCK(mpt);
1016 	CAMLOCK_2_MPTLOCK(mpt);
1019 	/* If IOC done with this request free it up */
1020 	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
1021 		mpt_free_request(mpt, req);
1023 	/* If address reply; give the buffer back to the IOC */
1024 	if (mpt_reply != NULL)
1025 		mpt_free_reply(mpt, (reply << 1));
1029 mpt_action(struct cam_sim *sim, union ccb *ccb)
1033 struct ccb_trans_settings *cts;
1035 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
1037 mpt = (mpt_softc_t *)cam_sim_softc(sim);
1039 ccb->ccb_h.ccb_mpt_ptr = mpt;
1041 switch (ccb->ccb_h.func_code) {
1043 if (mpt->verbose > 1)
1044 device_printf(mpt->dev, "XPT_RESET_BUS\n");
1045 CAMLOCK_2_MPTLOCK(mpt);
1046 error = mpt_bus_reset(ccb);
1048 case CAM_REQ_INPROG:
1049 MPTLOCK_2_CAMLOCK(mpt);
1051 case CAM_REQUEUE_REQ:
1052 if (mpt->outofbeer == 0) {
1054 xpt_freeze_simq(sim, 1);
1055 if (mpt->verbose > 1) {
1056 device_printf(mpt->dev, "FREEZEQ\n");
1059 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1060 MPTLOCK_2_CAMLOCK(mpt);
1065 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1066 ccb->ccb_h.status |= CAM_REQ_CMP;
1067 if (mpt->outofbeer) {
1068 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1070 if (mpt->verbose > 1) {
1071 device_printf(mpt->dev, "THAWQ\n");
1074 MPTLOCK_2_CAMLOCK(mpt);
1079 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1080 MPTLOCK_2_CAMLOCK(mpt);
1085 case XPT_SCSI_IO: /* Execute the requested I/O operation */
1087 * Do a couple of preliminary checks...
1089 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1090 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
1091 ccb->ccb_h.status = CAM_REQ_INVALID;
1096 /* Max supported CDB length is 16 bytes */
1097 if (ccb->csio.cdb_len >
1098 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
1099 ccb->ccb_h.status = CAM_REQ_INVALID;
1103 ccb->csio.scsi_status = SCSI_STATUS_OK;
1109 * XXX: Need to implement
1111 ccb->ccb_h.status = CAM_UA_ABORT;
1115 #ifdef CAM_NEW_TRAN_CODE
1116 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS)
1118 #define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS)
1120 #define DP_DISC_ENABLE 0x1
1121 #define DP_DISC_DISABL 0x2
1122 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL)
1124 #define DP_TQING_ENABLE 0x4
1125 #define DP_TQING_DISABL 0x8
1126 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL)
1128 #define DP_WIDE 0x10
1129 #define DP_NARROW 0x20
1130 #define DP_WIDTH (DP_WIDE|DP_NARROW)
1132 #define DP_SYNC 0x40
1134 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
1136 if (!IS_CURRENT_SETTINGS(cts)) {
1137 ccb->ccb_h.status = CAM_REQ_INVALID;
1141 tgt = cts->ccb_h.target_id;
1142 if (mpt->is_fc == 0) {
1144 u_int period = 0, offset = 0;
1145 #ifndef CAM_NEW_TRAN_CODE
1146 if (cts->valid & CCB_TRANS_DISC_VALID) {
1147 dval |= DP_DISC_ENABLE;
1149 if (cts->valid & CCB_TRANS_TQ_VALID) {
1150 dval |= DP_TQING_ENABLE;
1152 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
1159 * Any SYNC RATE of nonzero and SYNC_OFFSET
1160 * of nonzero will cause us to go to the
1161 * selected (from NVRAM) maximum value for
1162 * this device. At a later point, we'll
1163 * allow finer control.
1165 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
1166 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
1168 period = cts->sync_period;
1169 offset = cts->sync_offset;
1172 struct ccb_trans_settings_scsi *scsi =
1173 &cts->proto_specific.scsi;
1174 struct ccb_trans_settings_spi *spi =
1175 &cts->xport_specific.spi;
1177 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
1178 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
1179 dval |= DP_DISC_ENABLE;
1181 dval |= DP_DISC_DISABL;
1184 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
1185 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
1186 dval |= DP_TQING_ENABLE;
1188 dval |= DP_TQING_DISABL;
1191 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
1192 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
1198 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
1199 (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
1200 (spi->sync_period && spi->sync_offset)) {
1202 period = spi->sync_period;
1203 offset = spi->sync_offset;
1206 CAMLOCK_2_MPTLOCK(mpt);
1207 if (dval & DP_DISC_ENABLE) {
1208 mpt->mpt_disc_enable |= (1 << tgt);
1209 } else if (dval & DP_DISC_DISABL) {
1210 mpt->mpt_disc_enable &= ~(1 << tgt);
1212 if (dval & DP_TQING_ENABLE) {
1213 mpt->mpt_tag_enable |= (1 << tgt);
1214 } else if (dval & DP_TQING_DISABL) {
1215 mpt->mpt_tag_enable &= ~(1 << tgt);
1217 if (dval & DP_WIDTH) {
1218 if (mpt_setwidth(mpt, tgt, dval & DP_WIDE)) {
1219 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1220 MPTLOCK_2_CAMLOCK(mpt);
1225 if (dval & DP_SYNC) {
1226 if (mpt_setsync(mpt, tgt, period, offset)) {
1227 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1228 MPTLOCK_2_CAMLOCK(mpt);
1233 MPTLOCK_2_CAMLOCK(mpt);
1234 if (mpt->verbose > 1) {
1235 device_printf(mpt->dev,
1236 "SET tgt %d flags %x period %x off %x\n",
1237 tgt, dval, period, offset);
1240 ccb->ccb_h.status = CAM_REQ_CMP;
1244 case XPT_GET_TRAN_SETTINGS:
1246 tgt = cts->ccb_h.target_id;
1248 #ifndef CAM_NEW_TRAN_CODE
1250 * a lot of normal SCSI things don't make sense.
/*
 * NOTE(review): this is a line-numbered listing with gaps -- the embedded
 * numbers (1252, 1253, 1255, ...) skip, so braces, else-clauses and break
 * statements between the visible lines are missing from this view.
 *
 * Fragment of mpt_action(): tail of the XPT_GET_TRAN_SETTINGS case,
 * then XPT_CALC_GEOMETRY, XPT_PATH_INQ and the default case.
 */
/* Old (pre-CAM_NEW_TRAN_CODE) path: report disconnect/tagging enabled. */
1252 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
1253 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
1255 * How do you measure the width of a high
1256 * speed serial bus? Well, in bytes.
1258 * Offset and period make no sense, though, so we set
1259 * (above) a 'base' transfer speed to be gigabit.
1261 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
/*
 * New transport code, Fibre Channel case: only the link speed is
 * meaningful, so just CTS_FC_VALID_SPEED is reported.
 */
1263 struct ccb_trans_settings_fc *fc =
1264 &cts->xport_specific.fc;
1266 cts->protocol = PROTO_SCSI;
1267 cts->protocol_version = SCSI_REV_2;
1268 cts->transport = XPORT_FC;
1269 cts->transport_version = 0;
1271 fc->valid = CTS_FC_VALID_SPEED;
1272 fc->bitrate = 100000; /* XXX: Need for 2Gb/s */
1273 /* XXX: need a port database for each target */
/*
 * SPI case: gather disconnect/tagging/width/sync settings into
 * dval (flags), pval (period factor) and oval (offset).
 */
1276 #ifdef CAM_NEW_TRAN_CODE
1277 struct ccb_trans_settings_scsi *scsi =
1278 &cts->proto_specific.scsi;
1279 struct ccb_trans_settings_spi *spi =
1280 &cts->xport_specific.spi;
1282 u_int8_t dval, pval, oval;
1285 * We aren't going off of Port PAGE2 params for
1286 * tagged queuing or disconnect capabilities
1287 * for current settings. For goal settings,
1288 * we assert all capabilities- we've had some
1289 * problems with reading NVRAM data.
/*
 * Current settings: re-read SCSI Device Page 0 for this target to get
 * the parameters actually negotiated with the device.
 */
1291 if (IS_CURRENT_SETTINGS(cts)) {
1292 fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
1295 tmp = mpt->mpt_dev_page0[tgt];
/* Config-page access requires the MPT lock rather than the CAM lock. */
1296 CAMLOCK_2_MPTLOCK(mpt);
1297 if (mpt_read_cfg_page(mpt, tgt, &tmp.Header)) {
1298 device_printf(mpt->dev,
1299 "cannot get target %d DP0\n", tgt);
1301 if (mpt->verbose > 1) {
1302 device_printf(mpt->dev,
1303 "SPI Tgt %d Page 0: NParms %x Information %x\n",
1305 tmp.NegotiatedParameters,
1309 MPTLOCK_2_CAMLOCK(mpt);
1311 if (tmp.NegotiatedParameters &
1312 MPI_SCSIDEVPAGE0_NP_WIDE)
/* Disconnect/tag enables come from driver-held per-target bitmasks. */
1315 if (mpt->mpt_disc_enable & (1 << tgt)) {
1316 dval |= DP_DISC_ENABLE;
1318 if (mpt->mpt_tag_enable & (1 << tgt)) {
1319 dval |= DP_TQING_ENABLE;
/* NegotiatedParameters layout: offset in bits 16-23, period in 8-15. */
1321 oval = (tmp.NegotiatedParameters >> 16) & 0xff;
1322 pval = (tmp.NegotiatedParameters >> 8) & 0xff;
/*
 * Goal ("NVRAM") settings: assert everything and take period/offset
 * from Port Page 0 capabilities (see the XXX below).
 */
1325 * XXX: Fix wrt NVRAM someday. Attempts
1326 * XXX: to read port page2 device data
1327 * XXX: just returns zero in these areas.
1329 dval = DP_WIDE|DP_DISC|DP_TQING;
1330 oval = (mpt->mpt_port_page0.Capabilities >> 16);
1331 pval = (mpt->mpt_port_page0.Capabilities >> 8);
/* Old transport code: translate dval/pval/oval into ccb_trans_settings. */
1333 #ifndef CAM_NEW_TRAN_CODE
1334 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
1335 if (dval & DP_DISC_ENABLE) {
1336 cts->flags |= CCB_TRANS_DISC_ENB;
1338 if (dval & DP_TQING_ENABLE) {
1339 cts->flags |= CCB_TRANS_TAG_ENB;
1341 if (dval & DP_WIDE) {
1342 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1344 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1346 cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
1347 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
1349 cts->sync_period = pval;
1350 cts->sync_offset = oval;
1352 CCB_TRANS_SYNC_RATE_VALID |
1353 CCB_TRANS_SYNC_OFFSET_VALID;
/* New transport code: same data via scsi/spi transport structures. */
1356 cts->protocol = PROTO_SCSI;
1357 cts->protocol_version = SCSI_REV_2;
1358 cts->transport = XPORT_SPI;
1359 cts->transport_version = 2;
1361 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
1362 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
1363 if (dval & DP_DISC_ENABLE) {
1364 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
1366 if (dval & DP_TQING_ENABLE) {
1367 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
1370 spi->sync_offset = oval;
1371 spi->sync_period = pval;
1372 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
1373 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
1375 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
1376 if (dval & DP_WIDE) {
1377 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1379 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
/* TQ/disconnect are only meaningful for a real LUN, not the wildcard. */
1381 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
1382 scsi->valid = CTS_SCSI_VALID_TQ;
1383 spi->valid |= CTS_SPI_VALID_DISC;
1388 if (mpt->verbose > 1) {
1389 device_printf(mpt->dev,
1390 "GET %s tgt %d flags %x period %x off %x\n",
1391 IS_CURRENT_SETTINGS(cts)? "ACTIVE" :
1392 "NVRAM", tgt, dval, pval, oval);
1395 ccb->ccb_h.status = CAM_REQ_CMP;
/*
 * XPT_CALC_GEOMETRY: synthesize a logical disk geometry; heads are set
 * on a line missing from this view (presumably 64 vs. 255 by size --
 * TODO confirm against the full file).
 */
1399 case XPT_CALC_GEOMETRY:
1401 struct ccb_calc_geometry *ccg;
1402 u_int32_t secs_per_cylinder;
/* A zero block size would divide by zero below; reject it. */
1406 if (ccg->block_size == 0) {
1407 ccb->ccb_h.status = CAM_REQ_INVALID;
1412 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
1413 if (size_mb > 1024) {
1415 ccg->secs_per_track = 63;
1418 ccg->secs_per_track = 32;
1420 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
1421 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
1422 ccb->ccb_h.status = CAM_REQ_CMP;
/*
 * XPT_PATH_INQ: describe controller capabilities.  The FC branch claims
 * 256 targets at gigabit speed; the SPI branch claims 16 targets with
 * sync/wide/tag support at a 3300 KB/s base speed.
 */
1426 case XPT_PATH_INQ: /* Path routing inquiry */
1428 struct ccb_pathinq *cpi = &ccb->cpi;
1430 cpi->version_num = 1;
1431 cpi->target_sprt = 0;
1432 cpi->hba_eng_cnt = 0;
1434 cpi->bus_id = cam_sim_bus(sim);
1436 cpi->max_target = 255;
1437 cpi->hba_misc = PIM_NOBUSRESET;
/* FC: no real initiator id; use one past the largest target id. */
1438 cpi->initiator_id = cpi->max_target + 1;
1439 cpi->base_transfer_speed = 100000;
1440 cpi->hba_inquiry = PI_TAG_ABLE;
1442 cpi->initiator_id = mpt->mpt_ini_id;
1443 cpi->base_transfer_speed = 3300;
1444 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
1446 cpi->max_target = 15;
1449 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1450 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
1451 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1452 cpi->unit_number = cam_sim_unit(sim);
1453 cpi->ccb_h.status = CAM_REQ_CMP;
/* default: unrecognized function code. */
1458 ccb->ccb_h.status = CAM_REQ_INVALID;
/*
 * mpt_setwidth --
 *	Set or clear the wide (16-bit) negotiation bit in this target's
 *	SCSI Device Page 1, write the page to the chip, then read it back
 *	and cache the result in mpt->mpt_dev_page1[tgt].
 *
 *	NOTE(review): lines are missing from this listing (embedded
 *	numbering skips 1466, 1469, 1471, 1473, 1475-76, 1478-79, 1486+):
 *	the onoff if/else, the error returns after the failed write/read,
 *	and the final return fall in those gaps.
 *
 *	mpt	softc of the controller
 *	tgt	target id whose Device Page 1 is modified
 *	onoff	nonzero presumably selects wide -- the conditional itself
 *		is in a numbering gap; TODO confirm against the full file.
 */
1465 mpt_setwidth(mpt_softc_t *mpt, int tgt, int onoff)
1467 fCONFIG_PAGE_SCSI_DEVICE_1 tmp;
/* Work on a local copy; only commit to the cache after readback. */
1468 tmp = mpt->mpt_dev_page1[tgt];
1470 tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
1472 tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
1474 if (mpt_write_cfg_page(mpt, tgt, &tmp.Header)) {
/* Read back what the chip accepted before caching it. */
1477 if (mpt_read_cfg_page(mpt, tgt, &tmp.Header)) {
1480 mpt->mpt_dev_page1[tgt] = tmp;
1481 if (mpt->verbose > 1) {
1482 device_printf(mpt->dev,
1483 "SPI Target %d Page 1: RequestedParameters %x Config %x\n",
1484 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters,
1485 mpt->mpt_dev_page1[tgt].Configuration);
/*
 * mpt_setsync --
 *	Program synchronous-transfer parameters (period factor, offset,
 *	DT/QAS/IU bits) into this target's SCSI Device Page 1, write the
 *	page, read it back and cache it -- same commit pattern as
 *	mpt_setwidth() above.
 *
 *	NOTE(review): the embedded numbering skips many lines (1492, 1505,
 *	1507, 1512-13, 1516-17, 1519, 1522, 1524-25, 1527-28, and the
 *	function tail past 1534), so the body below is a fragment.
 */
1491 mpt_setsync(mpt_softc_t *mpt, int tgt, int period, int offset)
1493 fCONFIG_PAGE_SCSI_DEVICE_1 tmp;
1494 tmp = mpt->mpt_dev_page1[tgt];
/* Clear all sync-related fields first; they are re-asserted below. */
1495 tmp.RequestedParameters &=
1496 ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
1497 tmp.RequestedParameters &=
1498 ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
1499 tmp.RequestedParameters &=
1500 ~MPI_SCSIDEVPAGE1_RP_DT;
1501 tmp.RequestedParameters &=
1502 ~MPI_SCSIDEVPAGE1_RP_QAS;
1503 tmp.RequestedParameters &=
1504 ~MPI_SCSIDEVPAGE1_RP_IU;
1506 * XXX: For now, we're ignoring specific settings
/*
 * NOTE(review): per the XXX above, the caller's period/offset only gate
 * whether sync is enabled at all; the local 'offset' declared next
 * deliberately shadows the parameter, and both factor and offset are
 * re-derived from Port Page 0 capabilities instead.  'np' is |='d below
 * -- its initialization (np = 0?) falls in a numbering gap; TODO confirm
 * against the full file.
 */
1508 if (period && offset) {
1509 int factor, offset, np;
/* Port Page 0 Capabilities: period factor in bits 8-15, offset in 16-23. */
1510 factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
1511 offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
1514 np |= MPI_SCSIDEVPAGE1_RP_QAS;
1515 np |= MPI_SCSIDEVPAGE1_RP_IU;
1518 np |= MPI_SCSIDEVPAGE1_RP_DT;
/* Pack factor/offset back into the RequestedParameters bit positions. */
1520 np |= (factor << 8) | (offset << 16);
1521 tmp.RequestedParameters |= np;
1523 if (mpt_write_cfg_page(mpt, tgt, &tmp.Header)) {
/* Read back the accepted values before updating the cached page. */
1526 if (mpt_read_cfg_page(mpt, tgt, &tmp.Header)) {
1529 mpt->mpt_dev_page1[tgt] = tmp;
1530 if (mpt->verbose > 1) {
1531 device_printf(mpt->dev,
1532 "SPI Target %d Page 1: RParams %x Config %x\n",
1533 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters,
1534 mpt->mpt_dev_page1[tgt].Configuration);