1 /* $FreeBSD: src/sys/dev/mpt/mpt_freebsd.c,v 1.3.2.3 2002/09/24 21:37:25 mjacob Exp $ */
2 /* $DragonFly: src/sys/dev/disk/mpt/mpt_freebsd.c,v 1.8 2005/06/16 15:48:59 dillon Exp $ */
4 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
7 * Copyright (c) 2000, 2001 by Greg Ansley
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice immediately at the beginning of the file, without modification,
14 * this list of conditions, and the following disclaimer.
15 * 2. The name of the author may not be used to endorse or promote products
16 * derived from this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
34 #include "mpt_freebsd.h"
/*
 * Forward declarations for the CAM SIM entry points and the
 * SPI negotiation helpers defined later in this file.
 */
36 static void mpt_poll(struct cam_sim *);
37 static timeout_t mpttimeout;
38 static timeout_t mpttimeout2;
39 static void mpt_action(struct cam_sim *, union ccb *);
40 static int mpt_setwidth(mpt_softc_t *, int, int);
41 static int mpt_setsync(mpt_softc_t *, int, int, int);
/*
 * Attach this adapter instance to CAM: size the SIM queue from the
 * IOC's credits, allocate the SIM, register it on bus 0 and create a
 * wildcard path for async callbacks.
 */
44 mpt_cam_attach(mpt_softc_t *mpt)
/* Queue depth is the lesser of IOC global credits and driver slots. */
50 maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt))?
51 mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);
55 * Construct our SIM entry.
57 sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt,
58 mpt->unit, 1, maxq, NULL);
63 * Register exactly the bus.
66 if (xpt_bus_register(sim, 0) != CAM_SUCCESS) {
/* Wildcard target/lun path; on failure, undo the bus registration. */
71 if (xpt_create_path(&mpt->path, NULL, cam_sim_path(sim),
72 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
73 xpt_bus_deregister(cam_sim_path(sim));
/*
 * Detach from CAM: tear down the path, bus registration and SIM
 * created by mpt_cam_attach(), in reverse order of creation.
 */
81 mpt_cam_detach(mpt_softc_t *mpt)
83 if (mpt->sim != NULL) {
84 xpt_free_path(mpt->path);
85 xpt_bus_deregister(cam_sim_path(mpt->sim));
86 cam_sim_free(mpt->sim);
91 /* This routine is used after a system crash to dump core onto the
/*
 * SIM poll entry: called by CAM when interrupts are unavailable
 * (e.g. crash dump / polled completion paths).
 */
95 mpt_poll(struct cam_sim *sim)
97 mpt_softc_t *mpt = (mpt_softc_t *) cam_sim_softc(sim);
104 * This routine is called if the 9x9 does not return completion status
105 * for a command after a CAM specified time.
108 mpttimeout(void *arg)
111 union ccb *ccb = arg;
/* Recover softc and request via the back-pointers stashed in the CCB. */
115 mpt = ccb->ccb_h.ccb_mpt_ptr;
117 req = ccb->ccb_h.ccb_req_ptr;
118 oseq = req->sequence;
/*
 * Re-check the sequence number: if it changed, the command completed
 * while the timeout was firing and there is nothing to do.
 */
121 if (req->sequence != oseq) {
122 device_printf(mpt->dev, "bullet missed in timeout\n");
126 device_printf(mpt->dev, "bullet U-turned in timeout: got us\n");
128 device_printf(mpt->dev,
129 "time out on request index = 0x%02x sequence = 0x%08x\n",
130 req->index, req->sequence);
131 mpt_check_doorbell(mpt);
/* Dump chip interrupt/doorbell registers for post-mortem debugging. */
132 device_printf(mpt->dev, "Status %08X; Mask %08X; Doorbell %08X\n",
133 mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
134 mpt_read(mpt, MPT_OFFSET_INTR_MASK),
135 mpt_read(mpt, MPT_OFFSET_DOORBELL) );
136 printf("request state %s\n", mpt_req_state(req->debug));
/* Sanity-check the ccb<->request cross links before reporting. */
137 if (ccb != req->ccb) {
138 printf("time out: ccb %p != req->ccb %p\n",
141 mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);
142 req->debug = REQ_TIMEOUT;
/*
 * Borrow the free-list link to carry the softc pointer to
 * mpttimeout2, which runs hz/10 later to reclaim the request.
 */
144 req->link.sle_next = (void *) mpt;
145 callout_reset(&req->timeout, hz / 10, mpttimeout2, req);
146 ccb->ccb_h.status = CAM_CMD_TIMEOUT;
147 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
/*
 * Second-stage timeout: fires shortly after mpttimeout to free the
 * request iff it is still marked REQ_TIMEOUT (i.e. the IOC never
 * completed it in the grace period).  The softc pointer was stashed
 * in the request's free-list link by mpttimeout.
 */
154 mpttimeout2(void *arg)
156 request_t *req = arg;
157 if (req->debug == REQ_TIMEOUT) {
158 mpt_softc_t *mpt = (mpt_softc_t *) req->link.sle_next;
161 mpt_free_request(mpt, req);
167 * Callback routine from "bus_dmamap_load" or in simple case called directly.
169 * Takes a list of physical segments and builds the SGL for SCSI IO command
170 * and forwards the command to the IOC after one last check that CAM has not
171 * aborted the transaction.
174 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
179 MSG_SCSI_IO_REQUEST *mpt_req;
182 req = (request_t *)arg;
185 mpt = ccb->ccb_h.ccb_mpt_ptr;
186 req = ccb->ccb_h.ccb_req_ptr;
187 mpt_req = req->req_vbuf;
/* Map failed or too many S/G segments: fail the CCB and free the slot. */
191 if (error == 0 && nseg > MPT_SGL_MAX) {
197 device_printf(mpt->dev, "bus_dmamap_load returned %d\n",
199 if (ccb->ccb_h.status == CAM_REQ_INPROG) {
200 xpt_freeze_devq(ccb->ccb_h.path, 1);
201 ccb->ccb_h.status = CAM_DEV_QFRZN;
203 ccb->ccb_h.status |= CAM_REQ_TOO_BIG;
205 ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
207 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
209 mpt_free_request(mpt, req);
/*
 * Segment list overflows the SGEs embedded in the request frame:
 * build the first (MPT_NSGL_FIRST - 1) simple elements in place,
 * then chain the remainder via SGE_CHAIN32 elements.
 */
214 if (nseg > MPT_NSGL_FIRST(mpt)) {
220 mpt_req->DataLength = ccb->csio.dxfer_len;
221 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
222 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
223 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
225 se = (SGE_SIMPLE32 *) &mpt_req->SGL;
226 for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1; i++, se++, dm_segs++) {
229 bzero(se, sizeof (*se));
230 se->Address = dm_segs->ds_addr;
231 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
/* Mark the last in-frame simple element before the chain. */
233 if (i == MPT_NSGL_FIRST(mpt) - 2) {
234 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
236 MPI_pSGE_SET_FLAGS(se, tf);
241 * Tell the IOC where to find the first chain element
243 mpt_req->ChainOffset = ((char *)se - (char *)mpt_req) >> 2;
246 * Until we're finished with all segments...
251 * Construct the chain element that point to the
254 ce = (SGE_CHAIN32 *) se++;
/* More segments than fit in one chain buffer: link another chain. */
255 if (nleft > MPT_NSGL(mpt)) {
256 ntodo = MPT_NSGL(mpt) - 1;
257 ce->NextChainOffset = (MPT_RQSL(mpt) -
258 sizeof (SGE_SIMPLE32)) >> 2;
259 ce->Length = MPT_NSGL(mpt)
260 * sizeof(SGE_SIMPLE32);
263 ce->NextChainOffset = 0;
264 ce->Length = ntodo * sizeof (SGE_SIMPLE32);
/* Chain element points at the SGEs that follow it in the request buffer. */
266 ce->Address = req->req_pbuf +
267 ((char *)se - (char *)mpt_req);
268 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
269 for (i = 0; i < ntodo; i++, se++, dm_segs++) {
272 bzero(se, sizeof (*se));
273 se->Address = dm_segs->ds_addr;
274 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
276 if (i == ntodo - 1) {
277 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
/* Final segment of the whole transfer: terminate the list. */
278 if (ce->NextChainOffset == 0) {
280 MPI_SGE_FLAGS_END_OF_LIST |
281 MPI_SGE_FLAGS_END_OF_BUFFER;
284 MPI_pSGE_SET_FLAGS(se, tf);
290 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
291 op = BUS_DMASYNC_PREREAD;
293 op = BUS_DMASYNC_PREWRITE;
/* Only sync the map if we actually loaded a virtual buffer. */
294 if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
295 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
/* Small transfer: all segments fit in the request frame itself. */
297 } else if (nseg > 0) {
302 mpt_req->DataLength = ccb->csio.dxfer_len;
303 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
304 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
305 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
307 /* Copy the segments into our SG list */
308 se = (SGE_SIMPLE32 *) &mpt_req->SGL;
309 for (i = 0; i < nseg; i++, se++, dm_segs++) {
312 bzero(se, sizeof (*se));
313 se->Address = dm_segs->ds_addr;
314 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
318 MPI_SGE_FLAGS_LAST_ELEMENT |
319 MPI_SGE_FLAGS_END_OF_BUFFER |
320 MPI_SGE_FLAGS_END_OF_LIST;
322 MPI_pSGE_SET_FLAGS(se, tf);
325 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
326 op = BUS_DMASYNC_PREREAD;
328 op = BUS_DMASYNC_PREWRITE;
329 if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
330 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
333 se = (SGE_SIMPLE32 *) &mpt_req->SGL;
335 * No data to transfer so we just make a single simple SGL
338 MPI_pSGE_SET_FLAGS(se,
339 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
340 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
344 * Last time we need to check if this CCB needs to be aborted.
346 if (ccb->ccb_h.status != CAM_REQ_INPROG) {
347 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0)
348 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
349 mpt_free_request(mpt, req);
/* Queue the request: arm the CAM timeout and hand it to the IOC. */
355 ccb->ccb_h.status |= CAM_SIM_QUEUED;
356 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
357 callout_reset(&ccb->ccb_h.timeout_ch,
358 (ccb->ccb_h.timeout * hz) / 1000, mpttimeout, ccb);
360 if (mpt->verbose > 1)
361 mpt_print_scsi_io_request(mpt_req);
362 mpt_send_cmd(mpt, req);
/*
 * Translate an XPT_SCSI_IO CCB into an MPI SCSI IO request and kick
 * off DMA mapping; mpt_execute_req() finishes SGL construction and
 * sends the command.
 */
367 mpt_start(union ccb *ccb)
370 struct mpt_softc *mpt;
371 MSG_SCSI_IO_REQUEST *mpt_req;
372 struct ccb_scsiio *csio = &ccb->csio;
373 struct ccb_hdr *ccbh = &ccb->ccb_h;
375 /* Get the pointer for the physical adapter */
376 mpt = ccb->ccb_h.ccb_mpt_ptr;
379 /* Get a request structure off the free list */
/* Out of request slots: freeze the SIM queue and ask CAM to requeue. */
380 if ((req = mpt_get_request(mpt)) == NULL) {
381 if (mpt->outofbeer == 0) {
383 xpt_freeze_simq(mpt->sim, 1);
384 if (mpt->verbose > 1) {
385 device_printf(mpt->dev, "FREEZEQ\n");
388 ccb->ccb_h.status = CAM_REQUEUE_REQ;
394 /* Link the ccb and the request structure so we can find */
395 /* the other knowing either the request or the ccb */
397 ccb->ccb_h.ccb_req_ptr = req;
399 /* Now we build the command for the IOC */
400 mpt_req = req->req_vbuf;
401 bzero(mpt_req, sizeof *mpt_req);
403 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
404 mpt_req->Bus = mpt->bus;
/* Clamp the sense buffer to what our DMA sense area can hold. */
406 mpt_req->SenseBufferLength =
407 (csio->sense_len < MPT_SENSE_SIZE) ?
408 csio->sense_len : MPT_SENSE_SIZE;
410 /* We use the message context to find the request structure when we */
411 /* Get the command completion interrupt from the FC IOC. */
412 mpt_req->MsgContext = req->index;
414 /* Which physical device to do the I/O on */
415 mpt_req->TargetID = ccb->ccb_h.target_id;
416 mpt_req->LUN[1] = ccb->ccb_h.target_lun;
418 /* Set the direction of the transfer */
419 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
420 mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
421 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
422 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
424 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
/* Map CAM tag actions onto the MPI queueing control bits. */
426 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
427 switch(ccb->csio.tag_action) {
428 case MSG_HEAD_OF_Q_TAG:
429 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
432 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
434 case MSG_ORDERED_Q_TAG:
435 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
437 case MSG_SIMPLE_Q_TAG:
439 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
444 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
446 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
/* Disconnect control only applies to parallel SCSI, not FC. */
449 if (mpt->is_fc == 0) {
450 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
451 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
455 /* Copy the scsi command block into place */
456 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0)
457 bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
459 bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
461 mpt_req->CDBLength = csio->cdb_len;
462 mpt_req->DataLength = csio->dxfer_len;
463 mpt_req->SenseBufferLowAddr = req->sense_pbuf;
466 * If we have any data to send with this command,
467 * map it into bus space.
470 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
471 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
473 * We've been given a pointer to a single buffer.
475 if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
477 * Virtual address that needs to be translated into
478 * one or more physical pages.
482 error = bus_dmamap_load(mpt->buffer_dmat,
483 req->dmap, csio->data_ptr, csio->dxfer_len,
484 mpt_execute_req, req, 0);
/* Deferred mapping: mpt_execute_req will run later from the callback. */
485 if (error == EINPROGRESS) {
487 * So as to maintain ordering,
488 * freeze the controller queue
489 * until our mapping is
492 xpt_freeze_simq(mpt->sim, 1);
493 ccbh->status |= CAM_RELEASE_SIMQ;
497 * We have been given a pointer to single
/* Physical address: fabricate a one-entry segment list directly. */
500 struct bus_dma_segment seg;
501 seg.ds_addr = (bus_addr_t)csio->data_ptr;
502 seg.ds_len = csio->dxfer_len;
503 mpt_execute_req(req, &seg, 1, 0);
507 * We have been given a list of addresses.
508 * This case could be easily done but they are not
509 * currently generated by the CAM subsystem so there
510 * is no point in wasting the time right now.
512 struct bus_dma_segment *segs;
513 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
514 mpt_execute_req(req, NULL, 0, EFAULT);
516 /* Just use the segments provided */
517 segs = (struct bus_dma_segment *)csio->data_ptr;
518 mpt_execute_req(req, segs, csio->sglist_cnt,
519 (csio->sglist_cnt < MPT_SGL_MAX)?
/* No data phase: call straight through with an empty segment list. */
524 mpt_execute_req(req, NULL, 0, 0);
/*
 * Issue an MPI task-management bus reset via the doorbell handshake
 * (not the normal request queue).  Returns a CAM status code.
 */
530 mpt_bus_reset(union ccb *ccb)
535 MSG_SCSI_TASK_MGMT *reset_req;
537 /* Get the pointer for the physical adapter */
538 mpt = ccb->ccb_h.ccb_mpt_ptr;
540 /* Get a request structure off the free list */
541 if ((req = mpt_get_request(mpt)) == NULL) {
542 return (CAM_REQUEUE_REQ);
545 /* Link the ccb and the request structure so we can find */
546 /* the other knowing either the request or the ccb */
548 ccb->ccb_h.ccb_req_ptr = req;
550 reset_req = req->req_vbuf;
551 bzero(reset_req, sizeof *reset_req);
553 reset_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
554 reset_req->MsgContext = req->index;
555 reset_req->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
558 * Should really be TARGET_RESET_OPTION
560 reset_req->MsgFlags =
561 MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION;
563 /* Which physical device Reset */
564 reset_req->TargetID = ccb->ccb_h.target_id;
565 reset_req->LUN[1] = ccb->ccb_h.target_lun;
567 ccb->ccb_h.status |= CAM_SIM_QUEUED;
/* Task management goes out via the handshake/doorbell interface. */
569 error = mpt_send_handshake_cmd(mpt,
570 sizeof (MSG_SCSI_TASK_MGMT), reset_req);
572 device_printf(mpt->dev,
573 "mpt_bus_reset: mpt_send_handshake return %d\n", error);
574 return (CAM_REQ_CMP_ERR);
576 return (CAM_REQ_CMP);
581 * Process an asynchronous event from the IOC.
583 static void mpt_ctlop(mpt_softc_t *, void *, u_int32_t);
584 static void mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *);
/*
 * Dispatch a control-path (non SCSI-IO) address reply by MPI function
 * code.  Every branch returns the reply frame to the IOC afterwards;
 * "reply << 1" reconstructs the reply frame offset from the
 * right-shifted value the caller passed in.
 */
587 mpt_ctlop(mpt_softc_t *mpt, void *vmsg, u_int32_t reply)
589 MSG_DEFAULT_REPLY *dmsg = vmsg;
591 if (dmsg->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
592 mpt_event_notify_reply(mpt, vmsg);
593 mpt_free_reply(mpt, (reply << 1));
594 } else if (dmsg->Function == MPI_FUNCTION_EVENT_ACK) {
595 mpt_free_reply(mpt, (reply << 1));
596 } else if (dmsg->Function == MPI_FUNCTION_PORT_ENABLE) {
597 MSG_PORT_ENABLE_REPLY *msg = vmsg;
/* Strip the control-request marker bit to recover the pool index. */
598 int index = msg->MsgContext & ~0x80000000;
599 if (mpt->verbose > 1) {
600 device_printf(mpt->dev, "enable port reply idx %d\n",
603 if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
604 request_t *req = &mpt->request_pool[index];
605 req->debug = REQ_DONE;
607 mpt_free_reply(mpt, (reply << 1));
608 } else if (dmsg->Function == MPI_FUNCTION_CONFIG) {
609 MSG_CONFIG_REPLY *msg = vmsg;
610 int index = msg->MsgContext & ~0x80000000;
611 if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
612 request_t *req = &mpt->request_pool[index];
613 req->debug = REQ_DONE;
/* Stash the reply offset so the config waiter can inspect it. */
614 req->sequence = reply;
616 mpt_free_reply(mpt, (reply << 1));
619 device_printf(mpt->dev, "unknown mpt_ctlop: %x\n",
/*
 * Decode and log an MPI event notification; if the IOC asked for an
 * acknowledgement, build and send an EVENT_ACK request.
 */
625 mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
628 case MPI_EVENT_LOG_DATA:
629 /* Some error occurred that LSI wants logged */
630 device_printf(mpt->dev,
631 "\tEvtLogData: IOCLogInfo: 0x%08x\n",
633 device_printf(mpt->dev, "\tEvtLogData: Event Data:");
636 for (i = 0; i < msg->EventDataLength; i++) {
637 device_printf(mpt->dev,
638 " %08X", msg->Data[i]);
641 device_printf(mpt->dev, "\n");
644 case MPI_EVENT_UNIT_ATTENTION:
645 device_printf(mpt->dev,
646 "Bus: 0x%02x TargetID: 0x%02x\n",
647 (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
650 case MPI_EVENT_IOC_BUS_RESET:
651 /* We generated a bus reset */
652 device_printf(mpt->dev, "IOC Bus Reset Port: %d\n",
653 (msg->Data[0] >> 8) & 0xff);
656 case MPI_EVENT_EXT_BUS_RESET:
657 /* Someone else generated a bus reset */
658 device_printf(mpt->dev, "Ext Bus Reset\n");
660 * These replies don't return EventData like the MPI
663 /* xpt_async(AC_BUS_RESET, path, NULL); */
666 case MPI_EVENT_RESCAN:
668 * In general this means a device has been added
671 device_printf(mpt->dev,
672 "Rescan Port: %d\n", (msg->Data[0] >> 8) & 0xff);
673 /* xpt_async(AC_FOUND_DEVICE, path, NULL); */
676 case MPI_EVENT_LINK_STATUS_CHANGE:
677 device_printf(mpt->dev, "Port %d: LinkState: %s\n",
678 (msg->Data[1] >> 8) & 0xff,
679 ((msg->Data[0] & 0xff) == 0)? "Failed" : "Active");
/* FC loop events: Data[0] packs event code / LIP characters 3 and 4. */
682 case MPI_EVENT_LOOP_STATE_CHANGE:
683 switch ((msg->Data[0] >> 16) & 0xff) {
685 device_printf(mpt->dev,
686 "Port 0x%x: FC LinkEvent: LIP(%02X,%02X) (Loop Initialization)\n",
687 (msg->Data[1] >> 8) & 0xff,
688 (msg->Data[0] >> 8) & 0xff,
689 (msg->Data[0] ) & 0xff);
690 switch ((msg->Data[0] >> 8) & 0xff) {
692 if ((msg->Data[0] & 0xff) == 0xF7) {
693 printf("Device needs AL_PA\n");
695 printf("Device %02X doesn't like FC performance\n",
696 msg->Data[0] & 0xFF);
700 if ((msg->Data[0] & 0xff) == 0xF7) {
701 printf("Device had loop failure at its receiver prior to acquiring AL_PA\n");
703 printf("Device %02X detected loop failure at its receiver\n",
704 msg->Data[0] & 0xFF);
708 printf("Device %02X requests that device %02X reset itself\n",
710 (msg->Data[0] >> 8) & 0xFF);
715 device_printf(mpt->dev, "Port 0x%x: FC LinkEvent: LPE(%02X,%02X) (Loop Port Enable)\n",
716 (msg->Data[1] >> 8) & 0xff, /* Port */
717 (msg->Data[0] >> 8) & 0xff, /* Character 3 */
718 (msg->Data[0] ) & 0xff /* Character 4 */
722 device_printf(mpt->dev, "Port 0x%x: FC LinkEvent: LPB(%02X,%02X) (Loop Port Bypass)\n",
723 (msg->Data[1] >> 8) & 0xff, /* Port */
724 (msg->Data[0] >> 8) & 0xff, /* Character 3 */
725 (msg->Data[0] ) & 0xff /* Character 4 */
729 device_printf(mpt->dev, "Port 0x%x: FC LinkEvent: Unknown FC event (%02X %02X %02X)\n",
730 (msg->Data[1] >> 8) & 0xff, /* Port */
731 (msg->Data[0] >> 16) & 0xff, /* Event */
732 (msg->Data[0] >> 8) & 0xff, /* Character 3 */
733 (msg->Data[0] ) & 0xff /* Character 4 */
738 case MPI_EVENT_LOGOUT:
739 device_printf(mpt->dev, "FC Logout Port: %d N_PortID: %02X\n",
740 (msg->Data[1] >> 8) & 0xff,
743 case MPI_EVENT_EVENT_CHANGE:
744 /* This is just an acknowledgement of our
745 mpt_send_event_request */
748 device_printf(mpt->dev, "Unknown event %X\n", msg->Event);
/*
 * IOC requires an explicit EVENT_ACK; the high MsgContext bit marks
 * this as a control request for mpt_done()/mpt_ctlop().
 */
750 if (msg->AckRequired) {
753 if ((req = mpt_get_request(mpt)) == NULL) {
754 panic("unable to get request to acknowledge notify");
756 ackp = (MSG_EVENT_ACK *) req->req_vbuf;
757 bzero(ackp, sizeof *ackp);
758 ackp->Function = MPI_FUNCTION_EVENT_ACK;
759 ackp->Event = msg->Event;
760 ackp->EventContext = msg->EventContext;
761 ackp->MsgContext = req->index | 0x80000000;
762 mpt_check_doorbell(mpt);
763 mpt_send_cmd(mpt, req);
/*
 * Completion handler for one reply from the IOC.  A "context reply"
 * carries only the request index (implicit success); an "address
 * reply" points at a full reply frame and carries error/status
 * detail.  Translates MPI IOCStatus into CAM status, copies autosense
 * data, and releases the request and reply resources.
 */
768 mpt_done(mpt_softc_t *mpt, u_int32_t reply)
773 MSG_REQUEST_HEADER *mpt_req;
774 MSG_SCSI_IO_REPLY *mpt_reply;
776 index = -1; /* Quiet the compiler */
778 if ((reply & MPT_CONTEXT_REPLY) == 0) {
/* Context reply: index is embedded directly in the reply word. */
781 index = reply & MPT_CONTEXT_MASK;
785 bus_dmamap_sync(mpt->reply_dmat, mpt->reply_dmap,
786 BUS_DMASYNC_POSTREAD);
787 /* address reply (Error) */
788 mpt_reply = MPT_REPLY_PTOV(mpt, reply);
789 if (mpt->verbose > 1) {
790 pReply = (unsigned *) mpt_reply;
791 device_printf(mpt->dev, "Address Reply (index %u)\n",
792 mpt_reply->MsgContext & 0xffff);
793 device_printf(mpt->dev, "%08X %08X %08X %08X\n",
794 pReply[0], pReply[1], pReply[2], pReply[3]);
795 device_printf(mpt->dev, "%08X %08X %08X %08X\n",
796 pReply[4], pReply[5], pReply[6], pReply[7]);
797 device_printf(mpt->dev, "%08X %08X %08X %08X\n\n",
798 pReply[8], pReply[9], pReply[10], pReply[11]);
800 index = mpt_reply->MsgContext;
804 * Address reply with MessageContext high bit set
805 * This is most likely a notify message so we try
806 * to process it then free it
808 if ((index & 0x80000000) != 0) {
809 if (mpt_reply != NULL) {
810 mpt_ctlop(mpt, mpt_reply, reply);
812 device_printf(mpt->dev,
813 "mpt_done: index 0x%x, NULL reply\n", index);
818 /* Did we end up with a valid index into the table? */
819 if (index < 0 || index >= MPT_MAX_REQUESTS(mpt)) {
820 printf("mpt_done: invalid index (%x) in reply\n", index);
824 req = &mpt->request_pool[index];
826 /* Make sure memory hasn't been trashed */
827 if (req->index != index) {
828 printf("mpt_done: corrupted request struct");
832 /* Short cut for task management replies; nothing more for us to do */
833 mpt_req = req->req_vbuf;
834 if (mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT) {
835 if (mpt->verbose > 1) {
836 device_printf(mpt->dev, "mpt_done: TASK MGMT\n");
841 if (mpt_req->Function == MPI_FUNCTION_PORT_ENABLE) {
846 * At this point it better be a SCSI IO command, but don't
849 if (mpt_req->Function != MPI_FUNCTION_SCSI_IO_REQUEST) {
853 /* Recover the CAM control block from the request structure */
856 /* Can't have had a SCSI command with out a CAM control block */
857 if (ccb == NULL || (ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
858 device_printf(mpt->dev,
859 "mpt_done: corrupted ccb, index = 0x%02x seq = 0x%08x",
860 req->index, req->sequence);
861 printf(" request state %s\nmpt_request:\n",
862 mpt_req_state(req->debug));
863 mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);
865 if (mpt_reply != NULL) {
866 printf("\nmpt_done: reply:\n");
867 mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
869 printf("\nmpt_done: context reply: 0x%08x\n", reply);
/* Command completed: cancel the timeout armed by mpt_execute_req. */
874 callout_stop(&ccb->ccb_h.timeout_ch);
876 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
879 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
880 op = BUS_DMASYNC_POSTREAD;
882 op = BUS_DMASYNC_POSTWRITE;
884 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
885 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
889 if (mpt_reply == NULL) {
890 /* Context reply; report that the command was successful */
891 ccb->ccb_h.status = CAM_REQ_CMP;
892 ccb->csio.scsi_status = SCSI_STATUS_OK;
893 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
/* If we froze the SIM queue for lack of requests, release it now. */
894 if (mpt->outofbeer) {
895 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
897 if (mpt->verbose > 1) {
898 device_printf(mpt->dev, "THAWQ\n");
/* Address reply: translate IOCStatus into the CAM status space. */
907 ccb->csio.scsi_status = mpt_reply->SCSIStatus;
908 switch(mpt_reply->IOCStatus) {
909 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
910 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
913 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
915 * Yikes, Tagged queue full comes through this path!
917 * So we'll change it to a status error and anything
918 * that returns status should probably be a status
922 ccb->csio.dxfer_len - mpt_reply->TransferCount;
923 if (mpt_reply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS) {
924 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
928 device_printf(mpt->dev, "underrun, scsi status is %x\n", ccb->csio.scsi_status);
929 ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL;
932 case MPI_IOCSTATUS_SUCCESS:
933 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
934 switch (ccb->csio.scsi_status) {
936 ccb->ccb_h.status = CAM_REQ_CMP;
939 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
943 case MPI_IOCSTATUS_BUSY:
944 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
945 ccb->ccb_h.status = CAM_BUSY;
948 case MPI_IOCSTATUS_SCSI_INVALID_BUS:
949 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
950 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
951 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
954 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
955 ccb->ccb_h.status = CAM_DATA_RUN_ERR;
958 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
959 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
960 ccb->ccb_h.status = CAM_UNCOR_PARITY;
963 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
964 ccb->ccb_h.status = CAM_REQ_CMP;
967 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
968 ccb->ccb_h.status = CAM_UA_TERMIO;
971 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
972 ccb->ccb_h.status = CAM_REQ_TERMIO;
975 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
976 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
980 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
/* Copy autosense data gathered by the IOC into the CCB, if valid. */
984 if ((mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0) {
985 if (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) {
986 ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
988 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
989 ccb->csio.sense_resid = mpt_reply->SenseCount;
990 bcopy(req->sense_vbuf, &ccb->csio.sense_data,
991 ccb->csio.sense_len);
993 } else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
994 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
995 ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
/* Any failure freezes the device queue per CAM error-recovery rules. */
998 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
999 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
1000 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1001 xpt_freeze_devq(ccb->ccb_h.path, 1);
1006 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1007 if (mpt->outofbeer) {
1008 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1010 if (mpt->verbose > 1) {
1011 device_printf(mpt->dev, "THAWQ\n");
1019 /* If IOC done with this request free it up */
1020 if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
1021 mpt_free_request(mpt, req);
1023 /* If address reply; give the buffer back to the IOC */
1024 if (mpt_reply != NULL)
1025 mpt_free_reply(mpt, (reply << 1));
1029 mpt_action(struct cam_sim *sim, union ccb *ccb)
1033 struct ccb_trans_settings *cts;
1035 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
1037 mpt = (mpt_softc_t *)cam_sim_softc(sim);
1039 ccb->ccb_h.ccb_mpt_ptr = mpt;
1041 switch (ccb->ccb_h.func_code) {
1043 if (mpt->verbose > 1)
1044 device_printf(mpt->dev, "XPT_RESET_BUS\n");
1045 error = mpt_bus_reset(ccb);
1047 case CAM_REQ_INPROG:
1049 case CAM_REQUEUE_REQ:
1050 if (mpt->outofbeer == 0) {
1052 xpt_freeze_simq(sim, 1);
1053 if (mpt->verbose > 1) {
1054 device_printf(mpt->dev, "FREEZEQ\n");
1057 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1064 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1065 ccb->ccb_h.status |= CAM_REQ_CMP;
1066 if (mpt->outofbeer) {
1067 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1069 if (mpt->verbose > 1) {
1070 device_printf(mpt->dev, "THAWQ\n");
1079 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1086 case XPT_SCSI_IO: /* Execute the requested I/O operation */
1088 * Do a couple of preliminary checks...
1090 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
1091 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
1092 ccb->ccb_h.status = CAM_REQ_INVALID;
1099 /* Max supported CDB length is 16 bytes */
1100 if (ccb->csio.cdb_len >
1101 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
1102 ccb->ccb_h.status = CAM_REQ_INVALID;
1108 ccb->csio.scsi_status = SCSI_STATUS_OK;
1114 * XXX: Need to implement
1116 ccb->ccb_h.status = CAM_UA_ABORT;
1122 #ifdef CAM_NEW_TRAN_CODE
1123 #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS)
1125 #define IS_CURRENT_SETTINGS(c) (c->flags & CCB_TRANS_CURRENT_SETTINGS)
1127 #define DP_DISC_ENABLE 0x1
1128 #define DP_DISC_DISABL 0x2
1129 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL)
1131 #define DP_TQING_ENABLE 0x4
1132 #define DP_TQING_DISABL 0x8
1133 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL)
1135 #define DP_WIDE 0x10
1136 #define DP_NARROW 0x20
1137 #define DP_WIDTH (DP_WIDE|DP_NARROW)
1139 #define DP_SYNC 0x40
1141 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
1143 if (!IS_CURRENT_SETTINGS(cts)) {
1144 ccb->ccb_h.status = CAM_REQ_INVALID;
1150 tgt = cts->ccb_h.target_id;
1151 if (mpt->is_fc == 0) {
1153 u_int period = 0, offset = 0;
1154 #ifndef CAM_NEW_TRAN_CODE
1155 if (cts->valid & CCB_TRANS_DISC_VALID) {
1156 dval |= DP_DISC_ENABLE;
1158 if (cts->valid & CCB_TRANS_TQ_VALID) {
1159 dval |= DP_TQING_ENABLE;
1161 if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
1168 * Any SYNC RATE of nonzero and SYNC_OFFSET
1169 * of nonzero will cause us to go to the
1170 * selected (from NVRAM) maximum value for
1171 * this device. At a later point, we'll
1172 * allow finer control.
1174 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
1175 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
1177 period = cts->sync_period;
1178 offset = cts->sync_offset;
1181 struct ccb_trans_settings_scsi *scsi =
1182 &cts->proto_specific.scsi;
1183 struct ccb_trans_settings_spi *spi =
1184 &cts->xport_specific.spi;
1186 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
1187 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
1188 dval |= DP_DISC_ENABLE;
1190 dval |= DP_DISC_DISABL;
1193 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
1194 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
1195 dval |= DP_TQING_ENABLE;
1197 dval |= DP_TQING_DISABL;
1200 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
1201 if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
1207 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
1208 (spi->valid & CTS_SPI_VALID_SYNC_RATE) &&
1209 (spi->sync_period && spi->sync_offset)) {
1211 period = spi->sync_period;
1212 offset = spi->sync_offset;
1215 if (dval & DP_DISC_ENABLE) {
1216 mpt->mpt_disc_enable |= (1 << tgt);
1217 } else if (dval & DP_DISC_DISABL) {
1218 mpt->mpt_disc_enable &= ~(1 << tgt);
1220 if (dval & DP_TQING_ENABLE) {
1221 mpt->mpt_tag_enable |= (1 << tgt);
1222 } else if (dval & DP_TQING_DISABL) {
1223 mpt->mpt_tag_enable &= ~(1 << tgt);
1225 if (dval & DP_WIDTH) {
1226 if (mpt_setwidth(mpt, tgt, dval & DP_WIDE)) {
1227 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1234 if (dval & DP_SYNC) {
1235 if (mpt_setsync(mpt, tgt, period, offset)) {
1236 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1243 if (mpt->verbose > 1) {
1244 device_printf(mpt->dev,
1245 "SET tgt %d flags %x period %x off %x\n",
1246 tgt, dval, period, offset);
1249 ccb->ccb_h.status = CAM_REQ_CMP;
1255 case XPT_GET_TRAN_SETTINGS:
1257 tgt = cts->ccb_h.target_id;
1259 #ifndef CAM_NEW_TRAN_CODE
1261 * a lot of normal SCSI things don't make sense.
1263 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
1264 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
1266 * How do you measure the width of a high
1267 * speed serial bus? Well, in bytes.
1269 * Offset and period make no sense, though, so we set
1270 * (above) a 'base' transfer speed to be gigabit.
1272 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1274 struct ccb_trans_settings_fc *fc =
1275 &cts->xport_specific.fc;
1277 cts->protocol = PROTO_SCSI;
1278 cts->protocol_version = SCSI_REV_2;
1279 cts->transport = XPORT_FC;
1280 cts->transport_version = 0;
1282 fc->valid = CTS_FC_VALID_SPEED;
1283 fc->bitrate = 100000; /* XXX: Need for 2Gb/s */
1284 /* XXX: need a port database for each target */
1287 #ifdef CAM_NEW_TRAN_CODE
1288 struct ccb_trans_settings_scsi *scsi =
1289 &cts->proto_specific.scsi;
1290 struct ccb_trans_settings_spi *spi =
1291 &cts->xport_specific.spi;
1293 u_int8_t dval, pval, oval;
1296 * We aren't going off of Port PAGE2 params for
1297 * tagged queuing or disconnect capabilities
1298 * for current settings. For goal settings,
1299 * we assert all capabilities- we've had some
1300 * problems with reading NVRAM data.
1302 if (IS_CURRENT_SETTINGS(cts)) {
1303 fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
1306 tmp = mpt->mpt_dev_page0[tgt];
1307 if (mpt_read_cfg_page(mpt, tgt, &tmp.Header)) {
1308 device_printf(mpt->dev,
1309 "cannot get target %d DP0\n", tgt);
1311 if (mpt->verbose > 1) {
1312 device_printf(mpt->dev,
1313 "SPI Tgt %d Page 0: NParms %x Information %x\n",
1315 tmp.NegotiatedParameters,
1320 if (tmp.NegotiatedParameters &
1321 MPI_SCSIDEVPAGE0_NP_WIDE)
1324 if (mpt->mpt_disc_enable & (1 << tgt)) {
1325 dval |= DP_DISC_ENABLE;
1327 if (mpt->mpt_tag_enable & (1 << tgt)) {
1328 dval |= DP_TQING_ENABLE;
1330 oval = (tmp.NegotiatedParameters >> 16) & 0xff;
1331 pval = (tmp.NegotiatedParameters >> 8) & 0xff;
1334 * XXX: Fix wrt NVRAM someday. Attempts
1335 * XXX: to read port page2 device data
1336 * XXX: just returns zero in these areas.
1338 dval = DP_WIDE|DP_DISC|DP_TQING;
1339 oval = (mpt->mpt_port_page0.Capabilities >> 16);
1340 pval = (mpt->mpt_port_page0.Capabilities >> 8);
1342 #ifndef CAM_NEW_TRAN_CODE
1343 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
1344 if (dval & DP_DISC_ENABLE) {
1345 cts->flags |= CCB_TRANS_DISC_ENB;
1347 if (dval & DP_TQING_ENABLE) {
1348 cts->flags |= CCB_TRANS_TAG_ENB;
1350 if (dval & DP_WIDE) {
1351 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1353 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1355 cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
1356 CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
1358 cts->sync_period = pval;
1359 cts->sync_offset = oval;
1361 CCB_TRANS_SYNC_RATE_VALID |
1362 CCB_TRANS_SYNC_OFFSET_VALID;
1365 cts->protocol = PROTO_SCSI;
1366 cts->protocol_version = SCSI_REV_2;
1367 cts->transport = XPORT_SPI;
1368 cts->transport_version = 2;
1370 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
1371 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
1372 if (dval & DP_DISC_ENABLE) {
1373 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
1375 if (dval & DP_TQING_ENABLE) {
1376 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
1379 spi->sync_offset = oval;
1380 spi->sync_period = pval;
1381 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
1382 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
1384 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
1385 if (dval & DP_WIDE) {
1386 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1388 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
1390 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
1391 scsi->valid = CTS_SCSI_VALID_TQ;
1392 spi->valid |= CTS_SPI_VALID_DISC;
1397 if (mpt->verbose > 1) {
1398 device_printf(mpt->dev,
1399 "GET %s tgt %d flags %x period %x off %x\n",
1400 IS_CURRENT_SETTINGS(cts)? "ACTIVE" :
1401 "NVRAM", tgt, dval, pval, oval);
1404 ccb->ccb_h.status = CAM_REQ_CMP;
1410 case XPT_CALC_GEOMETRY:
1412 struct ccb_calc_geometry *ccg;
1413 u_int32_t secs_per_cylinder;
1417 if (ccg->block_size == 0) {
1418 ccb->ccb_h.status = CAM_REQ_INVALID;
1425 size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
1426 if (size_mb > 1024) {
1428 ccg->secs_per_track = 63;
1431 ccg->secs_per_track = 32;
1433 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
1434 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
1435 ccb->ccb_h.status = CAM_REQ_CMP;
1441 case XPT_PATH_INQ: /* Path routing inquiry */
1443 struct ccb_pathinq *cpi = &ccb->cpi;
1445 cpi->version_num = 1;
1446 cpi->target_sprt = 0;
1447 cpi->hba_eng_cnt = 0;
1449 cpi->bus_id = cam_sim_bus(sim);
1451 cpi->max_target = 255;
1452 cpi->hba_misc = PIM_NOBUSRESET;
1453 cpi->initiator_id = cpi->max_target + 1;
1454 cpi->base_transfer_speed = 100000;
1455 cpi->hba_inquiry = PI_TAG_ABLE;
1457 cpi->initiator_id = mpt->mpt_ini_id;
1458 cpi->base_transfer_speed = 3300;
1459 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
1461 cpi->max_target = 15;
1464 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1465 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
1466 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1467 cpi->unit_number = cam_sim_unit(sim);
1468 cpi->ccb_h.status = CAM_REQ_CMP;
1475 ccb->ccb_h.status = CAM_REQ_INVALID;
1484 mpt_setwidth(mpt_softc_t *mpt, int tgt, int onoff)
1486 fCONFIG_PAGE_SCSI_DEVICE_1 tmp;
1487 tmp = mpt->mpt_dev_page1[tgt];
1489 tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
1491 tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
1493 if (mpt_write_cfg_page(mpt, tgt, &tmp.Header)) {
1496 if (mpt_read_cfg_page(mpt, tgt, &tmp.Header)) {
1499 mpt->mpt_dev_page1[tgt] = tmp;
1500 if (mpt->verbose > 1) {
1501 device_printf(mpt->dev,
1502 "SPI Target %d Page 1: RequestedParameters %x Config %x\n",
1503 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters,
1504 mpt->mpt_dev_page1[tgt].Configuration);
1510 mpt_setsync(mpt_softc_t *mpt, int tgt, int period, int offset)
1512 fCONFIG_PAGE_SCSI_DEVICE_1 tmp;
1513 tmp = mpt->mpt_dev_page1[tgt];
1514 tmp.RequestedParameters &=
1515 ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
1516 tmp.RequestedParameters &=
1517 ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
1518 tmp.RequestedParameters &=
1519 ~MPI_SCSIDEVPAGE1_RP_DT;
1520 tmp.RequestedParameters &=
1521 ~MPI_SCSIDEVPAGE1_RP_QAS;
1522 tmp.RequestedParameters &=
1523 ~MPI_SCSIDEVPAGE1_RP_IU;
1525 * XXX: For now, we're ignoring specific settings
1527 if (period && offset) {
1528 int factor, offset, np;
1529 factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
1530 offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
1533 np |= MPI_SCSIDEVPAGE1_RP_QAS;
1534 np |= MPI_SCSIDEVPAGE1_RP_IU;
1537 np |= MPI_SCSIDEVPAGE1_RP_DT;
1539 np |= (factor << 8) | (offset << 16);
1540 tmp.RequestedParameters |= np;
1542 if (mpt_write_cfg_page(mpt, tgt, &tmp.Header)) {
1545 if (mpt_read_cfg_page(mpt, tgt, &tmp.Header)) {
1548 mpt->mpt_dev_page1[tgt] = tmp;
1549 if (mpt->verbose > 1) {
1550 device_printf(mpt->dev,
1551 "SPI Target %d Page 1: RParams %x Config %x\n",
1552 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters,
1553 mpt->mpt_dev_page1[tgt].Configuration);