/*-
 * Generic routines for LSI Fusion adapters.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/mpt/mpt.c,v 1.61 2012/02/11 12:03:44 marius Exp $
 */
#include <dev/disk/mpt/mpt.h>
#include <dev/disk/mpt/mpt_cam.h> /* XXX For static handler registration */
#include <dev/disk/mpt/mpt_raid.h> /* XXX For static handler registration */

#include <dev/disk/mpt/mpilib/mpi.h>
#include <dev/disk/mpt/mpilib/mpi_ioc.h>
#include <dev/disk/mpt/mpilib/mpi_fc.h>
#include <dev/disk/mpt/mpilib/mpi_targ.h>

#include <sys/sysctl.h>

#define MPT_MAX_TRYS 3
#define MPT_MAX_WAIT 300000
static int maxwait_ack = 0;
static int maxwait_int = 0;
static int maxwait_state = 0;

static TAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq);
mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS];

static mpt_reply_handler_t mpt_default_reply_handler;
static mpt_reply_handler_t mpt_config_reply_handler;
static mpt_reply_handler_t mpt_handshake_reply_handler;
static mpt_reply_handler_t mpt_event_reply_handler;
static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
			       MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
static int mpt_soft_reset(struct mpt_softc *mpt);
static void mpt_hard_reset(struct mpt_softc *mpt);
static int mpt_dma_buf_alloc(struct mpt_softc *mpt);
static void mpt_dma_buf_free(struct mpt_softc *mpt);
static int mpt_configure_ioc(struct mpt_softc *mpt, int, int);
static int mpt_enable_ioc(struct mpt_softc *mpt, int);
/************************* Personality Module Support *************************/
/*
 * We include one extra entry that is guaranteed to be NULL
 * to simplify our iterator.
 */
static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
static __inline struct mpt_personality*
	mpt_pers_find(struct mpt_softc *, u_int);
static __inline struct mpt_personality*
	mpt_pers_find_reverse(struct mpt_softc *, u_int);

static __inline struct mpt_personality *
mpt_pers_find(struct mpt_softc *mpt, u_int start_at)
{
	KASSERT(start_at <= MPT_MAX_PERSONALITIES,
		("mpt_pers_find: starting position out of range"));

	while (start_at < MPT_MAX_PERSONALITIES
	    && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
		start_at++;
	}
	return (mpt_personalities[start_at]);
}

/*
 * Used infrequently, so no need to optimize like the forward
 * traversal, which relies on the MAX+1 entry being guaranteed
 * to be NULL.
 */
static __inline struct mpt_personality *
mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
{
	while (start_at < MPT_MAX_PERSONALITIES
	    && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
		start_at--;
	}
	if (start_at < MPT_MAX_PERSONALITIES)
		return (mpt_personalities[start_at]);
	return (NULL);
}

#define MPT_PERS_FOREACH(mpt, pers)				\
	for (pers = mpt_pers_find(mpt, /*start_at*/0);		\
	     pers != NULL;					\
	     pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))

#define MPT_PERS_FOREACH_REVERSE(mpt, pers)				\
	for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
	     pers != NULL;						\
	     pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))
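/*
 * A minimal usage sketch of the iterators above (hypothetical caller,
 * mirroring how mpt_postattach() below walks the attached personalities):
 *
 *	struct mpt_personality *pers;
 *
 *	MPT_PERS_FOREACH(mpt, pers)
 *		pers->ready(mpt);
 *
 * The forward iterator needs no explicit NULL test in the caller because
 * entry MPT_MAX_PERSONALITIES of the table is always NULL.
 */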
static mpt_load_handler_t	mpt_stdload;
static mpt_probe_handler_t	mpt_stdprobe;
static mpt_attach_handler_t	mpt_stdattach;
static mpt_enable_handler_t	mpt_stdenable;
static mpt_ready_handler_t	mpt_stdready;
static mpt_event_handler_t	mpt_stdevent;
static mpt_reset_handler_t	mpt_stdreset;
static mpt_shutdown_handler_t	mpt_stdshutdown;
static mpt_detach_handler_t	mpt_stddetach;
static mpt_unload_handler_t	mpt_stdunload;
static struct mpt_personality mpt_default_personality =
{
	.load		= mpt_stdload,
	.probe		= mpt_stdprobe,
	.attach		= mpt_stdattach,
	.enable		= mpt_stdenable,
	.ready		= mpt_stdready,
	.event		= mpt_stdevent,
	.reset		= mpt_stdreset,
	.shutdown	= mpt_stdshutdown,
	.detach		= mpt_stddetach,
	.unload		= mpt_stdunload
};

static mpt_load_handler_t	mpt_core_load;
static mpt_attach_handler_t	mpt_core_attach;
static mpt_enable_handler_t	mpt_core_enable;
static mpt_reset_handler_t	mpt_core_ioc_reset;
static mpt_event_handler_t	mpt_core_event;
static mpt_shutdown_handler_t	mpt_core_shutdown;
static mpt_shutdown_handler_t	mpt_core_detach;
static mpt_unload_handler_t	mpt_core_unload;
static struct mpt_personality mpt_core_personality =
{
	.name		= "mpt_core",
	.load		= mpt_core_load,
//	.attach		= mpt_core_attach,
//	.enable		= mpt_core_enable,
	.event		= mpt_core_event,
	.reset		= mpt_core_ioc_reset,
	.shutdown	= mpt_core_shutdown,
	.detach		= mpt_core_detach,
	.unload		= mpt_core_unload,
};

/*
 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
 * ordering information.  We want the core to always register FIRST.
 * Other modules are set to SI_ORDER_SECOND.
 */
static moduledata_t mpt_core_mod = {
	"mpt_core", mpt_modevent, &mpt_core_personality
};
DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(mpt_core, 1);
#define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))

static int
mpt_modevent(module_t mod, int type, void *data)
{
	struct mpt_personality *pers;
	int error;

	pers = (struct mpt_personality *)data;

	error = 0;
	switch (type) {
	case MOD_LOAD:
	{
		mpt_load_handler_t **def_handler;
		mpt_load_handler_t **pers_handler;
		int i;

		for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
			if (mpt_personalities[i] == NULL)
				break;
		}
		if (i >= MPT_MAX_PERSONALITIES) {
			error = ENOMEM;
			break;
		}
		pers->id = i;
		mpt_personalities[i] = pers;

		/* Install standard/noop handlers for any NULL entries. */
		def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality);
		pers_handler = MPT_PERS_FIRST_HANDLER(pers);
		while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) {
			if (*pers_handler == NULL)
				*pers_handler = *def_handler;
			pers_handler++;
			def_handler++;
		}

		error = (pers->load(pers));
		if (error != 0)
			mpt_personalities[i] = NULL;
		break;
	}
	case MOD_SHUTDOWN:
		break;
#if __FreeBSD_version >= 500000
	case MOD_QUIESCE:
		break;
#endif
	case MOD_UNLOAD:
		error = pers->unload(pers);
		mpt_personalities[pers->id] = NULL;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}
static int
mpt_stdload(struct mpt_personality *pers)
{

	/* Load is always successful. */
	return (0);
}

static int
mpt_stdprobe(struct mpt_softc *mpt)
{

	/* Probe is always successful. */
	return (0);
}

static int
mpt_stdattach(struct mpt_softc *mpt)
{

	/* Attach is always successful. */
	return (0);
}

static int
mpt_stdenable(struct mpt_softc *mpt)
{

	/* Enable is always successful. */
	return (0);
}

static void
mpt_stdready(struct mpt_softc *mpt)
{

}

static int
mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
{

	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
	/* Event was not for us. */
	return (0);
}

static void
mpt_stdreset(struct mpt_softc *mpt, int type)
{

}

static void
mpt_stdshutdown(struct mpt_softc *mpt)
{

}

static void
mpt_stddetach(struct mpt_softc *mpt)
{

}

static int
mpt_stdunload(struct mpt_personality *pers)
{

	/* Unload is always successful. */
	return (0);
}
/*
 * Post driver attachment, we may want to perform some global actions.
 * Here is the hook to do so.
 */
static void
mpt_postattach(void *unused)
{
	struct mpt_softc *mpt;
	struct mpt_personality *pers;

	TAILQ_FOREACH(mpt, &mpt_tailq, links) {
		MPT_PERS_FOREACH(mpt, pers)
			pers->ready(mpt);
	}
}
SYSINIT(mptdev, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE, mpt_postattach, NULL);
/******************************* Bus DMA Support ******************************/
void
mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mpt_map_info *map_info;

	map_info = (struct mpt_map_info *)arg;
	map_info->error = error;
	map_info->phys = segs->ds_addr;
}
/**************************** Reply/Event Handling ****************************/
int
mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
		     mpt_handler_t handler, uint32_t *phandler_id)
{

	switch (type) {
	case MPT_HANDLER_REPLY:
	{
		u_int cbi;
		u_int free_cbi;

		if (phandler_id == NULL)
			return (EINVAL);

		free_cbi = MPT_HANDLER_ID_NONE;
		for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) {
			/*
			 * If the same handler is registered multiple
			 * times, don't error out.  Just return the
			 * index of the original registration.
			 */
			if (mpt_reply_handlers[cbi] == handler.reply_handler) {
				*phandler_id = MPT_CBI_TO_HID(cbi);
				return (0);
			}

			/*
			 * Fill from the front in the hope that
			 * all registered handlers consume only a
			 * single cache line.
			 *
			 * We don't break on the first empty slot so
			 * that the full table is checked to see if
			 * this handler was previously registered.
			 */
			if (free_cbi == MPT_HANDLER_ID_NONE &&
			    (mpt_reply_handlers[cbi]
			  == mpt_default_reply_handler))
				free_cbi = cbi;
		}
		if (free_cbi == MPT_HANDLER_ID_NONE) {
			return (ENOMEM);
		}
		mpt_reply_handlers[free_cbi] = handler.reply_handler;
		*phandler_id = MPT_CBI_TO_HID(free_cbi);
		break;
	}
	default:
		mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
		return (EINVAL);
	}
	return (0);
}
int
mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
		       mpt_handler_t handler, uint32_t handler_id)
{

	switch (type) {
	case MPT_HANDLER_REPLY:
	{
		u_int cbi;

		cbi = MPT_CBI(handler_id);
		if (cbi >= MPT_NUM_REPLY_HANDLERS
		 || mpt_reply_handlers[cbi] != handler.reply_handler)
			return (ENOENT);
		mpt_reply_handlers[cbi] = mpt_default_reply_handler;
		break;
	}
	default:
		mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
		return (EINVAL);
	}
	return (0);
}
static int
mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
	uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{

	mpt_prt(mpt,
	    "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n",
	    req, req->serno, reply_desc, reply_frame);

	if (reply_frame != NULL)
		mpt_dump_reply_frame(mpt, reply_frame);

	mpt_prt(mpt, "Reply Frame Ignored\n");

	return (/*free_reply*/TRUE);
}
static int
mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
	uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{

	if (req != NULL) {
		if (reply_frame != NULL) {
			MSG_CONFIG *cfgp;
			MSG_CONFIG_REPLY *reply;

			cfgp = (MSG_CONFIG *)req->req_vbuf;
			reply = (MSG_CONFIG_REPLY *)reply_frame;
			req->IOCStatus = le16toh(reply_frame->IOCStatus);
			bcopy(&reply->Header, &cfgp->Header,
			      sizeof(cfgp->Header));
			cfgp->ExtPageLength = reply->ExtPageLength;
			cfgp->ExtPageType = reply->ExtPageType;
		}
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
			wakeup(req);
		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
			/*
			 * Whew- we can free this request (late completion)
			 */
			mpt_free_request(mpt, req);
		}
	}

	return (TRUE);
}
static int
mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
	uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{

	/* Nothing to be done. */
	return (TRUE);
}
static int
mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
	uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_reply;

	KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler"));
	KASSERT(req != NULL, ("null request in mpt_event_reply_handler"));

	free_reply = TRUE;
	switch (reply_frame->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
	{
		MSG_EVENT_NOTIFY_REPLY *msg;
		struct mpt_personality *pers;
		u_int handled;

		handled = 0;
		msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
		msg->EventDataLength = le16toh(msg->EventDataLength);
		msg->IOCStatus = le16toh(msg->IOCStatus);
		msg->IOCLogInfo = le32toh(msg->IOCLogInfo);
		msg->Event = le32toh(msg->Event);
		MPT_PERS_FOREACH(mpt, pers)
			handled += pers->event(mpt, req, msg);

		if (handled == 0 && mpt->mpt_pers_mask == 0) {
			mpt_lprt(mpt, MPT_PRT_INFO,
				"No Handlers For Any Event Notify Frames. "
				"Event %#x (ACK %sequired).\n",
				msg->Event, msg->AckRequired? "r" : "not r");
		} else if (handled == 0) {
			mpt_lprt(mpt,
				msg->AckRequired? MPT_PRT_WARN : MPT_PRT_INFO,
				"Unhandled Event Notify Frame. Event %#x "
				"(ACK %sequired).\n",
				msg->Event, msg->AckRequired? "r" : "not r");
		}

		if (msg->AckRequired) {
			request_t *ack_req;
			uint32_t context;

			context = req->index | MPT_REPLY_HANDLER_EVENTS;
			ack_req = mpt_get_request(mpt, FALSE);
			if (ack_req == NULL) {
				struct mpt_evtf_record *evtf;

				evtf = (struct mpt_evtf_record *)reply_frame;
				evtf->context = context;
				LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
				free_reply = FALSE;
				break;
			}
			mpt_send_event_ack(mpt, ack_req, msg, context);
			/*
			 * Don't check for CONTINUATION_REPLY here
			 */
			return (free_reply);
		}
		break;
	}
	case MPI_FUNCTION_PORT_ENABLE:
		mpt_lprt(mpt, MPT_PRT_DEBUG , "enable port reply\n");
		break;
	case MPI_FUNCTION_EVENT_ACK:
		break;
	default:
		mpt_prt(mpt, "unknown event function: %x\n",
			reply_frame->Function);
		break;
	}

	/*
	 * I'm not sure that this continuation stuff works as it should.
	 *
	 * I've had FC async events occur that free the frame up because
	 * the continuation bit isn't set, and then additional async events
	 * occur using the same context.  As you might imagine, this leads
	 * to Very Bad Things.
	 *
	 * Let's just be safe for now and not free them up until we figure
	 * out what's actually happening here.
	 */
#if	0
	if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation",
		    reply_frame->Function, req, req->serno);
		if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
			MSG_EVENT_NOTIFY_REPLY *msg =
			    (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
			mpt_prtc(mpt, " Event=0x%x AckReq=%d",
			    msg->Event, msg->AckRequired);
		}
		mpt_prtc(mpt, "\n");
	} else {
		mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation",
		    reply_frame->Function, req, req->serno);
		if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
			MSG_EVENT_NOTIFY_REPLY *msg =
			    (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
			mpt_prtc(mpt, " Event=0x%x AckReq=%d",
			    msg->Event, msg->AckRequired);
		}
		mpt_prtc(mpt, "\n");
	}
#endif
	return (free_reply);
}
/*
 * Process an asynchronous event from the IOC.
 */
static int
mpt_core_event(struct mpt_softc *mpt, request_t *req,
	       MSG_EVENT_NOTIFY_REPLY *msg)
{

	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
		 msg->Event & 0xFF);
	switch(msg->Event & 0xFF) {
	case MPI_EVENT_NONE:
		break;
	case MPI_EVENT_LOG_DATA:
	{
		int i;

		/* Some error occurred that LSI wants logged */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
			msg->IOCLogInfo);
		mpt_prt(mpt, "\tEvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++)
			mpt_prtc(mpt, " %08x", msg->Data[i]);
		mpt_prtc(mpt, "\n");
		break;
	}
	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement
		 * of our mpt_send_event_request.
		 */
		break;
	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	default:
		return (0);
	}
	return (1);
}

static void
mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
		   MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
{
	MSG_EVENT_ACK *ackp;

	ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
	memset(ackp, 0, sizeof (*ackp));
	ackp->Function = MPI_FUNCTION_EVENT_ACK;
	ackp->Event = htole32(msg->Event);
	ackp->EventContext = htole32(msg->EventContext);
	ackp->MsgContext = htole32(context);
	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, ack_req);
}
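/*
 * Note on the acknowledgement flow (a descriptive summary of existing
 * paths, not a new mechanism): when an event carries AckRequired but no
 * request buffer is available, mpt_event_reply_handler() parks the reply
 * frame on mpt->ack_frames and leaves the frame un-freed.  Later,
 * mpt_free_request() drains that list with the first freed request:
 *
 *	record = LIST_FIRST(&mpt->ack_frames);
 *	LIST_REMOVE(record, links);
 *	mpt_send_event_ack(mpt, req, &record->reply, record->context);
 */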
/***************************** Interrupt Handling *****************************/
void
mpt_intr(void *arg)
{
	struct mpt_softc *mpt;
	uint32_t reply_desc;
	int ntrips = 0;

	mpt = (struct mpt_softc *)arg;
	mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n");
	MPT_LOCK_ASSERT(mpt);

	while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
		request_t	  *req;
		MSG_DEFAULT_REPLY *reply_frame;
		uint32_t	   reply_baddr;
		uint32_t	   ctxt_idx;
		u_int		   cb_index;
		u_int		   req_index;
		u_int		   offset;
		int		   free_rf;

		req = NULL;
		reply_frame = NULL;
		reply_baddr = 0;
		offset = 0;
		if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
			/*
			 * Ensure that the reply frame is coherent.
			 */
			reply_baddr = MPT_REPLY_BADDR(reply_desc);
			offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
			bus_dmamap_sync_range(mpt->reply_dmat,
			    mpt->reply_dmap, offset, MPT_REPLY_SIZE,
			    BUS_DMASYNC_POSTREAD);
			reply_frame = MPT_REPLY_OTOV(mpt, offset);
			ctxt_idx = le32toh(reply_frame->MsgContext);
		} else {
			uint32_t type;

			type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
			ctxt_idx = reply_desc;
			mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
			    reply_desc);

			switch (type) {
			case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
				ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
				break;
			case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
				ctxt_idx = GET_IO_INDEX(reply_desc);
				if (mpt->tgt_cmd_ptrs == NULL) {
					mpt_prt(mpt,
					    "mpt_intr: no target cmd ptrs\n");
					reply_desc = MPT_REPLY_EMPTY;
					break;
				}
				if (ctxt_idx >= mpt->tgt_cmds_allocated) {
					mpt_prt(mpt,
					    "mpt_intr: bad tgt cmd ctxt %u\n",
					    ctxt_idx);
					reply_desc = MPT_REPLY_EMPTY;
					ntrips = 1000;
					break;
				}
				req = mpt->tgt_cmd_ptrs[ctxt_idx];
				if (req == NULL) {
					mpt_prt(mpt, "no request backpointer "
					    "at index %u", ctxt_idx);
					reply_desc = MPT_REPLY_EMPTY;
					ntrips = 1000;
					break;
				}
				/*
				 * Reformulate ctxt_idx to be just as if
				 * it were another type of context reply
				 * so the code below will find the request
				 * via indexing into the pool.
				 */
				ctxt_idx =
				    req->index | mpt->scsi_tgt_handler_id;
				break;
			case MPI_CONTEXT_REPLY_TYPE_LAN:
				mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
				    reply_desc);
				reply_desc = MPT_REPLY_EMPTY;
				break;
			default:
				mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
				reply_desc = MPT_REPLY_EMPTY;
				break;
			}
			if (reply_desc == MPT_REPLY_EMPTY) {
				if (ntrips++ > 1000) {
					break;
				}
				continue;
			}
		}

		cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
		req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
		if (req_index < MPT_MAX_REQUESTS(mpt)) {
			req = &mpt->request_pool[req_index];
		} else {
			mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc =="
			    " 0x%x)\n", req_index, reply_desc);
		}

		bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		free_rf = mpt_reply_handlers[cb_index](mpt, req,
		    reply_desc, reply_frame);

		if (reply_frame != NULL && free_rf) {
			bus_dmamap_sync_range(mpt->reply_dmat,
			    mpt->reply_dmap, offset, MPT_REPLY_SIZE,
			    BUS_DMASYNC_PREREAD);
			mpt_free_reply(mpt, reply_baddr);
		}

		/*
		 * If we got ourselves disabled, don't get stuck in a loop
		 */
		if (mpt->disabled) {
			mpt_disable_ints(mpt);
			break;
		}
		if (ntrips++ > 1000) {
			break;
		}
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n");
}
/******************************* Error Recovery *******************************/
void
mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
			   u_int iocstatus)
{
	MSG_DEFAULT_REPLY  ioc_status_frame;
	request_t	  *req;

	memset(&ioc_status_frame, 0, sizeof(ioc_status_frame));
	ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
	ioc_status_frame.IOCStatus = iocstatus;
	while((req = TAILQ_FIRST(chain)) != NULL) {
		MSG_REQUEST_HEADER *msg_hdr;
		u_int		    cb_index;

		bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
		ioc_status_frame.Function = msg_hdr->Function;
		ioc_status_frame.MsgContext = msg_hdr->MsgContext;
		cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
		mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
		    &ioc_status_frame);
		if (mpt_req_on_pending_list(mpt, req) != 0)
			TAILQ_REMOVE(chain, req, links);
	}
}
/********************************* Diagnostics ********************************/
/*
 * Perform a diagnostic dump of a reply frame.
 */
void
mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
{

	mpt_prt(mpt, "Address Reply:\n");
	mpt_print_reply(reply_frame);
}
/******************************* Doorbell Access ******************************/
static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);

static __inline uint32_t
mpt_rd_db(struct mpt_softc *mpt)
{

	return mpt_read(mpt, MPT_OFFSET_DOORBELL);
}

static __inline uint32_t
mpt_rd_intr(struct mpt_softc *mpt)
{

	return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
}
/* Busy wait for a doorbell to be read by the IOC */
static int
mpt_wait_db_ack(struct mpt_softc *mpt)
{
	int i;

	for (i = 0; i < MPT_MAX_WAIT; i++) {
		if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
			maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
			return (MPT_OK);
		}
		DELAY(200);
	}
	return (MPT_FAIL);
}

/* Busy wait for a doorbell interrupt */
static int
mpt_wait_db_int(struct mpt_softc *mpt)
{
	int i;

	for (i = 0; i < MPT_MAX_WAIT; i++) {
		if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
			maxwait_int = i > maxwait_int ? i : maxwait_int;
			return (MPT_OK);
		}
		DELAY(100);
	}
	return (MPT_FAIL);
}

/* Sanity check that the IOC is still in the running state */
void
mpt_check_doorbell(struct mpt_softc *mpt)
{
	uint32_t db = mpt_rd_db(mpt);

	if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
		mpt_prt(mpt, "Device not running\n");
		mpt_print_db(db);
	}
}

/* Wait for the IOC to transition to a given state */
int
mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
{
	int i;

	for (i = 0; i < MPT_MAX_WAIT; i++) {
		uint32_t db = mpt_rd_db(mpt);
		if (MPT_STATE(db) == state) {
			maxwait_state = i > maxwait_state ? i : maxwait_state;
			return (MPT_OK);
		}
		DELAY(100);
	}
	return (MPT_FAIL);
}
/************************* Initialization/Configuration ***********************/
static int mpt_download_fw(struct mpt_softc *mpt);

/* Issue the reset COMMAND to the IOC */
static int
mpt_soft_reset(struct mpt_softc *mpt)
{

	mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");

	/* Have to use hard reset if we are not in Running state */
	if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
		mpt_prt(mpt, "soft reset failed: device not running\n");
		return (MPT_FAIL);
	}

	/*
	 * If the doorbell is in use we don't have a chance of getting
	 * a word in since the IOC probably crashed in message
	 * processing.  So don't waste our time.
	 */
	if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
		mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
		return (MPT_FAIL);
	}

	/* Send the reset request to the IOC */
	mpt_write(mpt, MPT_OFFSET_DOORBELL,
	    MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
	if (mpt_wait_db_ack(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: ack timeout\n");
		return (MPT_FAIL);
	}

	/* Wait for the IOC to reload and come out of reset state */
	if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: device did not restart\n");
		return (MPT_FAIL);
	}
	return MPT_OK;
}
static int
mpt_enable_diag_mode(struct mpt_softc *mpt)
{
	int try;

	try = 20;
	while (--try) {

		if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
			break;

		/* Enable diagnostic registers */
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);

		DELAY(100000);
	}
	if (try == 0)
		return (EIO);
	return (0);
}

static void
mpt_disable_diag_mode(struct mpt_softc *mpt)
{

	mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
}
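/*
 * Background note (an editorial summary of the register protocol used
 * above): the five MPI_WRSEQ_*_KEY_VALUE writes form an unlock key
 * sequence; once the chip accepts it, MPI_DIAG_DRWE reads back set and the
 * diagnostic register becomes writable.  The leading 0xFF write appears
 * intended to restart the key sequence, and the 0xFFFFFFFF write in
 * mpt_disable_diag_mode() relocks it.
 */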
/*
 * This is a magic diagnostic reset that resets all the ARM
 * processors in the chip.
 */
static void
mpt_hard_reset(struct mpt_softc *mpt)
{
	int error;
	int wait;
	uint32_t diagreg;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");

	if (mpt->is_1078) {
		mpt_write(mpt, MPT_OFFSET_RESET_1078, 0x07);
		DELAY(1000);
		return;
	}

	error = mpt_enable_diag_mode(mpt);
	if (error) {
		mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n");
		mpt_prt(mpt, "Trying to reset anyway.\n");
	}

	diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);

	/*
	 * This appears to be a workaround required for some
	 * firmware or hardware revs.
	 */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
	DELAY(1000);

	/* Diag. port is now active so we can now hit the reset bit */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);

	/*
	 * Ensure that the reset has finished.  We delay 1ms
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * access.
	 */
	wait = 5000;
	do {
		DELAY(1000);
		diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
	} while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);

	if (wait == 0) {
		mpt_prt(mpt, "WARNING - Failed hard reset! "
			"Trying to initialize anyway.\n");
	}

	/*
	 * If we have firmware to download, it must be loaded before
	 * the controller will become operational.  Do so now.
	 */
	if (mpt->fw_image != NULL) {

		error = mpt_download_fw(mpt);

		if (error) {
			mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
			mpt_prt(mpt, "Trying to initialize anyway.\n");
		}
	}

	/*
	 * Resetting the controller should have disabled write
	 * access to the diagnostic registers, but disable
	 * manually to be sure.
	 */
	mpt_disable_diag_mode(mpt);
}
static void
mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
{

	/*
	 * Complete all pending requests with a status
	 * appropriate for an IOC reset.
	 */
	mpt_complete_request_chain(mpt, &mpt->request_pending_list,
				   MPI_IOCSTATUS_INVALID_STATE);
}

/*
 * Reset the IOC when needed.  Try the software command first, then if
 * needed poke at the magic diagnostic reset.  Note that a hard reset
 * resets *both* IOCs on dual-function chips (FC929 and LSI1030) and also
 * fouls up the PCI configuration registers.
 */
int
mpt_reset(struct mpt_softc *mpt, int reinit)
{
	struct mpt_personality *pers;
	int ret;
	int retry_cnt = 0;

	/*
	 * Try a soft reset.  If that fails, get out the big hammer.
	 */
 again:
	if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
		int cnt;
		for (cnt = 0; cnt < 5; cnt++) {
			/* Failed; do a hard reset */
			mpt_hard_reset(mpt);

			/*
			 * Wait for the IOC to reload
			 * and come out of reset state
			 */
			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
			if (ret == MPT_OK) {
				break;
			}
			/*
			 * Okay- try to check again...
			 */
			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
			if (ret == MPT_OK) {
				break;
			}
			mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
			    retry_cnt, cnt);
		}
	}

	if (retry_cnt == 0) {
		/*
		 * Invoke reset handlers.  We bump the reset count so
		 * that mpt_wait_req() understands that regardless of
		 * the specified wait condition, it should stop its wait.
		 */
		mpt->reset_cnt++;
		MPT_PERS_FOREACH(mpt, pers)
			pers->reset(mpt, ret);
	}

	if (reinit) {
		ret = mpt_enable_ioc(mpt, 1);
		if (ret == MPT_OK) {
			mpt_enable_ints(mpt);
		}
	}
	if (ret != MPT_OK && retry_cnt++ < 2) {
		goto again;
	}
	return ret;
}
/* Return a command buffer to the free queue */
void
mpt_free_request(struct mpt_softc *mpt, request_t *req)
{
	request_t *nxt;
	struct mpt_evtf_record *record;
	uint32_t offset, reply_baddr;

	if (req == NULL || req != &mpt->request_pool[req->index]) {
		panic("mpt_free_request: bad req ptr");
	}
	if ((nxt = req->chain) != NULL) {
		req->chain = NULL;
		mpt_free_request(mpt, nxt);	/* NB: recursion */
	}
	KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
	KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));
	MPT_LOCK_ASSERT(mpt);
	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
	    ("mpt_free_request: req %p:%u func %x already on freelist",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
	KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
	    ("mpt_free_request: req %p:%u func %x on pending list",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
#ifdef	INVARIANTS
	mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__);
#endif

	req->ccb = NULL;
	if (LIST_EMPTY(&mpt->ack_frames)) {
		/*
		 * Insert free ones at the tail
		 */
		req->serno = 0;
		req->state = REQ_STATE_FREE;
#ifdef	INVARIANTS
		memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER));
#endif
		TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
		if (mpt->getreqwaiter != 0) {
			mpt->getreqwaiter = 0;
			wakeup(&mpt->request_free_list);
		}
		return;
	}

	/*
	 * Process an ack frame deferred due to resource shortage.
	 */
	record = LIST_FIRST(&mpt->ack_frames);
	LIST_REMOVE(record, links);
	req->state = REQ_STATE_ALLOCATED;
	mpt_assign_serno(mpt, req);
	mpt_send_event_ack(mpt, req, &record->reply, record->context);
	offset = (uint32_t)((uint8_t *)record - mpt->reply);
	reply_baddr = offset + (mpt->reply_phys & 0xFFFFFFFF);
	bus_dmamap_sync_range(mpt->reply_dmat, mpt->reply_dmap, offset,
	    MPT_REPLY_SIZE, BUS_DMASYNC_PREREAD);
	mpt_free_reply(mpt, reply_baddr);
}
/* Get a command buffer from the free queue */
request_t *
mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
{
	request_t *req;

retry:
	MPT_LOCK_ASSERT(mpt);
	req = TAILQ_FIRST(&mpt->request_free_list);
	if (req != NULL) {
		KASSERT(req == &mpt->request_pool[req->index],
		    ("mpt_get_request: corrupted request free list"));
		KASSERT(req->state == REQ_STATE_FREE,
		    ("req %p:%u not free on free list %x index %d function %x",
		    req, req->serno, req->state, req->index,
		    ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
		TAILQ_REMOVE(&mpt->request_free_list, req, links);
		req->state = REQ_STATE_ALLOCATED;
		req->chain = NULL;
		mpt_assign_serno(mpt, req);
	} else if (sleep_ok != 0) {
		mpt->getreqwaiter = 1;
		mpt_sleep(mpt, &mpt->request_free_list, 0, "mptgreq", 0);
		goto retry;
	}
	return (req);
}
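/*
 * The request life cycle, as used throughout this file (a descriptive
 * sketch of existing code paths, not a new API):
 *
 *	req = mpt_get_request(mpt, sleep_ok);	   free -> allocated
 *	... build the message in req->req_vbuf ...
 *	mpt_send_cmd(mpt, req);			   allocated -> queued
 *	mpt_wait_req(mpt, req, REQ_STATE_DONE,
 *	    REQ_STATE_DONE, sleep_ok, timeout_ms);
 *	mpt_free_request(mpt, req);		   done -> free
 *
 * On timeout the request is deliberately left to the IOC (see the "Leave
 * the request" comments in the config page routines below).
 */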
/* Pass the command to the IOC */
void
mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
{

	if (mpt->verbose > MPT_PRT_DEBUG2) {
		mpt_dump_request(mpt, req);
	}
	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	req->state |= REQ_STATE_QUEUED;
	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
	    ("req %p:%u func %x on free list in mpt_send_cmd",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
	KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
	    ("req %p:%u func %x already on pending list in mpt_send_cmd",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
	TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
	mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
}
/*
 * Wait for a request to complete.
 *
 * Inputs:
 *	mpt		softc of controller executing request
 *	req		request to wait for
 *	sleep_ok	nonzero implies may sleep in this context
 *	time_ms		timeout in ms.  0 implies no timeout.
 *
 * Return Values:
 *	0		Request completed
 *	non-0		Timeout fired before request completion.
 */
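/*
 * Typical invocation (this is how the config page helpers below call it;
 * the 5000ms timeout is illustrative):
 *
 *	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
 *	    FALSE, 5000);
 *
 * With sleep_ok set, the timeout is converted to hz-based ticks for
 * mpt_sleep(); otherwise the routine polls mpt_intr() itself in 500us
 * steps, which is why time_ms is doubled for the polling count.
 */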
int
mpt_wait_req(struct mpt_softc *mpt, request_t *req,
	     mpt_req_state_t state, mpt_req_state_t mask,
	     int sleep_ok, int time_ms)
{
	int   error;
	int   timeout;
	u_int saved_cnt;

	/*
	 * time_ms is in ms; 0 indicates infinite wait.
	 * Convert to ticks or 500us units depending on
	 * our sleep mode.
	 */
	if (sleep_ok != 0) {
		timeout = (time_ms * hz) / 1000;
	} else {
		timeout = time_ms * 2;
	}
	req->state |= REQ_STATE_NEED_WAKEUP;
	mask &= ~REQ_STATE_NEED_WAKEUP;
	saved_cnt = mpt->reset_cnt;
	while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
		if (sleep_ok != 0) {
			error = mpt_sleep(mpt, req, 0, "mptreq", timeout);
			if (error == EWOULDBLOCK) {
				timeout = 0;
				break;
			}
		} else {
			if (time_ms != 0 && --timeout == 0) {
				break;
			}
			DELAY(500);
			mpt_intr(mpt);
		}
	}
	req->state &= ~REQ_STATE_NEED_WAKEUP;
	if (mpt->reset_cnt != saved_cnt) {
		return (EIO);
	}
	if (time_ms && timeout <= 0) {
		MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf;
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
		return (ETIMEDOUT);
	}
	return (0);
}
/*
 * Send a command to the IOC via the handshake register.
 *
 * Only done at initialization time and for certain unusual
 * commands such as device/bus reset as specified by LSI.
 */
int
mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
{
	int i;
	uint32_t data, *data32;

	/* Check condition of the IOC */
	data = mpt_rd_db(mpt);
	if ((MPT_STATE(data) != MPT_DB_STATE_READY
	  && MPT_STATE(data) != MPT_DB_STATE_RUNNING
	  && MPT_STATE(data) != MPT_DB_STATE_FAULT)
	 || MPT_DB_IS_IN_USE(data)) {
		mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
		mpt_print_db(data);
		return (EBUSY);
	}

	/* We move things in 32 bit chunks */
	len = (len + 3) >> 2;
	data32 = cmd;

	/* Clear any left over pending doorbell interrupts */
	if (MPT_DB_INTR(mpt_rd_intr(mpt)))
		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/*
	 * Tell the handshake reg. we are going to send a command
	 * and how long it is going to be.
	 */
	data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
	    (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
	mpt_write(mpt, MPT_OFFSET_DOORBELL, data);

	/* Wait for the chip to notice */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_handshake_cmd: db ignored\n");
		return (ETIMEDOUT);
	}

	/* Clear the interrupt */
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	if (mpt_wait_db_ack(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_handshake_cmd: db ack timed out\n");
		return (ETIMEDOUT);
	}

	/* Send the command */
	for (i = 0; i < len; i++) {
		mpt_write(mpt, MPT_OFFSET_DOORBELL, htole32(*data32++));
		if (mpt_wait_db_ack(mpt) != MPT_OK) {
			mpt_prt(mpt,
			    "mpt_send_handshake_cmd: timeout @ index %d\n", i);
			return (ETIMEDOUT);
		}
	}
	return MPT_OK;
}
/* Get the response from the handshake register */
int
mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
{
	int left, reply_left;
	u_int16_t *data16;
	uint32_t data;
	MSG_DEFAULT_REPLY *hdr;

	/* We move things out in 16 bit chunks */
	reply_len >>= 1;
	data16 = (u_int16_t *)reply;

	hdr = (MSG_DEFAULT_REPLY *)reply;

	/* Get first word */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
		return ETIMEDOUT;
	}
	data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
	*data16++ = le16toh(data & MPT_DB_DATA_MASK);
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/* Get second word */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
		return ETIMEDOUT;
	}
	data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
	*data16++ = le16toh(data & MPT_DB_DATA_MASK);
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/*
	 * With the second word, we can now look at the length.
	 * Warn about a reply that's too short (except for IOC FACTS REPLY)
	 */
	if ((reply_len >> 1) != hdr->MsgLength &&
	    (hdr->Function != MPI_FUNCTION_IOC_FACTS)){
		mpt_prt(mpt, "reply length does not match message length: "
			"got %x; expected %zx for function %x\n",
			hdr->MsgLength << 2, reply_len << 1, hdr->Function);
	}

	/* Get rest of the reply; but don't overflow the provided buffer */
	left = (hdr->MsgLength << 1) - 2;
	reply_left = reply_len - 2;
	while (left--) {
		u_int16_t datum;

		if (mpt_wait_db_int(mpt) != MPT_OK) {
			mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
			return ETIMEDOUT;
		}
		data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
		datum = le16toh(data & MPT_DB_DATA_MASK);

		if (reply_left-- > 0)
			*data16++ = datum;

		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
	}

	/* One more wait & clear at the end */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
		return ETIMEDOUT;
	}
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		if (mpt->verbose >= MPT_PRT_TRACE)
			mpt_print_reply(hdr);
		return (MPT_FAIL | hdr->IOCStatus);
	}

	return (0);
}
static int
mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
{
	MSG_IOC_FACTS f_req;
	int error;

	memset(&f_req, 0, sizeof f_req);
	f_req.Function = MPI_FUNCTION_IOC_FACTS;
	f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
	if (error) {
		return (error);
	}
	error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
	return (error);
}

static int
mpt_get_portfacts(struct mpt_softc *mpt, U8 port, MSG_PORT_FACTS_REPLY *freplp)
{
	MSG_PORT_FACTS f_req;
	int error;

	memset(&f_req, 0, sizeof f_req);
	f_req.Function = MPI_FUNCTION_PORT_FACTS;
	f_req.PortNumber = port;
	f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
	if (error) {
		return (error);
	}
	error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
	return (error);
}
/*
 * Send the initialization request.  This is where we specify how many
 * SCSI busses and how many devices per bus we wish to emulate.
 * This is also the command that specifies the max size of the reply
 * frames from the IOC that we will be allocating.
 */
static int
mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
{
	int error = 0;
	MSG_IOC_INIT init;
	MSG_IOC_INIT_REPLY reply;

	memset(&init, 0, sizeof init);
	init.WhoInit = who;
	init.Function = MPI_FUNCTION_IOC_INIT;
	init.MaxDevices = 0;	/* at least 256 devices per bus */
	init.MaxBuses = 16;	/* at least 16 busses */

	init.MsgVersion = htole16(MPI_VERSION);
	init.HeaderVersion = htole16(MPI_HEADER_VERSION);
	init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
	init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);

	if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
		return (error);
	}

	error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
	return (error);
}
/*
 * Utility routine to read configuration headers and pages
 */
int
mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, cfgparms_t *params,
		  bus_addr_t addr, bus_size_t len, int sleep_ok, int timeout_ms)
{
	MSG_CONFIG *cfgp;
	SGE_SIMPLE32 *se;

	cfgp = req->req_vbuf;
	memset(cfgp, 0, sizeof *cfgp);
	cfgp->Action = params->Action;
	cfgp->Function = MPI_FUNCTION_CONFIG;
	cfgp->Header.PageVersion = params->PageVersion;
	cfgp->Header.PageNumber = params->PageNumber;
	cfgp->PageAddress = htole32(params->PageAddress);
	if ((params->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
	    MPI_CONFIG_PAGETYPE_EXTENDED) {
		cfgp->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
		cfgp->Header.PageLength = 0;
		cfgp->ExtPageLength = htole16(params->ExtPageLength);
		cfgp->ExtPageType = params->ExtPageType;
	} else {
		cfgp->Header.PageType = params->PageType;
		cfgp->Header.PageLength = params->PageLength;
	}
	se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
	se->Address = htole32(addr);
	MPI_pSGE_SET_LENGTH(se, len);
	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
	    MPI_SGE_FLAGS_END_OF_LIST |
	    ((params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
	   || params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
	    ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
	se->FlagsLength = htole32(se->FlagsLength);
	cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
			     sleep_ok, timeout_ms));
}
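/*
 * A condensed read sequence built on the helper above (this mirrors what
 * mpt_read_cfg_header()/mpt_read_cfg_page() below actually do; the page
 * numbers are illustrative):
 *
 *	cfgparms_t params;
 *
 *	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
 *	params.PageType = MPI_CONFIG_PAGETYPE_IOC;
 *	params.PageNumber = 2;
 *	params.PageVersion = params.PageLength = params.PageAddress = 0;
 *	error = mpt_issue_cfg_req(mpt, req, &params, 0, 0, FALSE, 5000);
 *
 * A header read uses a zero-length SGE; the follow-up page read passes
 * req->req_pbuf + MPT_RQSL(mpt) as the DMA address so the page data lands
 * in the second half of the request buffer.
 */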
int
mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion, int PageNumber,
		       uint32_t PageAddress, int ExtPageType,
		       CONFIG_EXTENDED_PAGE_HEADER *rslt,
		       int sleep_ok, int timeout_ms)
{
	request_t  *req;
	cfgparms_t params;
	MSG_CONFIG_REPLY *cfgp;
	int	    error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_extread_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = PageVersion;
	params.PageLength = 0;
	params.PageNumber = PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = PageAddress;
	params.ExtPageType = ExtPageType;
	params.ExtPageLength = 0;
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
				  sleep_ok, timeout_ms);
	if (error != 0) {
		/*
		 * Leave the request.  Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now.  Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "read_extcfg_header timed out\n");
		return (ETIMEDOUT);
	}

	switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		cfgp = req->req_vbuf;
		rslt->PageVersion = cfgp->Header.PageVersion;
		rslt->PageNumber = cfgp->Header.PageNumber;
		rslt->PageType = cfgp->Header.PageType;
		rslt->ExtPageLength = le16toh(cfgp->ExtPageLength);
		rslt->ExtPageType = cfgp->ExtPageType;
		error = 0;
		break;
	case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "Invalid Page Type %d Number %d Addr 0x%0x\n",
		    MPI_CONFIG_PAGETYPE_EXTENDED, PageNumber, PageAddress);
		error = EINVAL;
		break;
	default:
		mpt_prt(mpt, "mpt_read_extcfg_header: Config Info Status %x\n",
			req->IOCStatus);
		error = EIO;
		break;
	}
	mpt_free_request(mpt, req);
	return (error);
}
int
mpt_read_extcfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
		     CONFIG_EXTENDED_PAGE_HEADER *hdr, void *buf, size_t len,
		     int sleep_ok, int timeout_ms)
{
	request_t  *req;
	cfgparms_t params;
	int	    error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_extcfg_page: Get request failed!\n");
		return (-1);
	}

	params.Action = Action;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = 0;
	params.PageNumber = hdr->PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = PageAddress;
	params.ExtPageType = hdr->ExtPageType;
	params.ExtPageLength = hdr->ExtPageLength;
	error = mpt_issue_cfg_req(mpt, req, &params,
				  req->req_pbuf + MPT_RQSL(mpt),
				  len, sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_prt(mpt, "read_extcfg_page(%d) timed out\n", Action);
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_read_extcfg_page: Config Info Status %x\n",
			req->IOCStatus);
		mpt_free_request(mpt, req);
		return (-1);
	}
	memcpy(buf, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
	mpt_free_request(mpt, req);
	return (0);
}
int
mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
		    uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
		    int sleep_ok, int timeout_ms)
{
	request_t  *req;
	cfgparms_t params;
	MSG_CONFIG *cfgp;
	int	    error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = 0;
	params.PageLength = 0;
	params.PageNumber = PageNumber;
	params.PageType = PageType;
	params.PageAddress = PageAddress;
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
				  sleep_ok, timeout_ms);
	if (error != 0) {
		/*
		 * Leave the request.  Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now.  Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, rslt, sizeof(*rslt));
		error = 0;
		break;
	case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "Invalid Page Type %d Number %d Addr 0x%0x\n",
		    PageType, PageNumber, PageAddress);
		error = EINVAL;
		break;
	default:
		mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
			req->IOCStatus);
		error = EIO;
		break;
	}
	mpt_free_request(mpt, req);
	return (error);
}
int
mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
		  CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
		  int timeout_ms)
{
	request_t  *req;
	cfgparms_t params;
	int	    error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
		return (-1);
	}

	params.Action = Action;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
	params.PageAddress = PageAddress;
	error = mpt_issue_cfg_req(mpt, req, &params,
				  req->req_pbuf + MPT_RQSL(mpt),
				  len, sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
			req->IOCStatus);
		mpt_free_request(mpt, req);
		return (-1);
	}
	memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
	mpt_free_request(mpt, req);
	return (0);
}
int
mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
		   CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
		   int timeout_ms)
{
	request_t  *req;
	cfgparms_t params;
	u_int	    hdr_attr;
	int	    error;

	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
			hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (-1);
	}

#if	0
	/*
	 * We shouldn't mask off other bits here.
	 */
	hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK;
#endif

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL)
		return (-1);

	memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt), hdr, len);

	/*
	 * There isn't any point in restoring stripped out attributes
	 * if you then mask them going down to issue the request.
	 */
	params.Action = Action;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageAddress = PageAddress;
#if	0
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
	params.PageType = hdr->PageType;
#endif
	error = mpt_issue_cfg_req(mpt, req, &params,
				  req->req_pbuf + MPT_RQSL(mpt),
				  len, sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
			req->IOCStatus);
		mpt_free_request(mpt, req);
		return (-1);
	}
	mpt_free_request(mpt, req);
	return (0);
}
/*
 * Read IOC configuration information
 */
static int
mpt_read_config_info_ioc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_HEADER hdr;
	struct mpt_raid_volume *mpt_raid;
	int rv;
	int i;
	size_t len;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
		2, 0, &hdr, FALSE, 5000);
	/*
	 * If it's an invalid page, so what? Not a supported function....
	 */
	if (rv == EINVAL) {
		return (0);
	}
	if (rv) {
		return (rv);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "IOC Page 2 Header: Version %x len %x PageNumber %x PageType %x\n",
	    hdr.PageVersion, hdr.PageLength << 2,
	    hdr.PageNumber, hdr.PageType);

	len = hdr.PageLength * sizeof(uint32_t);
	mpt->ioc_page2 = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->ioc_page2 == NULL) {
		mpt_prt(mpt, "unable to allocate memory for IOC page 2\n");
		mpt_raid_free_mem(mpt);
		return (ENOMEM);
	}
	memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
	rv = mpt_read_cur_cfg_page(mpt, 0,
	    &mpt->ioc_page2->Header, len, FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read IOC Page 2\n");
		mpt_raid_free_mem(mpt);
		return (EIO);
	}
	mpt2host_config_page_ioc2(mpt->ioc_page2);

	if (mpt->ioc_page2->CapabilitiesFlags != 0) {
		uint32_t mask;

		mpt_prt(mpt, "Capabilities: (");
		for (mask = 1; mask != 0; mask <<= 1) {
			if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) {
				continue;
			}
			switch (mask) {
			case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
				mpt_prtc(mpt, " RAID-0");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT:
				mpt_prtc(mpt, " RAID-1E");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT:
				mpt_prtc(mpt, " RAID-1");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT:
				mpt_prtc(mpt, " SES");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT:
				mpt_prtc(mpt, " SAFTE");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT:
				mpt_prtc(mpt, " Multi-Channel-Arrays");
			default:
				break;
			}
		}
		mpt_prtc(mpt, " )\n");
		if ((mpt->ioc_page2->CapabilitiesFlags
		   & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
		    | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
		    | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) {
			mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
				mpt->ioc_page2->NumActiveVolumes,
				mpt->ioc_page2->NumActiveVolumes != 1
			      ? "s " : " ",
				mpt->ioc_page2->MaxVolumes);
			mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n",
				mpt->ioc_page2->NumActivePhysDisks,
				mpt->ioc_page2->NumActivePhysDisks != 1
			      ? "s " : " ",
				mpt->ioc_page2->MaxPhysDisks);
		}
	}

	len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
	mpt->raid_volumes = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->raid_volumes == NULL) {
		mpt_prt(mpt, "Could not allocate RAID volume data\n");
		mpt_raid_free_mem(mpt);
		return (ENOMEM);
	}

	/*
	 * Copy critical data out of ioc_page2 so that we can
	 * safely refresh the page without windows of unreliable
	 * data.
	 */
	mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes;

	len = sizeof(*mpt->raid_volumes->config_page) +
	    (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1));
	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
		mpt_raid = &mpt->raid_volumes[i];
		mpt_raid->config_page =
		    kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (mpt_raid->config_page == NULL) {
			mpt_prt(mpt, "Could not allocate RAID page data\n");
			mpt_raid_free_mem(mpt);
			return (ENOMEM);
		}
	}
	mpt->raid_page0_len = len;

	len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
	mpt->raid_disks = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->raid_disks == NULL) {
		mpt_prt(mpt, "Could not allocate RAID disk data\n");
		mpt_raid_free_mem(mpt);
		return (ENOMEM);
	}
	mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks;

	/*
	 * Load page 3.
	 */
	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
	    3, 0, &hdr, FALSE, 5000);
	if (rv) {
		mpt_raid_free_mem(mpt);
		return (EIO);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
	    hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);

	len = hdr.PageLength * sizeof(uint32_t);
	mpt->ioc_page3 = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->ioc_page3 == NULL) {
		mpt_prt(mpt, "unable to allocate memory for IOC page 3\n");
		mpt_raid_free_mem(mpt);
		return (ENOMEM);
	}
	memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
	rv = mpt_read_cur_cfg_page(mpt, 0,
	    &mpt->ioc_page3->Header, len, FALSE, 5000);
	if (rv) {
		mpt_raid_free_mem(mpt);
		return (EIO);
	}
	mpt2host_config_page_ioc3(mpt->ioc_page3);
	mpt_raid_wakeup(mpt);
	return (0);
}
/*
 * Enable the IOC port
 */
static int
mpt_send_port_enable(struct mpt_softc *mpt, int port)
{
	request_t	*req;
	MSG_PORT_ENABLE *enable_req;
	int		 error;

	req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
	if (req == NULL)
		return (-1);

	enable_req = req->req_vbuf;
	memset(enable_req, 0, MPT_RQSL(mpt));

	enable_req->Function   = MPI_FUNCTION_PORT_ENABLE;
	enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
	enable_req->PortNumber = port;

	mpt_check_doorbell(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);

	mpt_send_cmd(mpt, req);
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
	    FALSE, (mpt->is_sas || mpt->is_fc)? 300000 : 30000);
	if (error != 0) {
		mpt_prt(mpt, "port %d enable timed out\n", port);
		return (-1);
	}
	mpt_free_request(mpt, req);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port);
	return (0);
}
/*
 * Enable/Disable asynchronous event reporting.
 */
static int
mpt_send_event_request(struct mpt_softc *mpt, int onoff)
{
	request_t *req;
	MSG_EVENT_NOTIFY *enable_req;

	req = mpt_get_request(mpt, FALSE);
	if (req == NULL) {
		return (ENOMEM);
	}
	enable_req = req->req_vbuf;
	memset(enable_req, 0, sizeof *enable_req);

	enable_req->Function   = MPI_FUNCTION_EVENT_NOTIFICATION;
	enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
	enable_req->Switch     = onoff;

	mpt_check_doorbell(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n",
	    onoff ? "en" : "dis");
	/*
	 * Send the command off, but don't wait for it.
	 */
	mpt_send_cmd(mpt, req);
	return (0);
}
/*
 * Un-mask the interrupts on the chip.
 */
void
mpt_enable_ints(struct mpt_softc *mpt)
{

	/* Unmask everything except the doorbell interrupt */
	mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK);
}

/*
 * Mask the interrupts on the chip.
 */
void
mpt_disable_ints(struct mpt_softc *mpt)
{

	/* Mask all interrupts */
	mpt_write(mpt, MPT_OFFSET_INTR_MASK,
	    MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
}
static void
mpt_sysctl_attach(struct mpt_softc *mpt)
{

	SYSCTL_ADD_UINT(&mpt->mpt_sysctl_ctx,
		       SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
		       "debug", CTLFLAG_RW, &mpt->verbose, 0,
		       "Debugging/Verbose level");
	SYSCTL_ADD_UINT(&mpt->mpt_sysctl_ctx,
		       SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
		       "role", CTLFLAG_RD, &mpt->role, 0,
		       "HBA role");
#ifdef	MPT_TEST_MULTIPATH
	SYSCTL_ADD_INT(&mpt->mpt_sysctl_ctx,
		       SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
		       "failure_id", CTLFLAG_RW, &mpt->failure_id, -1,
		       "Next Target to Fail");
#endif
}
int
mpt_attach(struct mpt_softc *mpt)
{
	struct mpt_personality *pers;
	int i;
	int error;

	mpt_core_attach(mpt);
	mpt_core_enable(mpt);

	TAILQ_INSERT_TAIL(&mpt_tailq, mpt, links);
	for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
		pers = mpt_personalities[i];
		if (pers == NULL) {
			continue;
		}
		if (pers->probe(mpt) == 0) {
			error = pers->attach(mpt);
			if (error != 0) {
				mpt_detach(mpt);
				return (error);
			}
			mpt->mpt_pers_mask |= (0x1 << pers->id);
			pers->use_count++;
		}
	}

	/*
	 * Now that we've attached everything, do the enable function
	 * for all of the personalities.  This allows the personalities
	 * to do setups that are appropriate for them prior to enabling
	 * any ports.
	 */
	for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
		pers = mpt_personalities[i];
		if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) {
			error = pers->enable(mpt);
			if (error != 0) {
				mpt_prt(mpt, "personality %s attached but would"
				    " not enable (%d)\n", pers->name, error);
				mpt_detach(mpt);
				return (error);
			}
		}
	}
	return (0);
}
2218 struct mpt_personality *pers;
2220 MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2221 pers->shutdown(mpt);
2227 mpt_detach(struct mpt_softc *mpt)
2229 struct mpt_personality *pers;
2231 MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2233 mpt->mpt_pers_mask &= ~(0x1 << pers->id);
2236 TAILQ_REMOVE(&mpt_tailq, mpt, links);
static int
mpt_core_load(struct mpt_personality *pers)
{
	int i;

	/*
	 * Setup core handlers and insert the default handler
	 * into all "empty slots".
	 */
	for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) {
		mpt_reply_handlers[i] = mpt_default_reply_handler;
	}

	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
	    mpt_event_reply_handler;
	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
	    mpt_config_reply_handler;
	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
	    mpt_handshake_reply_handler;
	return (0);
}
/*
 * Initialize per-instance driver data and perform
 * initial controller configuration.
 */
static int
mpt_core_attach(struct mpt_softc *mpt)
{
	int val, error;

	LIST_INIT(&mpt->ack_frames);
	/* Put all request buffers on the free list */
	TAILQ_INIT(&mpt->request_pending_list);
	TAILQ_INIT(&mpt->request_free_list);
	TAILQ_INIT(&mpt->request_timeout_list);
	for (val = 0; val < MPT_MAX_LUNS; val++) {
		STAILQ_INIT(&mpt->trt[val].atios);
		STAILQ_INIT(&mpt->trt[val].inots);
	}
	STAILQ_INIT(&mpt->trt_wildcard.atios);
	STAILQ_INIT(&mpt->trt_wildcard.inots);
#ifdef	MPT_TEST_MULTIPATH
	mpt->failure_id = -1;
#endif
	mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE;
	sysctl_ctx_init(&mpt->mpt_sysctl_ctx);
	mpt->mpt_sysctl_tree = SYSCTL_ADD_NODE(&mpt->mpt_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(mpt->dev), CTLFLAG_RD, 0, "");
	if (mpt->mpt_sysctl_tree == NULL) {
		device_printf(mpt->dev, "can't add sysctl node\n");
		return (EINVAL);
	}
	mpt_sysctl_attach(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
	    mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));

	MPT_LOCK(mpt);
	error = mpt_configure_ioc(mpt, 0, 0);
	MPT_UNLOCK(mpt);

	return (error);
}

static int
mpt_core_enable(struct mpt_softc *mpt)
{

	/*
	 * We enter with the IOC enabled, but async events
	 * not enabled, ports not enabled and interrupts
	 * not enabled.
	 */

	/*
	 * Enable asynchronous event reporting- all personalities
	 * have attached so that they should be able to now field
	 * async events.
	 */
	mpt_send_event_request(mpt, 1);

	/*
	 * Catch any pending interrupts
	 *
	 * This seems to be crucial- otherwise
	 * the portenable below times out.
	 */
	mpt_intr(mpt);

	/*
	 * Enable Interrupts
	 */
	mpt_enable_ints(mpt);

	/*
	 * Catch any pending interrupts
	 *
	 * This seems to be crucial- otherwise
	 * the portenable below times out.
	 */
	mpt_intr(mpt);

	/*
	 * Now enable the port.
	 */
	if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
		mpt_prt(mpt, "failed to enable port 0\n");
		return (ENXIO);
	}
	return (0);
}

static int
mpt_core_shutdown(struct mpt_softc *mpt)
{

	mpt_disable_ints(mpt);
	return (0);
}

static void
mpt_core_detach(struct mpt_softc *mpt)
{
	int val;

	/*
	 * XXX: FREE MEMORY
	 */
	mpt_disable_ints(mpt);

	/* Make sure no request has pending timeouts. */
	for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
		request_t *req = &mpt->request_pool[val];
		callout_stop(&req->callout);
	}

	mpt_dma_buf_free(mpt);

	if (mpt->mpt_sysctl_tree != NULL)
		sysctl_ctx_free(&mpt->mpt_sysctl_ctx);
}

static int
mpt_core_unload(struct mpt_personality *pers)
{

	/* Unload is always successful. */
	return (0);
}

#define FW_UPLOAD_REQ_SIZE				\
	(sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION)	\
	+ sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32))
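
/*
 * Request sizing: start from the fixed MSG_FW_UPLOAD layout, drop the generic
 * SGL union placeholder at its tail, then account for what is actually
 * appended in mpt_upload_fw() - one transaction-context element followed by a
 * single 32-bit simple SGE.
 */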

static int
mpt_upload_fw(struct mpt_softc *mpt)
{
	uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE];
	MSG_FW_UPLOAD_REPLY fw_reply;
	MSG_FW_UPLOAD *fw_req;
	FW_UPLOAD_TCSGE *tsge;
	SGE_SIMPLE32 *sge;
	uint32_t flags;
	int error;

	memset(&fw_req_buf, 0, sizeof(fw_req_buf));
	fw_req = (MSG_FW_UPLOAD *)fw_req_buf;
	fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
	fw_req->Function = MPI_FUNCTION_FW_UPLOAD;
	fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL;
	tsge->DetailsLength = 12;
	tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
	tsge->ImageSize = htole32(mpt->fw_image_size);
	sge = (SGE_SIMPLE32 *)(tsge + 1);
	flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER
	    | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT
	    | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST);
	flags <<= MPI_SGE_FLAGS_SHIFT;
	sge->FlagsLength = htole32(flags | mpt->fw_image_size);
	sge->Address = htole32(mpt->fw_phys);
	bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_PREREAD);
	error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf);
	if (error)
		return (error);
	error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply);
	bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_POSTREAD);
	return (error);
}
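
/*
 * The upload request travels over the doorbell handshake (no free request
 * frames are needed this early), while the image itself is DMA'd by the IOC
 * directly into the host buffer at fw_phys - hence the IOC_TO_HOST data
 * direction and the PREREAD/POSTREAD sync pair around the exchange.
 */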

static void
mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
	       uint32_t *data, bus_size_t len)
{
	uint32_t *data_end;

	data_end = data + (roundup2(len, sizeof(uint32_t)) / 4);
	pci_enable_io(mpt->dev, SYS_RES_IOPORT);
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
	while (data != data_end) {
		mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
		data++;
	}
	pci_disable_io(mpt->dev, SYS_RES_IOPORT);
}
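
/*
 * Only the starting address is programmed into the diagnostic address
 * register; the loop then streams whole 32-bit words through the data port,
 * which relies on the hardware advancing the address on each write. `len' is
 * rounded up to a whole number of words before streaming.
 */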

static int
mpt_download_fw(struct mpt_softc *mpt)
{
	MpiFwHeader_t *fw_hdr;
	int error;
	uint32_t ext_offset;
	uint32_t data;

	if (mpt->pci_pio_reg == NULL) {
		mpt_prt(mpt, "No PIO resource!\n");
		return (ENXIO);
	}

	mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
	    mpt->fw_image_size);

	error = mpt_enable_diag_mode(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Could not enter diagnostic mode!\n");
		return (EIO);
	}

	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
	    MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM);

	fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
	bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_PREWRITE);
	mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr,
	    fw_hdr->ImageSize);
	bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_POSTWRITE);

	ext_offset = fw_hdr->NextImageHeaderOffset;
	while (ext_offset != 0) {
		MpiExtImageHeader_t *ext;

		ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset);
		ext_offset = ext->NextImageHeaderOffset;
		bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap,
		    BUS_DMASYNC_PREWRITE);
		mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext,
		    ext->ImageSize);
		bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap,
		    BUS_DMASYNC_POSTWRITE);
	}

	pci_enable_io(mpt->dev, SYS_RES_IOPORT);

	/* Setup the address to jump to on reset. */
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);

	/*
	 * The controller sets the "flash bad" status after attempting
	 * to auto-boot from flash. Clear the status so that the controller
	 * will continue the boot process with our newly installed firmware.
	 */
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
	data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);

	pci_disable_io(mpt->dev, SYS_RES_IOPORT);

	/*
	 * Re-enable the processor and clear the boot halt flag.
	 */
	data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
	data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM);
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);

	mpt_disable_diag_mode(mpt);
	return (0);
}
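
/*
 * Download sequence in brief: enter diagnostic mode with the IOC's ARM
 * processor held, stream the main image and any chained extended images to
 * their load addresses via PIO, patch the reset vector, clear the "flash bad"
 * flag so the boot ROM accepts the RAM image, then release the processor.
 */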

static int
mpt_dma_buf_alloc(struct mpt_softc *mpt)
{
	struct mpt_map_info mi;
	uint8_t *vptr;
	uint32_t pptr, end;
	int i, error;

	/* Create a child tag for data buffers */
	if (mpt_dma_tag_create(mpt, mpt->parent_dmat, 1,
	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE,
	    mpt->max_cam_seg_cnt, BUS_SPACE_MAXSIZE_32BIT, 0,
	    &mpt->buffer_dmat) != 0) {
		mpt_prt(mpt, "cannot create a dma tag for data buffers\n");
		return (1);
	}

	/* Create a child tag for request buffers */
	if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, MPT_REQ_MEM_SIZE(mpt), 1, BUS_SPACE_MAXSIZE_32BIT, 0,
	    &mpt->request_dmat) != 0) {
		mpt_prt(mpt, "cannot create a dma tag for requests\n");
		return (1);
	}

	/* Allocate some DMA accessible memory for requests */
	if (bus_dmamem_alloc(mpt->request_dmat, (void **)&mpt->request,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &mpt->request_dmap) != 0) {
		mpt_prt(mpt, "cannot allocate %d bytes of request memory\n",
		    MPT_REQ_MEM_SIZE(mpt));
		return (1);
	}

	mi.mpt = mpt;
	mi.error = 0;

	/* Load and lock it into "bus space" */
	bus_dmamap_load(mpt->request_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), mpt_map_rquest, &mi, 0);

	if (mi.error) {
		mpt_prt(mpt, "error %d loading dma map for DMA request queue\n",
		    mi.error);
		return (1);
	}
	mpt->request_phys = mi.phys;

	/*
	 * Now create per-request dma maps
	 */
	i = 0;
	pptr = mpt->request_phys;
	vptr = mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);
	while (pptr < end) {
		request_t *req = &mpt->request_pool[i];
		req->index = i++;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->buffer_dmat, 0, &req->dmap);
		if (error) {
			mpt_prt(mpt, "error %d creating per-cmd DMA maps\n",
			    error);
			return (1);
		}
	}
	return (0);
}
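
/*
 * Resulting per-request layout (one MPT_REQUEST_AREA slab per request):
 *
 *	req_vbuf/req_pbuf ----> +--------------------------------+
 *	                        | request frame + chain segments |
 *	                        +--------------------------------+
 *	sense_vbuf/sense_pbuf-> | MPT_SENSE_SIZE bytes of sense  |
 *	     (area end)         +--------------------------------+
 *
 * i.e. the sense buffer is carved from the tail of each request area.
 */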

static void
mpt_dma_buf_free(struct mpt_softc *mpt)
{
	int i;

	if (mpt->request_dmat == 0) {
		mpt_lprt(mpt, MPT_PRT_DEBUG, "already released dma memory\n");
		return;
	}
	for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
		bus_dmamap_destroy(mpt->buffer_dmat, mpt->request_pool[i].dmap);
	}
	bus_dmamap_unload(mpt->request_dmat, mpt->request_dmap);
	bus_dmamem_free(mpt->request_dmat, mpt->request, mpt->request_dmap);
	bus_dma_tag_destroy(mpt->request_dmat);
	mpt->request_dmat = 0;
	bus_dma_tag_destroy(mpt->buffer_dmat);
}

/*
 * Allocate/Initialize data structures for the controller. Called
 * once at instance startup.
 */
static int
mpt_configure_ioc(struct mpt_softc *mpt, int tn, int needreset)
{
	PTR_MSG_PORT_FACTS_REPLY pfp;
	int error, port, val;
	size_t len;

	if (tn == MPT_MAX_TRYS) {
		return (-1);
	}

	/*
	 * No need to reset if the IOC is already in the READY state.
	 *
	 * Force reset if initialization failed previously.
	 * Note that a hard_reset of the second channel of a '929
	 * will stop operation of the first channel. Hopefully, if the
	 * first channel is ok, the second will not require a hard
	 * reset.
	 */
	if (needreset || MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_READY) {
		if (mpt_reset(mpt, FALSE) != MPT_OK) {
			return (mpt_configure_ioc(mpt, tn + 1, 1));
		}
		needreset = 0;
	}

	if (mpt_get_iocfacts(mpt, &mpt->ioc_facts) != MPT_OK) {
		mpt_prt(mpt, "mpt_get_iocfacts failed\n");
		return (mpt_configure_ioc(mpt, tn + 1, 1));
	}
	mpt2host_iocfacts_reply(&mpt->ioc_facts);

	mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
	    mpt->ioc_facts.MsgVersion >> 8,
	    mpt->ioc_facts.MsgVersion & 0xFF,
	    mpt->ioc_facts.HeaderVersion >> 8,
	    mpt->ioc_facts.HeaderVersion & 0xFF);

	/*
	 * Now that we know request frame size, we can calculate
	 * the actual (reasonable) segment limit for read/write I/O.
	 *
	 * This limit is constrained by:
	 *
	 *  + The size of each area we allocate per command (and how
	 *    many chain segments we can fit into it).
	 *  + The total number of areas we've set up.
	 *  + The actual chain depth the card will allow.
	 *
	 * The first area's segment count is limited by the I/O request
	 * at the head of it. We cannot allocate realistically more
	 * than MPT_MAX_REQUESTS areas. Therefore, to account for both
	 * conditions, we'll just start out with MPT_MAX_REQUESTS-2.
	 */

	/* total number of request areas we (can) allocate */
	mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;

	/* converted to the number of chain areas possible */
	mpt->max_seg_cnt *= MPT_NRFM(mpt);

	/* limited by the number of chain areas the card will support */
	if (mpt->max_seg_cnt > mpt->ioc_facts.MaxChainDepth) {
		mpt_lprt(mpt, MPT_PRT_INFO,
		    "chain depth limited to %u (from %u)\n",
		    mpt->ioc_facts.MaxChainDepth, mpt->max_seg_cnt);
		mpt->max_seg_cnt = mpt->ioc_facts.MaxChainDepth;
	}

	/* converted to the number of simple sges in chain segments. */
	mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);

	/*
	 * Use this as the basis for reporting the maximum I/O size to CAM.
	 */
	mpt->max_cam_seg_cnt = min(mpt->max_seg_cnt, (MAXPHYS / PAGE_SIZE) + 1);
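
	/*
	 * Worked example with made-up numbers: if MPT_MAX_REQUESTS were 128,
	 * MPT_NRFM 3 and MPT_NSGL 32, the steps above would give
	 * (128 - 2) * 3 = 378 chain areas, clipped to MaxChainDepth, then
	 * multiplied by 31 simple SGEs per chain segment; the CAM limit is
	 * additionally capped at MAXPHYS/PAGE_SIZE + 1 segments per I/O.
	 */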

	error = mpt_dma_buf_alloc(mpt);
	if (error != 0) {
		mpt_prt(mpt, "mpt_dma_buf_alloc() failed!\n");
		return (EIO);
	}

	for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
		request_t *req = &mpt->request_pool[val];
		req->state = REQ_STATE_ALLOCATED;
		mpt_callout_init(mpt, &req->callout);
		mpt_free_request(mpt, req);
	}

	mpt_lprt(mpt, MPT_PRT_INFO, "Maximum Segment Count: %u, Maximum "
	    "CAM Segment Count: %u\n", mpt->max_seg_cnt,
	    mpt->max_cam_seg_cnt);

	mpt_lprt(mpt, MPT_PRT_INFO, "MsgLength=%u IOCNumber = %d\n",
	    mpt->ioc_facts.MsgLength, mpt->ioc_facts.IOCNumber);
	mpt_lprt(mpt, MPT_PRT_INFO,
	    "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
	    "Request Frame Size %u bytes Max Chain Depth %u\n",
	    mpt->ioc_facts.GlobalCredits, mpt->ioc_facts.BlockSize,
	    mpt->ioc_facts.RequestFrameSize << 2,
	    mpt->ioc_facts.MaxChainDepth);
	mpt_lprt(mpt, MPT_PRT_INFO, "IOCFACTS: Num Ports %d, FWImageSize %d, "
	    "Flags=%#x\n", mpt->ioc_facts.NumberOfPorts,
	    mpt->ioc_facts.FWImageSize, mpt->ioc_facts.Flags);

	len = mpt->ioc_facts.NumberOfPorts * sizeof (MSG_PORT_FACTS_REPLY);
	mpt->port_facts = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->port_facts == NULL) {
		mpt_prt(mpt, "unable to allocate memory for port facts\n");
		return (ENOMEM);
	}

	if ((mpt->ioc_facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) &&
	    (mpt->fw_uploaded == 0)) {
		struct mpt_map_info mi;

		/*
		 * In some configurations, the IOC's firmware is
		 * stored in a shared piece of system NVRAM that
		 * is only accessible via the BIOS. In this
		 * case, the firmware keeps a copy of firmware in
		 * RAM until the OS driver retrieves it. Once
		 * retrieved, we are responsible for re-downloading
		 * the firmware after any hard-reset.
		 */
		mpt->fw_image_size = mpt->ioc_facts.FWImageSize;
		error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
		    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
		    mpt->fw_image_size, 1, mpt->fw_image_size, 0,
		    &mpt->fw_dmat);
		if (error != 0) {
			mpt_prt(mpt, "cannot create firmware dma tag\n");
			return (ENOMEM);
		}
		error = bus_dmamem_alloc(mpt->fw_dmat,
		    (void **)&mpt->fw_image, BUS_DMA_NOWAIT |
		    BUS_DMA_COHERENT, &mpt->fw_dmap);
		if (error != 0) {
			mpt_prt(mpt, "cannot allocate firmware memory\n");
			bus_dma_tag_destroy(mpt->fw_dmat);
			return (ENOMEM);
		}
		mi.mpt = mpt;
		mi.error = 0;
		bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
		    mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, &mi, 0);
		mpt->fw_phys = mi.phys;

		error = mpt_upload_fw(mpt);
		if (error != 0) {
			mpt_prt(mpt, "firmware upload failed.\n");
			bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
			bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
			    mpt->fw_dmap);
			bus_dma_tag_destroy(mpt->fw_dmat);
			mpt->fw_image = NULL;
			return (EIO);
		}
		mpt->fw_uploaded = 1;
	}

	for (port = 0; port < mpt->ioc_facts.NumberOfPorts; port++) {
		pfp = &mpt->port_facts[port];
		error = mpt_get_portfacts(mpt, 0, pfp);
		if (error != MPT_OK) {
			mpt_prt(mpt,
			    "mpt_get_portfacts on port %d failed\n", port);
			kfree(mpt->port_facts, M_DEVBUF);
			mpt->port_facts = NULL;
			return (mpt_configure_ioc(mpt, tn + 1, 1));
		}
		mpt2host_portfacts_reply(pfp);

		if (port > 0) {
			error = MPT_PRT_INFO;
		} else {
			error = MPT_PRT_DEBUG;
		}
		mpt_lprt(mpt, error,
		    "PORTFACTS[%d]: Type %x PFlags %x IID %d MaxDev %d\n",
		    port, pfp->PortType, pfp->ProtocolFlags, pfp->PortSCSIID,
		    pfp->MaxDevices);
	}

	/*
	 * XXX: Not yet supporting more than port 0
	 */
	pfp = &mpt->port_facts[0];
	if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_FC) {
		mpt->is_fc = 1;
		mpt->is_sas = 0;
		mpt->is_spi = 0;
	} else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
		mpt->is_fc = 0;
		mpt->is_sas = 1;
		mpt->is_spi = 0;
	} else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SCSI) {
		mpt->is_fc = 0;
		mpt->is_sas = 0;
		mpt->is_spi = 1;
		if (mpt->mpt_ini_id == MPT_INI_ID_NONE)
			mpt->mpt_ini_id = pfp->PortSCSIID;
	} else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_ISCSI) {
		mpt_prt(mpt, "iSCSI not supported yet\n");
		return (ENXIO);
	} else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_INACTIVE) {
		mpt_prt(mpt, "Inactive Port\n");
		return (ENXIO);
	} else {
		mpt_prt(mpt, "unknown Port Type %#x\n", pfp->PortType);
		return (ENXIO);
	}

	/*
	 * Set our role with what this port supports.
	 *
	 * Note this might be changed later in different modules
	 * if this is different from what is wanted.
	 */
	mpt->role = MPT_ROLE_NONE;
	if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
		mpt->role |= MPT_ROLE_INITIATOR;
	}
	if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
		mpt->role |= MPT_ROLE_TARGET;
	}

	/*
	 * Enable the IOC
	 */
	if (mpt_enable_ioc(mpt, 1) != MPT_OK) {
		mpt_prt(mpt, "unable to initialize IOC\n");
		return (ENXIO);
	}

	/*
	 * Read IOC configuration information.
	 *
	 * We need this to determine whether or not we have certain
	 * settings for Integrated Mirroring (e.g.).
	 */
	mpt_read_config_info_ioc(mpt);

	return (0);
}

static int
mpt_enable_ioc(struct mpt_softc *mpt, int portenable)
{
	uint32_t pptr;
	int val;

	if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_ioc_init failed\n");
		return (EIO);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n");

	if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) {
		mpt_prt(mpt, "IOC failed to go to run state\n");
		return (ENXIO);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");

	/*
	 * Give it reply buffers
	 *
	 * Do *not* exceed global credits.
	 */
	for (val = 0, pptr = mpt->reply_phys;
	    (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
	    pptr += MPT_REPLY_SIZE) {
		mpt_free_reply(mpt, pptr);
		if (++val == mpt->ioc_facts.GlobalCredits - 1)
			break;
	}

	/*
	 * Enable the port if asked. This is only done if we're resetting
	 * the IOC after initial startup.
	 */
	if (portenable) {
		/*
		 * Enable asynchronous event reporting
		 */
		mpt_send_event_request(mpt, 1);

		if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
			mpt_prt(mpt, "%s: failed to enable port 0\n", __func__);
			return (ENXIO);
		}
	}
	return (MPT_OK);
}
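
/*
 * The reply free FIFO above is primed with at most GlobalCredits - 1 buffers:
 * posting more reply frames than the IOC advertises credits for would let it
 * overrun the pool, so one credit is deliberately held back.
 */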

/*
 * Endian Conversion Functions- only used on Big Endian machines
 */
#if	_BYTE_ORDER == _BIG_ENDIAN
void
mpt2host_sge_simple_union(SGE_SIMPLE_UNION *sge)
{

	MPT_2_HOST32(sge, FlagsLength);
	MPT_2_HOST32(sge, u.Address64.Low);
	MPT_2_HOST32(sge, u.Address64.High);
}
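
/*
 * Conversion sketch: each MPT_2_HOST* macro byte-swaps one field in place;
 * assuming the definitions in the headers, MPT_2_HOST32(sge, FlagsLength)
 * behaves like "sge->FlagsLength = le32toh(sge->FlagsLength)". On
 * little-endian hosts this whole #if block is compiled out and the
 * mpt2host_*()/host2mpt_*() calls are expected to be no-op macros.
 */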

void
mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *rp)
{

	MPT_2_HOST16(rp, MsgVersion);
	MPT_2_HOST16(rp, HeaderVersion);
	MPT_2_HOST32(rp, MsgContext);
	MPT_2_HOST16(rp, IOCExceptions);
	MPT_2_HOST16(rp, IOCStatus);
	MPT_2_HOST32(rp, IOCLogInfo);
	MPT_2_HOST16(rp, ReplyQueueDepth);
	MPT_2_HOST16(rp, RequestFrameSize);
	MPT_2_HOST16(rp, Reserved_0101_FWVersion);
	MPT_2_HOST16(rp, ProductID);
	MPT_2_HOST32(rp, CurrentHostMfaHighAddr);
	MPT_2_HOST16(rp, GlobalCredits);
	MPT_2_HOST32(rp, CurrentSenseBufferHighAddr);
	MPT_2_HOST16(rp, CurReplyFrameSize);
	MPT_2_HOST32(rp, FWImageSize);
	MPT_2_HOST32(rp, IOCCapabilities);
	MPT_2_HOST32(rp, FWVersion.Word);
	MPT_2_HOST16(rp, HighPriorityQueueDepth);
	MPT_2_HOST16(rp, Reserved2);
	mpt2host_sge_simple_union(&rp->HostPageBufferSGE);
	MPT_2_HOST32(rp, ReplyFifoHostSignalingAddr);
}

void
mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *pfp)
{

	MPT_2_HOST16(pfp, Reserved);
	MPT_2_HOST16(pfp, Reserved1);
	MPT_2_HOST32(pfp, MsgContext);
	MPT_2_HOST16(pfp, Reserved2);
	MPT_2_HOST16(pfp, IOCStatus);
	MPT_2_HOST32(pfp, IOCLogInfo);
	MPT_2_HOST16(pfp, MaxDevices);
	MPT_2_HOST16(pfp, PortSCSIID);
	MPT_2_HOST16(pfp, ProtocolFlags);
	MPT_2_HOST16(pfp, MaxPostedCmdBuffers);
	MPT_2_HOST16(pfp, MaxPersistentIDs);
	MPT_2_HOST16(pfp, MaxLanBuckets);
	MPT_2_HOST16(pfp, Reserved4);
	MPT_2_HOST32(pfp, Reserved5);
}

void
mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *ioc2)
{
	int i;

	MPT_2_HOST32(ioc2, CapabilitiesFlags);
	for (i = 0; i < MPI_IOC_PAGE_2_RAID_VOLUME_MAX; i++) {
		MPT_2_HOST16(ioc2, RaidVolume[i].Reserved3);
	}
}

void
mpt2host_config_page_ioc3(CONFIG_PAGE_IOC_3 *ioc3)
{

	MPT_2_HOST16(ioc3, Reserved2);
}

void
mpt2host_config_page_scsi_port_0(CONFIG_PAGE_SCSI_PORT_0 *sp0)
{

	MPT_2_HOST32(sp0, Capabilities);
	MPT_2_HOST32(sp0, PhysicalInterface);
}

void
mpt2host_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
{

	MPT_2_HOST32(sp1, Configuration);
	MPT_2_HOST32(sp1, OnBusTimerValue);
	MPT_2_HOST16(sp1, IDConfig);
}

void
host2mpt_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
{

	HOST_2_MPT32(sp1, Configuration);
	HOST_2_MPT32(sp1, OnBusTimerValue);
	HOST_2_MPT16(sp1, IDConfig);
}

void
mpt2host_config_page_scsi_port_2(CONFIG_PAGE_SCSI_PORT_2 *sp2)
{
	int i;

	MPT_2_HOST32(sp2, PortFlags);
	MPT_2_HOST32(sp2, PortSettings);
	for (i = 0; i < sizeof(sp2->DeviceSettings) /
	    sizeof(*sp2->DeviceSettings); i++) {
		MPT_2_HOST16(sp2, DeviceSettings[i].DeviceFlags);
	}
}

void
mpt2host_config_page_scsi_device_0(CONFIG_PAGE_SCSI_DEVICE_0 *sd0)
{

	MPT_2_HOST32(sd0, NegotiatedParameters);
	MPT_2_HOST32(sd0, Information);
}

void
mpt2host_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
{

	MPT_2_HOST32(sd1, RequestedParameters);
	MPT_2_HOST32(sd1, Reserved);
	MPT_2_HOST32(sd1, Configuration);
}

void
host2mpt_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
{

	HOST_2_MPT32(sd1, RequestedParameters);
	HOST_2_MPT32(sd1, Reserved);
	HOST_2_MPT32(sd1, Configuration);
}

void
mpt2host_config_page_fc_port_0(CONFIG_PAGE_FC_PORT_0 *fp0)
{

	MPT_2_HOST32(fp0, Flags);
	MPT_2_HOST32(fp0, PortIdentifier);
	MPT_2_HOST32(fp0, WWNN.Low);
	MPT_2_HOST32(fp0, WWNN.High);
	MPT_2_HOST32(fp0, WWPN.Low);
	MPT_2_HOST32(fp0, WWPN.High);
	MPT_2_HOST32(fp0, SupportedServiceClass);
	MPT_2_HOST32(fp0, SupportedSpeeds);
	MPT_2_HOST32(fp0, CurrentSpeed);
	MPT_2_HOST32(fp0, MaxFrameSize);
	MPT_2_HOST32(fp0, FabricWWNN.Low);
	MPT_2_HOST32(fp0, FabricWWNN.High);
	MPT_2_HOST32(fp0, FabricWWPN.Low);
	MPT_2_HOST32(fp0, FabricWWPN.High);
	MPT_2_HOST32(fp0, DiscoveredPortsCount);
	MPT_2_HOST32(fp0, MaxInitiators);
}

void
mpt2host_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
{

	MPT_2_HOST32(fp1, Flags);
	MPT_2_HOST32(fp1, NoSEEPROMWWNN.Low);
	MPT_2_HOST32(fp1, NoSEEPROMWWNN.High);
	MPT_2_HOST32(fp1, NoSEEPROMWWPN.Low);
	MPT_2_HOST32(fp1, NoSEEPROMWWPN.High);
}

void
host2mpt_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
{

	HOST_2_MPT32(fp1, Flags);
	HOST_2_MPT32(fp1, NoSEEPROMWWNN.Low);
	HOST_2_MPT32(fp1, NoSEEPROMWWNN.High);
	HOST_2_MPT32(fp1, NoSEEPROMWWPN.Low);
	HOST_2_MPT32(fp1, NoSEEPROMWWPN.High);
}

void
mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *volp)
{
	int i;

	MPT_2_HOST16(volp, VolumeStatus.Reserved);
	MPT_2_HOST16(volp, VolumeSettings.Settings);
	MPT_2_HOST32(volp, MaxLBA);
	MPT_2_HOST32(volp, MaxLBAHigh);
	MPT_2_HOST32(volp, StripeSize);
	MPT_2_HOST32(volp, Reserved2);
	MPT_2_HOST32(volp, Reserved3);
	for (i = 0; i < MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX; i++) {
		MPT_2_HOST16(volp, PhysDisk[i].Reserved);
	}
}

void
mpt2host_config_page_raid_phys_disk_0(CONFIG_PAGE_RAID_PHYS_DISK_0 *rpd0)
{

	MPT_2_HOST32(rpd0, Reserved1);
	MPT_2_HOST16(rpd0, PhysDiskStatus.Reserved);
	MPT_2_HOST32(rpd0, MaxLBA);
	MPT_2_HOST16(rpd0, ErrorData.Reserved);
	MPT_2_HOST16(rpd0, ErrorData.ErrorCount);
	MPT_2_HOST16(rpd0, ErrorData.SmartCount);
}

void
mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *vi)
{

	MPT_2_HOST16(vi, TotalBlocks.High);
	MPT_2_HOST16(vi, TotalBlocks.Low);
	MPT_2_HOST16(vi, BlocksRemaining.High);
	MPT_2_HOST16(vi, BlocksRemaining.Low);