2 * Generic routines for LSI Fusion adapters.
5 * Copyright (c) 2000, 2001 by Greg Ansley
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice immediately at the beginning of the file, without modification,
12 * this list of conditions, and the following disclaimer.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Copyright (c) 2002, 2006 by Matthew Jacob
30 * All rights reserved.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions are
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38 * substantially similar to the "NO WARRANTY" disclaimer below
39 * ("Disclaimer") and any redistribution must be conditioned upon including
40 * a substantially similar Disclaimer requirement for further binary
42 * 3. Neither the names of the above listed copyright holders nor the names
43 * of any contributors may be used to endorse or promote products derived
44 * from this software without specific prior written permission.
46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58 * Support from Chris Ellsworth in order to make SAS adapters work
59 * is gratefully acknowledged.
62 * Support from LSI-Logic has also gone a great deal toward making this a
63 * workable subsystem and is gratefully acknowledged.
66 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
67 * Copyright (c) 2005, WHEEL Sp. z o.o.
68 * Copyright (c) 2004, 2005 Justin T. Gibbs
69 * All rights reserved.
71 * Redistribution and use in source and binary forms, with or without
72 * modification, are permitted provided that the following conditions are
74 * 1. Redistributions of source code must retain the above copyright
75 * notice, this list of conditions and the following disclaimer.
76 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
77 * substantially similar to the "NO WARRANTY" disclaimer below
78 * ("Disclaimer") and any redistribution must be conditioned upon including
79 * a substantially similar Disclaimer requirement for further binary
81 * 3. Neither the names of the above listed copyright holders nor the names
82 * of any contributors may be used to endorse or promote products derived
83 * from this software without specific prior written permission.
85 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
86 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
87 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
88 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
89 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
90 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
91 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
92 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
93 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
94 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
95 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97 * $FreeBSD: src/sys/dev/mpt/mpt.c,v 1.57 2011/04/22 09:59:16 marius Exp $
100 #include <dev/disk/mpt/mpt.h>
101 #include <dev/disk/mpt/mpt_cam.h> /* XXX For static handler registration */
102 #include <dev/disk/mpt/mpt_raid.h> /* XXX For static handler registration */
104 #include <dev/disk/mpt/mpilib/mpi.h>
105 #include <dev/disk/mpt/mpilib/mpi_ioc.h>
106 #include <dev/disk/mpt/mpilib/mpi_fc.h>
107 #include <dev/disk/mpt/mpilib/mpi_targ.h>
109 #include <sys/sysctl.h>
111 #define MPT_MAX_TRYS 3
112 #define MPT_MAX_WAIT 300000
114 static int maxwait_ack = 0;
115 static int maxwait_int = 0;
116 static int maxwait_state = 0;
118 static TAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq);
119 mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS];
121 static mpt_reply_handler_t mpt_default_reply_handler;
122 static mpt_reply_handler_t mpt_config_reply_handler;
123 static mpt_reply_handler_t mpt_handshake_reply_handler;
124 static mpt_reply_handler_t mpt_event_reply_handler;
125 static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
126 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
127 static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
128 static int mpt_soft_reset(struct mpt_softc *mpt);
129 static void mpt_hard_reset(struct mpt_softc *mpt);
130 static int mpt_dma_buf_alloc(struct mpt_softc *mpt);
131 static void mpt_dma_buf_free(struct mpt_softc *mpt);
132 static int mpt_configure_ioc(struct mpt_softc *mpt, int, int);
133 static int mpt_enable_ioc(struct mpt_softc *mpt, int);
135 /************************* Personality Module Support *************************/
137 * We include one extra entry that is guaranteed to be NULL
138 * to simplify our iterator.
140 static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
141 static __inline struct mpt_personality*
142 mpt_pers_find(struct mpt_softc *, u_int);
143 static __inline struct mpt_personality*
144 mpt_pers_find_reverse(struct mpt_softc *, u_int);
146 static __inline struct mpt_personality *
147 mpt_pers_find(struct mpt_softc *mpt, u_int start_at)
149 KASSERT(start_at <= MPT_MAX_PERSONALITIES,
150 ("mpt_pers_find: starting position out of range\n"));
152 while (start_at < MPT_MAX_PERSONALITIES
153 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
156 return (mpt_personalities[start_at]);
160 * Used infrequently, so no need to optimize like a forward
161 * traversal, which relies on the MAX+1 entry being guaranteed NULL.
164 static __inline struct mpt_personality *
165 mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
167 while (start_at < MPT_MAX_PERSONALITIES
168 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
171 if (start_at < MPT_MAX_PERSONALITIES)
172 return (mpt_personalities[start_at]);
176 #define MPT_PERS_FOREACH(mpt, pers) \
177 for (pers = mpt_pers_find(mpt, /*start_at*/0); \
179 pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))
181 #define MPT_PERS_FOREACH_REVERSE(mpt, pers) \
182 for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
184 pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))
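/*
 * Example (as used by mpt_reset() later in this file): walk every attached
 * personality; iteration stops at the guaranteed-NULL sentinel entry.
 *
 *	MPT_PERS_FOREACH(mpt, pers)
 *		pers->reset(mpt, ret);
 */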
186 static mpt_load_handler_t mpt_stdload;
187 static mpt_probe_handler_t mpt_stdprobe;
188 static mpt_attach_handler_t mpt_stdattach;
189 static mpt_enable_handler_t mpt_stdenable;
190 static mpt_ready_handler_t mpt_stdready;
191 static mpt_event_handler_t mpt_stdevent;
192 static mpt_reset_handler_t mpt_stdreset;
193 static mpt_shutdown_handler_t mpt_stdshutdown;
194 static mpt_detach_handler_t mpt_stddetach;
195 static mpt_unload_handler_t mpt_stdunload;
196 static struct mpt_personality mpt_default_personality =
199 .probe = mpt_stdprobe,
200 .attach = mpt_stdattach,
201 .enable = mpt_stdenable,
202 .ready = mpt_stdready,
203 .event = mpt_stdevent,
204 .reset = mpt_stdreset,
205 .shutdown = mpt_stdshutdown,
206 .detach = mpt_stddetach,
207 .unload = mpt_stdunload
210 static mpt_load_handler_t mpt_core_load;
211 static mpt_attach_handler_t mpt_core_attach;
212 static mpt_enable_handler_t mpt_core_enable;
213 static mpt_reset_handler_t mpt_core_ioc_reset;
214 static mpt_event_handler_t mpt_core_event;
215 static mpt_shutdown_handler_t mpt_core_shutdown;
216 static mpt_shutdown_handler_t mpt_core_detach;
217 static mpt_unload_handler_t mpt_core_unload;
218 static struct mpt_personality mpt_core_personality =
221 .load = mpt_core_load,
222 // .attach = mpt_core_attach,
223 // .enable = mpt_core_enable,
224 .event = mpt_core_event,
225 .reset = mpt_core_ioc_reset,
226 .shutdown = mpt_core_shutdown,
227 .detach = mpt_core_detach,
228 .unload = mpt_core_unload,
232 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
233 * ordering information. We want the core to always register FIRST.
234 * Other modules are set to SI_ORDER_SECOND.
236 static moduledata_t mpt_core_mod = {
237 "mpt_core", mpt_modevent, &mpt_core_personality
239 DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
240 MODULE_VERSION(mpt_core, 1);
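/*
 * Other personality modules (e.g. the CAM and RAID support included above)
 * are expected to register themselves via DECLARE_MPT_PERSONALITY at
 * SI_ORDER_SECOND, so that mpt_core is always registered first.
 */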
242 #define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))
245 mpt_modevent(module_t mod, int type, void *data)
247 struct mpt_personality *pers;
250 pers = (struct mpt_personality *)data;
256 mpt_load_handler_t **def_handler;
257 mpt_load_handler_t **pers_handler;
260 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
261 if (mpt_personalities[i] == NULL)
264 if (i >= MPT_MAX_PERSONALITIES) {
269 mpt_personalities[i] = pers;
271 /* Install standard/noop handlers for any NULL entries. */
272 def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality);
273 pers_handler = MPT_PERS_FIRST_HANDLER(pers);
274 while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) {
275 if (*pers_handler == NULL)
276 *pers_handler = *def_handler;
281 error = (pers->load(pers));
283 mpt_personalities[i] = NULL;
288 #if __FreeBSD_version >= 500000
293 error = pers->unload(pers);
294 mpt_personalities[pers->id] = NULL;
304 mpt_stdload(struct mpt_personality *pers)
306 /* Load is always successful. */
311 mpt_stdprobe(struct mpt_softc *mpt)
313 /* Probe is always successful. */
318 mpt_stdattach(struct mpt_softc *mpt)
320 /* Attach is always successful. */
325 mpt_stdenable(struct mpt_softc *mpt)
327 /* Enable is always successful. */
332 mpt_stdready(struct mpt_softc *mpt)
338 mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
340 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
341 /* Event was not for us. */
346 mpt_stdreset(struct mpt_softc *mpt, int type)
351 mpt_stdshutdown(struct mpt_softc *mpt)
356 mpt_stddetach(struct mpt_softc *mpt)
361 mpt_stdunload(struct mpt_personality *pers)
363 /* Unload is always successful. */
368 * Post driver attachment, we may want to perform some global actions.
369 * Here is the hook to do so.
373 mpt_postattach(void *unused)
375 struct mpt_softc *mpt;
376 struct mpt_personality *pers;
378 TAILQ_FOREACH(mpt, &mpt_tailq, links) {
379 MPT_PERS_FOREACH(mpt, pers)
383 SYSINIT(mptdev, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE, mpt_postattach, NULL);
386 /******************************* Bus DMA Support ******************************/
388 mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
390 struct mpt_map_info *map_info;
392 map_info = (struct mpt_map_info *)arg;
393 map_info->error = error;
394 map_info->phys = segs->ds_addr;
397 /**************************** Reply/Event Handling ****************************/
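/*
 * Reply dispatch is keyed off a callback index (CBI) embedded in each
 * request's MsgContext: mpt_register_handler() returns a handler id, the
 * driver ORs that id into MsgContext when building a request, and the
 * interrupt path recovers the CBI with MPT_CONTEXT_TO_CBI() to select the
 * handler.  For example (as done in mpt_issue_cfg_req() below):
 *
 *	cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
 */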
399 mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
400 mpt_handler_t handler, uint32_t *phandler_id)
404 case MPT_HANDLER_REPLY:
409 if (phandler_id == NULL)
412 free_cbi = MPT_HANDLER_ID_NONE;
413 for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) {
415 * If the same handler is registered multiple
416 * times, don't error out. Just return the
417 * index of the original registration.
419 if (mpt_reply_handlers[cbi] == handler.reply_handler) {
420 *phandler_id = MPT_CBI_TO_HID(cbi);
425 * Fill from the front in the hope that
426 * all registered handlers consume only a single cache line.
429 * We don't break on the first empty slot so
430 * that the full table is checked to see if
431 * this handler was previously registered.
433 if (free_cbi == MPT_HANDLER_ID_NONE &&
434 (mpt_reply_handlers[cbi]
435 == mpt_default_reply_handler))
438 if (free_cbi == MPT_HANDLER_ID_NONE) {
441 mpt_reply_handlers[free_cbi] = handler.reply_handler;
442 *phandler_id = MPT_CBI_TO_HID(free_cbi);
446 mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
453 mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
454 mpt_handler_t handler, uint32_t handler_id)
458 case MPT_HANDLER_REPLY:
462 cbi = MPT_CBI(handler_id);
463 if (cbi >= MPT_NUM_REPLY_HANDLERS
464 || mpt_reply_handlers[cbi] != handler.reply_handler)
466 mpt_reply_handlers[cbi] = mpt_default_reply_handler;
470 mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
477 mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
478 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
481 "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n",
482 req, req->serno, reply_desc, reply_frame);
484 if (reply_frame != NULL)
485 mpt_dump_reply_frame(mpt, reply_frame);
487 mpt_prt(mpt, "Reply Frame Ignored\n");
489 return (/*free_reply*/TRUE);
493 mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
494 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
498 if (reply_frame != NULL) {
500 MSG_CONFIG_REPLY *reply;
502 cfgp = (MSG_CONFIG *)req->req_vbuf;
503 reply = (MSG_CONFIG_REPLY *)reply_frame;
504 req->IOCStatus = le16toh(reply_frame->IOCStatus);
505 bcopy(&reply->Header, &cfgp->Header,
506 sizeof(cfgp->Header));
507 cfgp->ExtPageLength = reply->ExtPageLength;
508 cfgp->ExtPageType = reply->ExtPageType;
510 req->state &= ~REQ_STATE_QUEUED;
511 req->state |= REQ_STATE_DONE;
512 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
513 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
515 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
517 * Whew- we can free this request (late completion)
519 mpt_free_request(mpt, req);
527 mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
528 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
530 /* Nothing to be done. */
535 mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
536 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
540 KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler"));
541 KASSERT(req != NULL, ("null request in mpt_event_reply_handler"));
544 switch (reply_frame->Function) {
545 case MPI_FUNCTION_EVENT_NOTIFICATION:
547 MSG_EVENT_NOTIFY_REPLY *msg;
548 struct mpt_personality *pers;
552 msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
553 msg->EventDataLength = le16toh(msg->EventDataLength);
554 msg->IOCStatus = le16toh(msg->IOCStatus);
555 msg->IOCLogInfo = le32toh(msg->IOCLogInfo);
556 msg->Event = le32toh(msg->Event);
557 MPT_PERS_FOREACH(mpt, pers)
558 handled += pers->event(mpt, req, msg);
560 if (handled == 0 && mpt->mpt_pers_mask == 0) {
561 mpt_lprt(mpt, MPT_PRT_INFO,
562 "No Handlers For Any Event Notify Frames. "
563 "Event %#x (ACK %sequired).\n",
564 msg->Event, msg->AckRequired? "r" : "not r");
565 } else if (handled == 0) {
567 msg->AckRequired? MPT_PRT_WARN : MPT_PRT_INFO,
568 "Unhandled Event Notify Frame. Event %#x "
569 "(ACK %sequired).\n",
570 msg->Event, msg->AckRequired? "r" : "not r");
573 if (msg->AckRequired) {
577 context = req->index | MPT_REPLY_HANDLER_EVENTS;
578 ack_req = mpt_get_request(mpt, FALSE);
579 if (ack_req == NULL) {
580 struct mpt_evtf_record *evtf;
582 evtf = (struct mpt_evtf_record *)reply_frame;
583 evtf->context = context;
584 LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
588 mpt_send_event_ack(mpt, ack_req, msg, context);
590 * Don't check for CONTINUATION_REPLY here
596 case MPI_FUNCTION_PORT_ENABLE:
597 mpt_lprt(mpt, MPT_PRT_DEBUG , "enable port reply\n");
599 case MPI_FUNCTION_EVENT_ACK:
602 mpt_prt(mpt, "unknown event function: %x\n",
603 reply_frame->Function);
608 * I'm not sure that this continuation stuff works as it should.
610 * I've had FC async events occur that free the frame up because
611 * the continuation bit isn't set, and then additional async events
612 * then occur using the same context. As you might imagine, this
613 * leads to Very Bad Things.
615 * Let's just be safe for now and not free them up until we figure
616 * out what's actually happening here.
619 if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) {
620 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
621 mpt_free_request(mpt, req);
622 mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation",
623 reply_frame->Function, req, req->serno);
624 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
625 MSG_EVENT_NOTIFY_REPLY *msg =
626 (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
627 mpt_prtc(mpt, " Event=0x%x AckReq=%d",
628 msg->Event, msg->AckRequired);
631 mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation",
632 reply_frame->Function, req, req->serno);
633 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
634 MSG_EVENT_NOTIFY_REPLY *msg =
635 (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
636 mpt_prtc(mpt, " Event=0x%x AckReq=%d",
637 msg->Event, msg->AckRequired);
646 * Process an asynchronous event from the IOC.
649 mpt_core_event(struct mpt_softc *mpt, request_t *req,
650 MSG_EVENT_NOTIFY_REPLY *msg)
652 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
654 switch(msg->Event & 0xFF) {
657 case MPI_EVENT_LOG_DATA:
661 /* Some error occurred that LSI wants logged */
662 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
664 mpt_prt(mpt, "\tEvtLogData: Event Data:");
665 for (i = 0; i < msg->EventDataLength; i++)
666 mpt_prtc(mpt, " %08x", msg->Data[i]);
670 case MPI_EVENT_EVENT_CHANGE:
672 * This is just an acknowledgement
673 * of our mpt_send_event_request.
676 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
686 mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
687 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
691 ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
692 memset(ackp, 0, sizeof (*ackp));
693 ackp->Function = MPI_FUNCTION_EVENT_ACK;
694 ackp->Event = htole32(msg->Event);
695 ackp->EventContext = htole32(msg->EventContext);
696 ackp->MsgContext = htole32(context);
697 mpt_check_doorbell(mpt);
698 mpt_send_cmd(mpt, ack_req);
701 /***************************** Interrupt Handling *****************************/
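/*
 * mpt_intr() drains the reply queue.  Each descriptor is either an
 * "address reply" (MPI_ADDRESS_REPLY_A_BIT set), pointing at a reply frame
 * in the reply DMA area, or a "context reply" carrying only a context value
 * that identifies the originating request.  In both cases the callback
 * index encoded in the context selects the reply handler to run.
 */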
705 struct mpt_softc *mpt;
709 mpt = (struct mpt_softc *)arg;
710 mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n");
711 MPT_LOCK_ASSERT(mpt);
713 while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
715 MSG_DEFAULT_REPLY *reply_frame;
716 uint32_t reply_baddr;
727 if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
729 * Ensure that the reply frame is coherent.
731 reply_baddr = MPT_REPLY_BADDR(reply_desc);
732 offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
733 bus_dmamap_sync_range(mpt->reply_dmat,
734 mpt->reply_dmap, offset, MPT_REPLY_SIZE,
735 BUS_DMASYNC_POSTREAD);
736 reply_frame = MPT_REPLY_OTOV(mpt, offset);
737 ctxt_idx = le32toh(reply_frame->MsgContext);
741 type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
742 ctxt_idx = reply_desc;
743 mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
747 case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
748 ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
750 case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
751 ctxt_idx = GET_IO_INDEX(reply_desc);
752 if (mpt->tgt_cmd_ptrs == NULL) {
754 "mpt_intr: no target cmd ptrs\n");
755 reply_desc = MPT_REPLY_EMPTY;
758 if (ctxt_idx >= mpt->tgt_cmds_allocated) {
760 "mpt_intr: bad tgt cmd ctxt %u\n",
762 reply_desc = MPT_REPLY_EMPTY;
766 req = mpt->tgt_cmd_ptrs[ctxt_idx];
768 mpt_prt(mpt, "no request backpointer "
769 "at index %u", ctxt_idx);
770 reply_desc = MPT_REPLY_EMPTY;
775 * Reformulate ctxt_idx to be just as if
776 * it were another type of context reply
777 * so the code below will find the request
778 * via indexing into the pool.
781 req->index | mpt->scsi_tgt_handler_id;
784 case MPI_CONTEXT_REPLY_TYPE_LAN:
785 mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
787 reply_desc = MPT_REPLY_EMPTY;
790 mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
791 reply_desc = MPT_REPLY_EMPTY;
794 if (reply_desc == MPT_REPLY_EMPTY) {
795 if (ntrips++ > 1000) {
802 cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
803 req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
804 if (req_index < MPT_MAX_REQUESTS(mpt)) {
805 req = &mpt->request_pool[req_index];
807 mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc =="
808 " 0x%x)\n", req_index, reply_desc);
811 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
812 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
813 free_rf = mpt_reply_handlers[cb_index](mpt, req,
814 reply_desc, reply_frame);
816 if (reply_frame != NULL && free_rf) {
817 bus_dmamap_sync_range(mpt->reply_dmat,
818 mpt->reply_dmap, offset, MPT_REPLY_SIZE,
819 BUS_DMASYNC_PREREAD);
820 mpt_free_reply(mpt, reply_baddr);
824 * If we got ourselves disabled, don't get stuck in a loop
827 mpt_disable_ints(mpt);
830 if (ntrips++ > 1000) {
834 mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n");
837 /******************************* Error Recovery *******************************/
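/*
 * mpt_complete_request_chain() fakes up a minimal reply frame carrying the
 * caller-supplied IOCStatus and runs every queued request through its
 * normal reply handler, so outstanding commands complete with a sensible
 * error status after an IOC reset.
 */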
839 mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
842 MSG_DEFAULT_REPLY ioc_status_frame;
845 memset(&ioc_status_frame, 0, sizeof(ioc_status_frame));
846 ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
847 ioc_status_frame.IOCStatus = iocstatus;
848 while((req = TAILQ_FIRST(chain)) != NULL) {
849 MSG_REQUEST_HEADER *msg_hdr;
852 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
853 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
854 msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
855 ioc_status_frame.Function = msg_hdr->Function;
856 ioc_status_frame.MsgContext = msg_hdr->MsgContext;
857 cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
858 mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
860 if (mpt_req_on_pending_list(mpt, req) != 0)
861 TAILQ_REMOVE(chain, req, links);
865 /********************************* Diagnostics ********************************/
867 * Perform a diagnostic dump of a reply frame.
870 mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
872 mpt_prt(mpt, "Address Reply:\n");
873 mpt_print_reply(reply_frame);
876 /******************************* Doorbell Access ******************************/
877 static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
878 static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);
880 static __inline uint32_t
881 mpt_rd_db(struct mpt_softc *mpt)
883 return mpt_read(mpt, MPT_OFFSET_DOORBELL);
886 static __inline uint32_t
887 mpt_rd_intr(struct mpt_softc *mpt)
889 return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
892 /* Busy wait for a doorbell to be read by the IOC */
894 mpt_wait_db_ack(struct mpt_softc *mpt)
897 for (i=0; i < MPT_MAX_WAIT; i++) {
898 if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
899 maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
907 /* Busy wait for a doorbell interrupt */
909 mpt_wait_db_int(struct mpt_softc *mpt)
912 for (i = 0; i < MPT_MAX_WAIT; i++) {
913 if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
914 maxwait_int = i > maxwait_int ? i : maxwait_int;
922 /* Check that the IOC is in the RUNNING state */
924 mpt_check_doorbell(struct mpt_softc *mpt)
926 uint32_t db = mpt_rd_db(mpt);
927 if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
928 mpt_prt(mpt, "Device not running\n");
933 /* Wait for the IOC to transition to a given state */
935 mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
939 for (i = 0; i < MPT_MAX_WAIT; i++) {
940 uint32_t db = mpt_rd_db(mpt);
941 if (MPT_STATE(db) == state) {
942 maxwait_state = i > maxwait_state ? i : maxwait_state;
951 /************************* Initialization/Configuration ************************/
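/*
 * Rough bring-up sequence implemented by the routines below: get the IOC
 * into the READY state (soft reset, or hard reset plus optional firmware
 * download), fetch IOC and port facts over the doorbell handshake, send the
 * IOC_INIT request, and finally enable ports and asynchronous events.
 */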
952 static int mpt_download_fw(struct mpt_softc *mpt);
954 /* Issue the reset COMMAND to the IOC */
956 mpt_soft_reset(struct mpt_softc *mpt)
958 mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");
960 /* Have to use hard reset if we are not in Running state */
961 if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
962 mpt_prt(mpt, "soft reset failed: device not running\n");
966 /* If the doorbell is in use we don't have a chance of getting
967 * a word in, since the IOC probably crashed while processing a
968 * message. So don't waste our time.
970 if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
971 mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
975 /* Send the reset request to the IOC */
976 mpt_write(mpt, MPT_OFFSET_DOORBELL,
977 MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
978 if (mpt_wait_db_ack(mpt) != MPT_OK) {
979 mpt_prt(mpt, "soft reset failed: ack timeout\n");
983 /* Wait for the IOC to reload and come out of reset state */
984 if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
985 mpt_prt(mpt, "soft reset failed: device did not restart\n");
993 mpt_enable_diag_mode(struct mpt_softc *mpt)
1000 if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
1003 /* Enable diagnostic registers */
1004 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
1005 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
1006 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
1007 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
1008 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
1009 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);
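/*
 * The write sequence above is the magic key that sets MPI_DIAG_DRWE and
 * unlocks write access to MPT_OFFSET_DIAGNOSTIC (mpt_enable_diag_mode()
 * returns early if DRWE is already set).
 */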
1019 mpt_disable_diag_mode(struct mpt_softc *mpt)
1021 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
1024 /* This is a magic diagnostic reset that resets all the ARM
1025 * processors in the chip.
1028 mpt_hard_reset(struct mpt_softc *mpt)
1034 mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");
1036 error = mpt_enable_diag_mode(mpt);
1038 mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n");
1039 mpt_prt(mpt, "Trying to reset anyway.\n");
1042 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
1045 * This appears to be a workaround required for some
1046 * firmware or hardware revs.
1048 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
1051 /* Diag. port is now active so we can now hit the reset bit */
1052 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);
1055 * Ensure that the reset has finished. We delay 1ms
1056 * prior to reading the register to make sure the chip
1057 * has sufficiently completed its reset to handle register accesses.
1063 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
1064 } while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);
1067 mpt_prt(mpt, "WARNING - Failed hard reset! "
1068 "Trying to initialize anyway.\n");
1072 * If we have firmware to download, it must be loaded before
1073 * the controller will become operational. Do so now.
1075 if (mpt->fw_image != NULL) {
1077 error = mpt_download_fw(mpt);
1080 mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
1081 mpt_prt(mpt, "Trying to initialize anyway.\n");
1086 * Resetting the controller should have disabled write
1087 * access to the diagnostic registers, but disable
1088 * manually to be sure.
1090 mpt_disable_diag_mode(mpt);
1094 mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
1097 * Complete all pending requests with a status
1098 * appropriate for an IOC reset.
1100 mpt_complete_request_chain(mpt, &mpt->request_pending_list,
1101 MPI_IOCSTATUS_INVALID_STATE);
1106 * Reset the IOC when needed. Try software command first then if needed
1107 * poke at the magic diagnostic reset. Note that a hard reset resets
1108 * *both* IOCs on dual function chips (FC929 && LSI1030) and also
1109 * fouls up the PCI configuration registers.
1112 mpt_reset(struct mpt_softc *mpt, int reinit)
1114 struct mpt_personality *pers;
1119 * Try a soft reset. If that fails, get out the big hammer.
1122 if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
1124 for (cnt = 0; cnt < 5; cnt++) {
1125 /* Failed; do a hard reset */
1126 mpt_hard_reset(mpt);
1129 * Wait for the IOC to reload
1130 * and come out of reset state
1132 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
1133 if (ret == MPT_OK) {
1137 * Okay- try to check again...
1139 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
1140 if (ret == MPT_OK) {
1143 mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
1148 if (retry_cnt == 0) {
1150 * Invoke reset handlers. We bump the reset count so
1151 * that mpt_wait_req() understands that regardless of
1152 * the specified wait condition, it should stop its wait.
1155 MPT_PERS_FOREACH(mpt, pers)
1156 pers->reset(mpt, ret);
1160 ret = mpt_enable_ioc(mpt, 1);
1161 if (ret == MPT_OK) {
1162 mpt_enable_ints(mpt);
1165 if (ret != MPT_OK && retry_cnt++ < 2) {
1171 /* Return a command buffer to the free queue */
1173 mpt_free_request(struct mpt_softc *mpt, request_t *req)
1176 struct mpt_evtf_record *record;
1177 uint32_t offset, reply_baddr;
1179 if (req == NULL || req != &mpt->request_pool[req->index]) {
1180 panic("mpt_free_request bad req ptr\n");
1183 if ((nxt = req->chain) != NULL) {
1185 mpt_free_request(mpt, nxt); /* NB: recursion */
1187 KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
1188 KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));
1189 MPT_LOCK_ASSERT(mpt);
1190 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
1191 ("mpt_free_request: req %p:%u func %x already on freelist",
1192 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1193 KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
1194 ("mpt_free_request: req %p:%u func %x on pending list",
1195 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1197 mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__);
1201 if (LIST_EMPTY(&mpt->ack_frames)) {
1203 * Insert free ones at the tail
1206 req->state = REQ_STATE_FREE;
1208 memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER));
1210 TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
1211 if (mpt->getreqwaiter != 0) {
1212 mpt->getreqwaiter = 0;
1213 wakeup(&mpt->request_free_list);
1219 * Process an ack frame deferred due to resource shortage.
1221 record = LIST_FIRST(&mpt->ack_frames);
1222 LIST_REMOVE(record, links);
1223 req->state = REQ_STATE_ALLOCATED;
1224 mpt_assign_serno(mpt, req);
1225 mpt_send_event_ack(mpt, req, &record->reply, record->context);
1226 offset = (uint32_t)((uint8_t *)record - mpt->reply);
1227 reply_baddr = offset + (mpt->reply_phys & 0xFFFFFFFF);
1228 bus_dmamap_sync_range(mpt->reply_dmat, mpt->reply_dmap, offset,
1229 MPT_REPLY_SIZE, BUS_DMASYNC_PREREAD);
1230 mpt_free_reply(mpt, reply_baddr);
1233 /* Get a command buffer from the free queue */
1235 mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
1240 MPT_LOCK_ASSERT(mpt);
1241 req = TAILQ_FIRST(&mpt->request_free_list);
1243 KASSERT(req == &mpt->request_pool[req->index],
1244 ("mpt_get_request: corrupted request free list\n"));
1245 KASSERT(req->state == REQ_STATE_FREE,
1246 ("req %p:%u not free on free list %x index %d function %x",
1247 req, req->serno, req->state, req->index,
1248 ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1249 TAILQ_REMOVE(&mpt->request_free_list, req, links);
1250 req->state = REQ_STATE_ALLOCATED;
1252 mpt_assign_serno(mpt, req);
1253 } else if (sleep_ok != 0) {
1254 mpt->getreqwaiter = 1;
1255 mpt_sleep(mpt, &mpt->request_free_list, 0, "mptgreq", 0);
1261 /* Pass the command to the IOC */
1263 mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
1265 if (mpt->verbose > MPT_PRT_DEBUG2) {
1266 mpt_dump_request(mpt, req);
1268 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
1269 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1270 req->state |= REQ_STATE_QUEUED;
1271 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
1272 ("req %p:%u func %x on freelist list in mpt_send_cmd",
1273 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1274 KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
1275 ("req %p:%u func %x already on pending list in mpt_send_cmd",
1276 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1277 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
1278 mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
1282 * Wait for a request to complete.
1284 * Inputs:
1285 * mpt softc of controller executing request
1286 * req request to wait for
1287 * sleep_ok nonzero implies may sleep in this context
1288 * time_ms timeout in ms. 0 implies no timeout.
1290 * Return:
1291 * 0 Request completed
1292 * non-0 Timeout fired before request completion.
1295 mpt_wait_req(struct mpt_softc *mpt, request_t *req,
1296 mpt_req_state_t state, mpt_req_state_t mask,
1297 int sleep_ok, int time_ms)
1304 * timeout is in ms. 0 indicates infinite wait.
1305 * Convert to ticks or 500us units depending on whether we sleep or poll.
1308 if (sleep_ok != 0) {
1309 timeout = (time_ms * hz) / 1000;
1311 timeout = time_ms * 2;
1313 req->state |= REQ_STATE_NEED_WAKEUP;
1314 mask &= ~REQ_STATE_NEED_WAKEUP;
1315 saved_cnt = mpt->reset_cnt;
1316 while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
1317 if (sleep_ok != 0) {
1318 error = mpt_sleep(mpt, req, 0, "mptreq", timeout);
1319 if (error == EWOULDBLOCK) {
1324 if (time_ms != 0 && --timeout == 0) {
1331 req->state &= ~REQ_STATE_NEED_WAKEUP;
1332 if (mpt->reset_cnt != saved_cnt) {
1335 if (time_ms && timeout <= 0) {
1336 MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf;
1337 req->state |= REQ_STATE_TIMEDOUT;
1338 mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
1345 * Send a command to the IOC via the handshake register.
1347 * Only done at initialization time and for certain unusual
1348 * commands such as device/bus reset as specified by LSI.
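 * The protocol, as implemented below: write the function code and dword
 * count to the doorbell, wait for and clear the doorbell interrupt, wait
 * for the IOC to acknowledge, then feed the command to the doorbell one
 * 32-bit word at a time, waiting for an ack after each word.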
1351 mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
1354 uint32_t data, *data32;
1356 /* Check condition of the IOC */
1357 data = mpt_rd_db(mpt);
1358 if ((MPT_STATE(data) != MPT_DB_STATE_READY
1359 && MPT_STATE(data) != MPT_DB_STATE_RUNNING
1360 && MPT_STATE(data) != MPT_DB_STATE_FAULT)
1361 || MPT_DB_IS_IN_USE(data)) {
1362 mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
1367 /* We move things in 32 bit chunks */
1368 len = (len + 3) >> 2;
1371 /* Clear any left over pending doorbell interrupts */
1372 if (MPT_DB_INTR(mpt_rd_intr(mpt)))
1373 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1376 * Tell the handshake reg. we are going to send a command
1377 * and how long it is going to be.
1379 data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
1380 (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
1381 mpt_write(mpt, MPT_OFFSET_DOORBELL, data);
1383 /* Wait for the chip to notice */
1384 if (mpt_wait_db_int(mpt) != MPT_OK) {
1385 mpt_prt(mpt, "mpt_send_handshake_cmd: db ignored\n");
1389 /* Clear the interrupt */
1390 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1392 if (mpt_wait_db_ack(mpt) != MPT_OK) {
1393 mpt_prt(mpt, "mpt_send_handshake_cmd: db ack timed out\n");
1397 /* Send the command */
1398 for (i = 0; i < len; i++) {
1399 mpt_write(mpt, MPT_OFFSET_DOORBELL, htole32(*data32++));
1400 if (mpt_wait_db_ack(mpt) != MPT_OK) {
1402 "mpt_send_handshake_cmd: timeout @ index %d\n", i);
1409 /* Get the response from the handshake register */
1411 mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
1413 int left, reply_left;
1416 MSG_DEFAULT_REPLY *hdr;
1418 /* We move things out in 16 bit chunks */
1420 data16 = (u_int16_t *)reply;
1422 hdr = (MSG_DEFAULT_REPLY *)reply;
1424 /* Get first word */
1425 if (mpt_wait_db_int(mpt) != MPT_OK) {
1426 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
1429 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1430 *data16++ = le16toh(data & MPT_DB_DATA_MASK);
1431 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1433 /* Get Second Word */
1434 if (mpt_wait_db_int(mpt) != MPT_OK) {
1435 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
1438 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1439 *data16++ = le16toh(data & MPT_DB_DATA_MASK);
1440 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1443 * With the second word, we can now look at the length.
1444 * Warn about a reply that's too short (except for IOC FACTS REPLY)
1446 if ((reply_len >> 1) != hdr->MsgLength &&
1447 (hdr->Function != MPI_FUNCTION_IOC_FACTS)){
1448 mpt_prt(mpt, "reply length does not match message length: "
1449 "got %x; expected %zx for function %x\n",
1450 hdr->MsgLength << 2, reply_len << 1, hdr->Function);
1453 /* Get rest of the reply; but don't overflow the provided buffer */
1454 left = (hdr->MsgLength << 1) - 2;
1455 reply_left = reply_len - 2;
1459 if (mpt_wait_db_int(mpt) != MPT_OK) {
1460 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
1463 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1464 datum = le16toh(data & MPT_DB_DATA_MASK);
1466 if (reply_left-- > 0)
1469 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1472 /* One more wait & clear at the end */
1473 if (mpt_wait_db_int(mpt) != MPT_OK) {
1474 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
1477 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1479 if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1480 if (mpt->verbose >= MPT_PRT_TRACE)
1481 mpt_print_reply(hdr);
1482 return (MPT_FAIL | hdr->IOCStatus);
1489 mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
1491 MSG_IOC_FACTS f_req;
1494 memset(&f_req, 0, sizeof f_req);
1495 f_req.Function = MPI_FUNCTION_IOC_FACTS;
1496 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1497 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
1501 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
1506 mpt_get_portfacts(struct mpt_softc *mpt, U8 port, MSG_PORT_FACTS_REPLY *freplp)
1508 MSG_PORT_FACTS f_req;
1511 memset(&f_req, 0, sizeof f_req);
1512 f_req.Function = MPI_FUNCTION_PORT_FACTS;
1513 f_req.PortNumber = port;
1514 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1515 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
1519 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
1524 * Send the initialization request. This is where we specify how many
1525 * SCSI busses and how many devices per bus we wish to emulate.
1526 * This is also the command that specifies the max size of the reply
1527 * frames from the IOC that we will be allocating.
1530 mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
1534 MSG_IOC_INIT_REPLY reply;
1536 memset(&init, 0, sizeof init);
1538 init.Function = MPI_FUNCTION_IOC_INIT;
1539 init.MaxDevices = 0; /* at least 256 devices per bus */
1540 init.MaxBuses = 16; /* at least 16 busses */
1542 init.MsgVersion = htole16(MPI_VERSION);
1543 init.HeaderVersion = htole16(MPI_HEADER_VERSION);
1544 init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
1545 init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1547 if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
1551 error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
1557 * Utility routine to read configuration headers and pages
1560 mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, cfgparms_t *params,
1561 bus_addr_t addr, bus_size_t len, int sleep_ok, int timeout_ms)
1566 cfgp = req->req_vbuf;
1567 memset(cfgp, 0, sizeof *cfgp);
1568 cfgp->Action = params->Action;
1569 cfgp->Function = MPI_FUNCTION_CONFIG;
1570 cfgp->Header.PageVersion = params->PageVersion;
1571 cfgp->Header.PageNumber = params->PageNumber;
1572 cfgp->PageAddress = htole32(params->PageAddress);
1573 if ((params->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
1574 MPI_CONFIG_PAGETYPE_EXTENDED) {
1575 cfgp->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
1576 cfgp->Header.PageLength = 0;
1577 cfgp->ExtPageLength = htole16(params->ExtPageLength);
1578 cfgp->ExtPageType = params->ExtPageType;
1580 cfgp->Header.PageType = params->PageType;
1581 cfgp->Header.PageLength = params->PageLength;
1583 se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
1584 se->Address = htole32(addr);
1585 MPI_pSGE_SET_LENGTH(se, len);
1586 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1587 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1588 MPI_SGE_FLAGS_END_OF_LIST |
1589 ((params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
1590 || params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
1591 ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
1592 se->FlagsLength = htole32(se->FlagsLength);
1593 cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
1595 mpt_check_doorbell(mpt);
1596 mpt_send_cmd(mpt, req);
1597 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
1598 sleep_ok, timeout_ms));
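/*
 * The header and page readers below each wrap mpt_issue_cfg_req(): the
 * header variants pass a zero-length SGE to fetch just the page header
 * (which supplies PageLength or ExtPageLength), while the page variants
 * point the SGE at req_pbuf + MPT_RQSL(mpt) and copy the result back out
 * of the request buffer.
 */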
1602 mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion, int PageNumber,
1603 uint32_t PageAddress, int ExtPageType,
1604 CONFIG_EXTENDED_PAGE_HEADER *rslt,
1605 int sleep_ok, int timeout_ms)
1609 MSG_CONFIG_REPLY *cfgp;
1612 req = mpt_get_request(mpt, sleep_ok);
1614 mpt_prt(mpt, "mpt_extread_cfg_header: Get request failed!\n");
1618 params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
1619 params.PageVersion = PageVersion;
1620 params.PageLength = 0;
1621 params.PageNumber = PageNumber;
1622 params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
1623 params.PageAddress = PageAddress;
1624 params.ExtPageType = ExtPageType;
1625 params.ExtPageLength = 0;
1626 error = mpt_issue_cfg_req(mpt, req, ¶ms, /*addr*/0, /*len*/0,
1627 sleep_ok, timeout_ms);
1630 * Leave the request. Without resetting the chip, it's
1631 * still owned by it and we'll just get into trouble
1632 * freeing it now. Mark it as abandoned so that if it
1633 * shows up later it can be freed.
1635 mpt_prt(mpt, "read_extcfg_header timed out\n");
1639 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
1640 case MPI_IOCSTATUS_SUCCESS:
1641 cfgp = req->req_vbuf;
1642 rslt->PageVersion = cfgp->Header.PageVersion;
1643 rslt->PageNumber = cfgp->Header.PageNumber;
1644 rslt->PageType = cfgp->Header.PageType;
1645 rslt->ExtPageLength = le16toh(cfgp->ExtPageLength);
1646 rslt->ExtPageType = cfgp->ExtPageType;
1649 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
1650 mpt_lprt(mpt, MPT_PRT_DEBUG,
1651 "Invalid Page Type %d Number %d Addr 0x%0x\n",
1652 MPI_CONFIG_PAGETYPE_EXTENDED, PageNumber, PageAddress);
1656 mpt_prt(mpt, "mpt_read_extcfg_header: Config Info Status %x\n",
1661 mpt_free_request(mpt, req);
1666 mpt_read_extcfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1667 CONFIG_EXTENDED_PAGE_HEADER *hdr, void *buf, size_t len,
1668 int sleep_ok, int timeout_ms)
1674 req = mpt_get_request(mpt, sleep_ok);
1676 mpt_prt(mpt, "mpt_read_extcfg_page: Get request failed!\n");
1680 params.Action = Action;
1681 params.PageVersion = hdr->PageVersion;
1682 params.PageLength = 0;
1683 params.PageNumber = hdr->PageNumber;
1684 params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
1685 params.PageAddress = PageAddress;
1686 params.ExtPageType = hdr->ExtPageType;
1687 params.ExtPageLength = hdr->ExtPageLength;
1688 error = mpt_issue_cfg_req(mpt, req, ¶ms,
1689 req->req_pbuf + MPT_RQSL(mpt),
1690 len, sleep_ok, timeout_ms);
1692 mpt_prt(mpt, "read_extcfg_page(%d) timed out\n", Action);
1696 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1697 mpt_prt(mpt, "mpt_read_extcfg_page: Config Info Status %x\n",
1699 mpt_free_request(mpt, req);
1702 memcpy(buf, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
1703 mpt_free_request(mpt, req);
1708 mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
1709 uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
1710 int sleep_ok, int timeout_ms)
1717 req = mpt_get_request(mpt, sleep_ok);
1719 mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
1723 params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
1724 params.PageVersion = 0;
1725 params.PageLength = 0;
1726 params.PageNumber = PageNumber;
1727 params.PageType = PageType;
1728 params.PageAddress = PageAddress;
1729 error = mpt_issue_cfg_req(mpt, req, ¶ms, /*addr*/0, /*len*/0,
1730 sleep_ok, timeout_ms);
1733 * Leave the request. Without resetting the chip, it's
1734 * still owned by it and we'll just get into trouble
1735 * freeing it now. Mark it as abandoned so that if it
1736 * shows up later it can be freed.
1738 mpt_prt(mpt, "read_cfg_header timed out\n");
1742 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
1743 case MPI_IOCSTATUS_SUCCESS:
1744 cfgp = req->req_vbuf;
1745 bcopy(&cfgp->Header, rslt, sizeof(*rslt));
1748 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
1749 mpt_lprt(mpt, MPT_PRT_DEBUG,
1750 "Invalid Page Type %d Number %d Addr 0x%0x\n",
1751 PageType, PageNumber, PageAddress);
1755 mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
1760 mpt_free_request(mpt, req);
1765 mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1766 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1773 req = mpt_get_request(mpt, sleep_ok);
1775 mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
1779 params.Action = Action;
1780 params.PageVersion = hdr->PageVersion;
1781 params.PageLength = hdr->PageLength;
1782 params.PageNumber = hdr->PageNumber;
1783 params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
1784 params.PageAddress = PageAddress;
1785 error = mpt_issue_cfg_req(mpt, req, ¶ms,
1786 req->req_pbuf + MPT_RQSL(mpt),
1787 len, sleep_ok, timeout_ms);
1789 mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
1793 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1794 mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
1796 mpt_free_request(mpt, req);
1799 memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
1800 mpt_free_request(mpt, req);
1805 mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1806 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1814 hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
1815 if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
1816 hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
1817 mpt_prt(mpt, "page type 0x%x not changeable\n",
1818 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
1824 * We shouldn't mask off other bits here.
1826 hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK;
1829 req = mpt_get_request(mpt, sleep_ok);
1833 memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt), hdr, len);
1836 * There isn't any point in restoring stripped out attributes
1837 * if you then mask them going down to issue the request.
1840 params.Action = Action;
1841 params.PageVersion = hdr->PageVersion;
1842 params.PageLength = hdr->PageLength;
1843 params.PageNumber = hdr->PageNumber;
1844 params.PageAddress = PageAddress;
1846 /* Restore stripped out attributes */
1847 hdr->PageType |= hdr_attr;
1848 params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
1850 params.PageType = hdr->PageType;
1852 error = mpt_issue_cfg_req(mpt, req, ¶ms,
1853 req->req_pbuf + MPT_RQSL(mpt),
1854 len, sleep_ok, timeout_ms);
1856 mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
1860 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1861 mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
1863 mpt_free_request(mpt, req);
1866 mpt_free_request(mpt, req);
1871 * Read IOC configuration information
1874 mpt_read_config_info_ioc(struct mpt_softc *mpt)
1876 CONFIG_PAGE_HEADER hdr;
1877 struct mpt_raid_volume *mpt_raid;
1882 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
1883 2, 0, &hdr, FALSE, 5000);
1885 * If it's an invalid page, so what? Not a supported function....
1894 mpt_lprt(mpt, MPT_PRT_DEBUG,
1895 "IOC Page 2 Header: Version %x len %x PageNumber %x PageType %x\n",
1896 hdr.PageVersion, hdr.PageLength << 2,
1897 hdr.PageNumber, hdr.PageType);
1899 len = hdr.PageLength * sizeof(uint32_t);
1900 mpt->ioc_page2 = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1901 if (mpt->ioc_page2 == NULL) {
1902 mpt_prt(mpt, "unable to allocate memory for IOC page 2\n");
1903 mpt_raid_free_mem(mpt);
1906 memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
1907 rv = mpt_read_cur_cfg_page(mpt, 0,
1908 &mpt->ioc_page2->Header, len, FALSE, 5000);
1910 mpt_prt(mpt, "failed to read IOC Page 2\n");
1911 mpt_raid_free_mem(mpt);
1914 mpt2host_config_page_ioc2(mpt->ioc_page2);
1916 if (mpt->ioc_page2->CapabilitiesFlags != 0) {
1919 mpt_prt(mpt, "Capabilities: (");
1920 for (mask = 1; mask != 0; mask <<= 1) {
1921 if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) {
1925 case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
1926 mpt_prtc(mpt, " RAID-0");
1928 case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT:
1929 mpt_prtc(mpt, " RAID-1E");
1931 case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT:
1932 mpt_prtc(mpt, " RAID-1");
1934 case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT:
1935 mpt_prtc(mpt, " SES");
1937 case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT:
1938 mpt_prtc(mpt, " SAFTE");
1940 case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT:
1941 mpt_prtc(mpt, " Multi-Channel-Arrays");
1946 mpt_prtc(mpt, " )\n");
1947 if ((mpt->ioc_page2->CapabilitiesFlags
1948 & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
1949 | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
1950 | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) {
1951 mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
1952 mpt->ioc_page2->NumActiveVolumes,
1953 mpt->ioc_page2->NumActiveVolumes != 1
1955 mpt->ioc_page2->MaxVolumes);
1956 mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n",
1957 mpt->ioc_page2->NumActivePhysDisks,
1958 mpt->ioc_page2->NumActivePhysDisks != 1
1960 mpt->ioc_page2->MaxPhysDisks);
1964 len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
1965 mpt->raid_volumes = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1966 if (mpt->raid_volumes == NULL) {
1967 mpt_prt(mpt, "Could not allocate RAID volume data\n");
1968 mpt_raid_free_mem(mpt);
1973 * Copy critical data out of ioc_page2 so that we can
1974 * safely refresh the page without windows of unreliable data.
1977 mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes;
1979 len = sizeof(*mpt->raid_volumes->config_page) +
1980 (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1));
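/*
 * Volume page 0 ends in a variable-length array of physical disk entries;
 * the structure already includes one RAID_VOL0_PHYS_DISK, hence the
 * MaxPhysDisks - 1 additional entries sized above.
 */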
1981 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1982 mpt_raid = &mpt->raid_volumes[i];
1983 mpt_raid->config_page =
1984 kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1985 if (mpt_raid->config_page == NULL) {
1986 mpt_prt(mpt, "Could not allocate RAID page data\n");
1987 mpt_raid_free_mem(mpt);
1991 mpt->raid_page0_len = len;
1993 len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
1994 mpt->raid_disks = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1995 if (mpt->raid_disks == NULL) {
1996 mpt_prt(mpt, "Could not allocate RAID disk data\n");
1997 mpt_raid_free_mem(mpt);
2000 mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks;
2005 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
2006 3, 0, &hdr, FALSE, 5000);
2008 mpt_raid_free_mem(mpt);
2012 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
2013 hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);
2015 len = hdr.PageLength * sizeof(uint32_t);
2016 mpt->ioc_page3 = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
2017 if (mpt->ioc_page3 == NULL) {
2018 mpt_prt(mpt, "unable to allocate memory for IOC page 3\n");
2019 mpt_raid_free_mem(mpt);
2022 memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
2023 rv = mpt_read_cur_cfg_page(mpt, 0,
2024 &mpt->ioc_page3->Header, len, FALSE, 5000);
2026 mpt_raid_free_mem(mpt);
2029 mpt2host_config_page_ioc3(mpt->ioc_page3);
2030 mpt_raid_wakeup(mpt);
2038 mpt_send_port_enable(struct mpt_softc *mpt, int port)
2041 MSG_PORT_ENABLE *enable_req;
2044 req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
2048 enable_req = req->req_vbuf;
2049 memset(enable_req, 0, MPT_RQSL(mpt));
2051 enable_req->Function = MPI_FUNCTION_PORT_ENABLE;
2052 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
2053 enable_req->PortNumber = port;
2055 mpt_check_doorbell(mpt);
2056 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);
2058 mpt_send_cmd(mpt, req);
2059 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
2060 FALSE, (mpt->is_sas || mpt->is_fc)? 30000 : 3000);
2062 mpt_prt(mpt, "port %d enable timed out\n", port);
2065 mpt_free_request(mpt, req);
2066 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port);
2071 * Enable/Disable asynchronous event reporting.
2074 mpt_send_event_request(struct mpt_softc *mpt, int onoff)
2077 MSG_EVENT_NOTIFY *enable_req;
2079 req = mpt_get_request(mpt, FALSE);
2083 enable_req = req->req_vbuf;
2084 memset(enable_req, 0, sizeof *enable_req);
2086 enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION;
2087 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
2088 enable_req->Switch = onoff;
2090 mpt_check_doorbell(mpt);
2091 mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n",
2092 onoff ? "en" : "dis");
2094 * Send the command off, but don't wait for it.
2096 mpt_send_cmd(mpt, req);
2101 * Un-mask the interrupts on the chip.
2104 mpt_enable_ints(struct mpt_softc *mpt)
2106 /* Unmask everything except the doorbell interrupt */
2107 mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK);
2111 * Mask the interrupts on the chip.
2114 mpt_disable_ints(struct mpt_softc *mpt)
2116 /* Mask all interrupts */
2117 mpt_write(mpt, MPT_OFFSET_INTR_MASK,
2118 MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
2122 mpt_sysctl_attach(struct mpt_softc *mpt)
2124 SYSCTL_ADD_UINT(&mpt->mpt_sysctl_ctx,
2125 SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
2126 "debug", CTLFLAG_RW, &mpt->verbose, 0,
2127 "Debugging/Verbose level");
2128 SYSCTL_ADD_UINT(&mpt->mpt_sysctl_ctx,
2129 SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
2130 "role", CTLFLAG_RD, &mpt->role, 0,
2132 #ifdef MPT_TEST_MULTIPATH
2133 SYSCTL_ADD_INT(&mpt->mpt_sysctl_ctx,
2134 SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
2135 "failure_id", CTLFLAG_RW, &mpt->failure_id, -1,
2136 "Next Target to Fail");
2141 mpt_attach(struct mpt_softc *mpt)
2143 struct mpt_personality *pers;
2147 mpt_core_attach(mpt);
2148 mpt_core_enable(mpt);
2150 TAILQ_INSERT_TAIL(&mpt_tailq, mpt, links);
2151 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
2152 pers = mpt_personalities[i];
2156 if (pers->probe(mpt) == 0) {
2157 error = pers->attach(mpt);
2162 mpt->mpt_pers_mask |= (0x1 << pers->id);
2168 * Now that we've attached everything, do the enable function
2169 * for all of the personalities. This allows the personalities
2170 * to do setups that are appropriate for them prior to enabling
2173 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
2174 pers = mpt_personalities[i];
2175 if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) {
2176 error = pers->enable(mpt);
2178 mpt_prt(mpt, "personality %s attached but would"
2179 " not enable (%d)\n", pers->name, error);
2189 mpt_shutdown(struct mpt_softc *mpt)
2191 struct mpt_personality *pers;
2193 MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2194 pers->shutdown(mpt);
2200 mpt_detach(struct mpt_softc *mpt)
2202 struct mpt_personality *pers;
2204 MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2206 mpt->mpt_pers_mask &= ~(0x1 << pers->id);
2209 TAILQ_REMOVE(&mpt_tailq, mpt, links);
2214 mpt_core_load(struct mpt_personality *pers)
2219 * Set up core handlers and insert the default handler
2220 * into all "empty slots".
2222 for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) {
2223 mpt_reply_handlers[i] = mpt_default_reply_handler;
2226 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
2227 mpt_event_reply_handler;
2228 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
2229 mpt_config_reply_handler;
2230 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
2231 mpt_handshake_reply_handler;
2236 * Initialize per-instance driver data and perform
2237 * initial controller configuration.
2240 mpt_core_attach(struct mpt_softc *mpt)
2244 LIST_INIT(&mpt->ack_frames);
2245 /* Put all request buffers on the free list */
2246 TAILQ_INIT(&mpt->request_pending_list);
2247 TAILQ_INIT(&mpt->request_free_list);
2248 TAILQ_INIT(&mpt->request_timeout_list);
2249 for (val = 0; val < MPT_MAX_LUNS; val++) {
2250 STAILQ_INIT(&mpt->trt[val].atios);
2251 STAILQ_INIT(&mpt->trt[val].inots);
2253 STAILQ_INIT(&mpt->trt_wildcard.atios);
2254 STAILQ_INIT(&mpt->trt_wildcard.inots);
2255 #ifdef MPT_TEST_MULTIPATH
2256 mpt->failure_id = -1;
2258 mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE;
2259 sysctl_ctx_init(&mpt->mpt_sysctl_ctx);
2260 mpt->mpt_sysctl_tree = SYSCTL_ADD_NODE(&mpt->mpt_sysctl_ctx,
2261 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
2262 device_get_nameunit(mpt->dev), CTLFLAG_RD, 0, "");
2263 if (mpt->mpt_sysctl_tree == NULL) {
2264 device_printf(mpt->dev, "can't add sysctl node\n");
2267 mpt_sysctl_attach(mpt);
2268 mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
2269 mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));
2272 error = mpt_configure_ioc(mpt, 0, 0);
2279 mpt_core_enable(struct mpt_softc *mpt)
2282 * We enter with the IOC enabled, but async events
2283 * not enabled, ports not enabled and interrupts
2284 * not enabled.
2289 * Enable asynchronous event reporting - all personalities
2290 * have attached, so they should now be able to field
2291 * async events.
2293 mpt_send_event_request(mpt, 1);
2296 * Catch any pending interrupts
2298 * This seems to be crucial; otherwise
2299 * the portenable below times out.
2306 mpt_enable_ints(mpt);
2309 * Catch any pending interrupts
2311 * This seems to be crucial; otherwise
2312 * the portenable below times out.
2319 if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2320 mpt_prt(mpt, "failed to enable port 0\n");
2329 mpt_core_shutdown(struct mpt_softc *mpt)
2331 mpt_disable_ints(mpt);
2335 mpt_core_detach(struct mpt_softc *mpt)
2342 mpt_disable_ints(mpt);
2344 /* Make sure no request has pending timeouts. */
2345 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
2346 request_t *req = &mpt->request_pool[val];
2347 callout_stop(&req->callout);
2350 mpt_dma_buf_free(mpt);
2352 if (mpt->mpt_sysctl_tree != NULL)
2353 sysctl_ctx_free(&mpt->mpt_sysctl_ctx);
2357 mpt_core_unload(struct mpt_personality *pers)
2359 /* Unload is always successful. */
2363 #define FW_UPLOAD_REQ_SIZE \
2364 (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION) \
2365 + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32))
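/*
 * In other words, the handshake buffer built below holds the fixed
 * FW_UPLOAD message with its generic SGL union replaced by a
 * transaction-context element plus one 32-bit simple SGE describing
 * the host-side image buffer.
 */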
2368 mpt_upload_fw(struct mpt_softc *mpt)
2370 uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE];
2371 MSG_FW_UPLOAD_REPLY fw_reply;
2372 MSG_FW_UPLOAD *fw_req;
2373 FW_UPLOAD_TCSGE *tsge;
2378 memset(&fw_req_buf, 0, sizeof(fw_req_buf));
2379 fw_req = (MSG_FW_UPLOAD *)fw_req_buf;
2380 fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
2381 fw_req->Function = MPI_FUNCTION_FW_UPLOAD;
2382 fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
2383 tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL;
2384 tsge->DetailsLength = 12;
2385 tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
2386 tsge->ImageSize = htole32(mpt->fw_image_size);
2387 sge = (SGE_SIMPLE32 *)(tsge + 1);
2388 flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER
2389 | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT
2390 | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST);
2391 flags <<= MPI_SGE_FLAGS_SHIFT;
2392 sge->FlagsLength = htole32(flags | mpt->fw_image_size);
2393 sge->Address = htole32(mpt->fw_phys);
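/*
 * For reference, FlagsLength packs the SGE flags into the top byte
 * and the transfer length into the low 24 bits, roughly
 *
 *	FlagsLength = (flags << MPI_SGE_FLAGS_SHIFT) | (len & 0xffffff);
 *
 * so a single simple SGE can describe an image of just under 16MB,
 * which comfortably covers IOC firmware images.
 */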
2394 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_PREREAD);
2395 error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf);
2398 error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply);
2399 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_POSTREAD);
2404 mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
2405 uint32_t *data, bus_size_t len)
2409 data_end = data + (roundup2(len, sizeof(uint32_t)) / 4);
2411 pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2413 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
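/*
 * The diagnostic window is an address/data register pair: the loop
 * below relies on the data register advancing the target address
 * after each word written, which is why the address is set only once
 * before streaming the buffer.
 */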
2414 while (data != data_end) {
2415 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
2419 pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2424 mpt_download_fw(struct mpt_softc *mpt)
2426 MpiFwHeader_t *fw_hdr;
2428 uint32_t ext_offset;
2431 mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
2432 mpt->fw_image_size);
2434 error = mpt_enable_diag_mode(mpt);
2436 mpt_prt(mpt, "Could not enter diagnostic mode!\n");
2440 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
2441 MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM);
2443 fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
2444 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_PREWRITE);
2445 mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr,
2447 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_POSTWRITE);
2449 ext_offset = fw_hdr->NextImageHeaderOffset;
2450 while (ext_offset != 0) {
2451 MpiExtImageHeader_t *ext;
2453 ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset);
2454 ext_offset = ext->NextImageHeaderOffset;
2455 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap,
2456 BUS_DMASYNC_PREWRITE);
2457 mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext,
2459 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap,
2460 BUS_DMASYNC_POSTWRITE);
2464 pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2466 /* Set up the address to jump to on reset. */
2467 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
2468 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);
2471 * The controller sets the "flash bad" status after attempting
2472 * to auto-boot from flash. Clear the status so that the controller
2473 * will continue the boot process with our newly installed firmware.
2475 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2476 data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
2477 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2478 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);
2481 pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2485 * Re-enable the processor and clear the boot halt flag.
2487 data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
2488 data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM);
2489 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);
2491 mpt_disable_diag_mode(mpt);
2496 mpt_dma_buf_alloc(struct mpt_softc *mpt)
2498 struct mpt_map_info mi;
2503 /* Create a child tag for data buffers */
2504 if (mpt_dma_tag_create(mpt, mpt->parent_dmat, 1,
2505 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
2506 NULL, NULL, (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE,
2507 mpt->max_cam_seg_cnt, BUS_SPACE_MAXSIZE_32BIT, 0,
2508 &mpt->buffer_dmat) != 0) {
2509 mpt_prt(mpt, "cannot create a dma tag for data buffers\n");
2513 /* Create a child tag for request buffers */
2514 if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0,
2515 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
2516 NULL, NULL, MPT_REQ_MEM_SIZE(mpt), 1, BUS_SPACE_MAXSIZE_32BIT, 0,
2517 &mpt->request_dmat) != 0) {
2518 mpt_prt(mpt, "cannot create a dma tag for requests\n");
2522 /* Allocate some DMA accessible memory for requests */
2523 if (bus_dmamem_alloc(mpt->request_dmat, (void **)&mpt->request,
2524 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &mpt->request_dmap) != 0) {
2525 mpt_prt(mpt, "cannot allocate %d bytes of request memory\n",
2526 MPT_REQ_MEM_SIZE(mpt));
2533 /* Load and lock it into "bus space" */
2534 bus_dmamap_load(mpt->request_dmat, mpt->request_dmap, mpt->request,
2535 MPT_REQ_MEM_SIZE(mpt), mpt_map_rquest, &mi, 0);
2538 mpt_prt(mpt, "error %d loading dma map for DMA request queue\n",
2542 mpt->request_phys = mi.phys;
2545 * Now create per-request dma maps
2548 pptr = mpt->request_phys;
2549 vptr = mpt->request;
2550 end = pptr + MPT_REQ_MEM_SIZE(mpt);
2552 request_t *req = &mpt->request_pool[i];
2555 /* Store location of Request Data */
2556 req->req_pbuf = pptr;
2557 req->req_vbuf = vptr;
2559 pptr += MPT_REQUEST_AREA;
2560 vptr += MPT_REQUEST_AREA;
2562 req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
2563 req->sense_vbuf = (vptr - MPT_SENSE_SIZE);
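/*
 * Schematically, each request now owns one MPT_REQUEST_AREA-sized
 * slice of the contiguous allocation, with the sense buffer carved
 * out of the tail of that same slice:
 *
 *	req_pbuf/req_vbuf                       sense_pbuf/sense_vbuf
 *	|<---------------- MPT_REQUEST_AREA --------------------->|
 *	| request frame, chain frames ...       | MPT_SENSE_SIZE  |
 */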
2565 error = bus_dmamap_create(mpt->buffer_dmat, 0, &req->dmap);
2567 mpt_prt(mpt, "error %d creating per-cmd DMA maps\n",
2577 mpt_dma_buf_free(struct mpt_softc *mpt)
2580 if (mpt->request_dmat == 0) {
2581 mpt_lprt(mpt, MPT_PRT_DEBUG, "already released dma memory\n");
2584 for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
2585 bus_dmamap_destroy(mpt->buffer_dmat, mpt->request_pool[i].dmap);
2587 bus_dmamap_unload(mpt->request_dmat, mpt->request_dmap);
2588 bus_dmamem_free(mpt->request_dmat, mpt->request, mpt->request_dmap);
2589 bus_dma_tag_destroy(mpt->request_dmat);
2590 mpt->request_dmat = 0;
2591 bus_dma_tag_destroy(mpt->buffer_dmat);
2595 * Allocate/Initialize data structures for the controller. Called
2596 * once at instance startup.
2599 mpt_configure_ioc(struct mpt_softc *mpt, int tn, int needreset)
2601 PTR_MSG_PORT_FACTS_REPLY pfp;
2602 int error, port, val;
2605 if (tn == MPT_MAX_TRYS) {
2610 * No need to reset if the IOC is already in the READY state.
2612 * Force reset if initialization failed previously.
2613 * Note that a hard_reset of the second channel of a '929
2614 * will stop operation of the first channel. Hopefully, if the
2615 * first channel is ok, the second will not require a hard
2616 * reset.
2618 if (needreset || MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_READY) {
2619 if (mpt_reset(mpt, FALSE) != MPT_OK) {
2620 return (mpt_configure_ioc(mpt, tn + 1, 1));
2625 if (mpt_get_iocfacts(mpt, &mpt->ioc_facts) != MPT_OK) {
2626 mpt_prt(mpt, "mpt_get_iocfacts failed\n");
2627 return (mpt_configure_ioc(mpt, tn + 1, 1));
2629 mpt2host_iocfacts_reply(&mpt->ioc_facts);
2631 mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
2632 mpt->ioc_facts.MsgVersion >> 8,
2633 mpt->ioc_facts.MsgVersion & 0xFF,
2634 mpt->ioc_facts.HeaderVersion >> 8,
2635 mpt->ioc_facts.HeaderVersion & 0xFF);
2638 * Now that we know request frame size, we can calculate
2639 * the actual (reasonable) segment limit for read/write I/O.
2641 * This limit is constrained by:
2643 * + The size of each area we allocate per command (and how
2644 * many chain segments we can fit into it).
2645 * + The total number of areas we've set up.
2646 * + The actual chain depth the card will allow.
2648 * The first area's segment count is limited by the I/O request
2649 * at the head of it. We cannot allocate realistically more
2650 * than MPT_MAX_REQUESTS areas. Therefore, to account for both
2651 * conditions, we'll just start out with MPT_MAX_REQUESTS-2.
2654 /* total number of request areas we (can) allocate */
2655 mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;
2657 /* converted to the number of chain areas possible */
2658 mpt->max_seg_cnt *= MPT_NRFM(mpt);
2660 /* limited by the number of chain areas the card will support */
2661 if (mpt->max_seg_cnt > mpt->ioc_facts.MaxChainDepth) {
2662 mpt_lprt(mpt, MPT_PRT_INFO,
2663 "chain depth limited to %u (from %u)\n",
2664 mpt->ioc_facts.MaxChainDepth, mpt->max_seg_cnt);
2665 mpt->max_seg_cnt = mpt->ioc_facts.MaxChainDepth;
2668 /* converted to the number of simple sges in chain segments. */
2669 mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);
2672 * Use this as the basis for reporting the maximum I/O size to CAM.
2674 mpt->max_cam_seg_cnt = min(mpt->max_seg_cnt, (MAXPHYS / PAGE_SIZE) + 1);
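/*
 * Purely illustrative numbers (assumed here, not read from any
 * particular adapter): with 512 request areas, 5 chain frames fitting
 * in each area and 32 SGE slots per frame, the steps above work out to
 *
 *	max_seg_cnt = 512 - 2;			510 areas
 *	max_seg_cnt *= 5;			2550 possible chain frames
 *	max_seg_cnt = min(2550, MaxChainDepth);	card may clamp this
 *	max_seg_cnt *= (32 - 1);		simple SGEs within chains
 *
 * and max_cam_seg_cnt is then capped at (MAXPHYS / PAGE_SIZE) + 1,
 * e.g. 33 for 128KiB MAXPHYS and 4KiB pages, since an I/O never needs
 * more than one segment per page plus one for an unaligned start.
 */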
2676 error = mpt_dma_buf_alloc(mpt);
2678 mpt_prt(mpt, "mpt_dma_buf_alloc() failed!\n");
2682 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
2683 request_t *req = &mpt->request_pool[val];
2684 req->state = REQ_STATE_ALLOCATED;
2685 mpt_callout_init(mpt, &req->callout);
2686 mpt_free_request(mpt, req);
2689 mpt_lprt(mpt, MPT_PRT_INFO, "Maximum Segment Count: %u, Maximum "
2690 "CAM Segment Count: %u\n", mpt->max_seg_cnt,
2691 mpt->max_cam_seg_cnt);
2693 mpt_lprt(mpt, MPT_PRT_INFO, "MsgLength=%u IOCNumber = %d\n",
2694 mpt->ioc_facts.MsgLength, mpt->ioc_facts.IOCNumber);
2695 mpt_lprt(mpt, MPT_PRT_INFO,
2696 "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
2697 "Request Frame Size %u bytes Max Chain Depth %u\n",
2698 mpt->ioc_facts.GlobalCredits, mpt->ioc_facts.BlockSize,
2699 mpt->ioc_facts.RequestFrameSize << 2,
2700 mpt->ioc_facts.MaxChainDepth);
2701 mpt_lprt(mpt, MPT_PRT_INFO, "IOCFACTS: Num Ports %d, FWImageSize %d, "
2702 "Flags=%#x\n", mpt->ioc_facts.NumberOfPorts,
2703 mpt->ioc_facts.FWImageSize, mpt->ioc_facts.Flags);
2705 len = mpt->ioc_facts.NumberOfPorts * sizeof (MSG_PORT_FACTS_REPLY);
2706 mpt->port_facts = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
2707 if (mpt->port_facts == NULL) {
2708 mpt_prt(mpt, "unable to allocate memory for port facts\n");
2713 if ((mpt->ioc_facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) &&
2714 (mpt->fw_uploaded == 0)) {
2715 struct mpt_map_info mi;
2718 * In some configurations, the IOC's firmware is
2719 * stored in a shared piece of system NVRAM that
2720 * is only accessible via the BIOS. In this
2721 * case, the IOC keeps a copy of its firmware in
2722 * RAM until the OS driver retrieves it. Once
2723 * retrieved, we are responsible for re-downloading
2724 * the firmware after any hard-reset.
2726 mpt->fw_image_size = mpt->ioc_facts.FWImageSize;
2727 error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
2728 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2729 mpt->fw_image_size, 1, mpt->fw_image_size, 0,
2732 mpt_prt(mpt, "cannot create firmware dma tag\n");
2735 error = bus_dmamem_alloc(mpt->fw_dmat,
2736 (void **)&mpt->fw_image, BUS_DMA_NOWAIT |
2737 BUS_DMA_COHERENT, &mpt->fw_dmap);
2739 mpt_prt(mpt, "cannot allocate firmware memory\n");
2740 bus_dma_tag_destroy(mpt->fw_dmat);
2745 bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
2746 mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, &mi, 0);
2747 mpt->fw_phys = mi.phys;
2749 error = mpt_upload_fw(mpt);
2751 mpt_prt(mpt, "firmware upload failed.\n");
2752 bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
2753 bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
2755 bus_dma_tag_destroy(mpt->fw_dmat);
2756 mpt->fw_image = NULL;
2759 mpt->fw_uploaded = 1;
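/*
 * A minimal sketch of the obligation noted above (hypothetical call
 * site, shown for illustration only): once the image is held in host
 * memory, any later hard reset is followed by pushing it back before
 * the IOC is reconfigured, along the lines of
 *
 *	if (mpt->fw_image != NULL)
 *		mpt_download_fw(mpt);
 */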
2762 for (port = 0; port < mpt->ioc_facts.NumberOfPorts; port++) {
2763 pfp = &mpt->port_facts[port];
2764 error = mpt_get_portfacts(mpt, 0, pfp);
2765 if (error != MPT_OK) {
2767 "mpt_get_portfacts on port %d failed\n", port);
2768 kfree(mpt->port_facts, M_DEVBUF);
2769 mpt->port_facts = NULL;
2770 return (mpt_configure_ioc(mpt, tn + 1, 1));
2772 mpt2host_portfacts_reply(pfp);
2775 error = MPT_PRT_INFO;
2777 error = MPT_PRT_DEBUG;
2779 mpt_lprt(mpt, error,
2780 "PORTFACTS[%d]: Type %x PFlags %x IID %d MaxDev %d\n",
2781 port, pfp->PortType, pfp->ProtocolFlags, pfp->PortSCSIID,
2787 * XXX: Not yet supporting more than port 0
2789 pfp = &mpt->port_facts[0];
2790 if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_FC) {
2794 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
2798 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SCSI) {
2802 if (mpt->mpt_ini_id == MPT_INI_ID_NONE)
2803 mpt->mpt_ini_id = pfp->PortSCSIID;
2804 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_ISCSI) {
2805 mpt_prt(mpt, "iSCSI not supported yet\n");
2807 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_INACTIVE) {
2808 mpt_prt(mpt, "Inactive Port\n");
2811 mpt_prt(mpt, "unknown Port Type %#x\n", pfp->PortType);
2816 * Set our role with what this port supports.
2818 * Note this might be changed later by other modules
2819 * if a different role is wanted.
2821 mpt->role = MPT_ROLE_NONE;
2822 if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
2823 mpt->role |= MPT_ROLE_INITIATOR;
2825 if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
2826 mpt->role |= MPT_ROLE_TARGET;
2832 if (mpt_enable_ioc(mpt, 1) != MPT_OK) {
2833 mpt_prt(mpt, "unable to initialize IOC\n");
2838 * Read IOC configuration information.
2840 * We need this to determine whether we have certain
2841 * settings, e.g. for Integrated Mirroring.
2843 mpt_read_config_info_ioc(mpt);
2849 mpt_enable_ioc(struct mpt_softc *mpt, int portenable)
2854 if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
2855 mpt_prt(mpt, "mpt_send_ioc_init failed\n");
2859 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n");
2861 if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) {
2862 mpt_prt(mpt, "IOC failed to go to run state\n");
2865 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");
2868 * Give it reply buffers
2870 * Do *not* exceed global credits.
2872 for (val = 0, pptr = mpt->reply_phys;
2873 (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
2874 pptr += MPT_REPLY_SIZE) {
2875 mpt_free_reply(mpt, pptr);
2876 if (++val == mpt->ioc_facts.GlobalCredits - 1)
2882 * Enable the port if asked. This is only done if we're resetting
2883 * the IOC after initial startup.
2887 * Enable asynchronous event reporting
2889 mpt_send_event_request(mpt, 1);
2891 if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2892 mpt_prt(mpt, "%s: failed to enable port 0\n", __func__);
2900 * Endian conversion functions - only used on big-endian machines
2902 #if _BYTE_ORDER == _BIG_ENDIAN
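/*
 * A sketch of what the MPT_2_HOST*() and HOST_2_MPT*() helpers amount
 * to (the real definitions live in the driver's header; shown here
 * only for orientation): each call converts one named wire field
 * between little-endian and host byte order in place, e.g.
 *
 *	#define MPT_2_HOST32(ptr, tag)	(ptr)->tag = le32toh((ptr)->tag)
 *	#define MPT_2_HOST16(ptr, tag)	(ptr)->tag = le16toh((ptr)->tag)
 *
 * On little-endian hosts this whole section is compiled out.
 */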
2904 mpt2host_sge_simple_union(SGE_SIMPLE_UNION *sge)
2907 MPT_2_HOST32(sge, FlagsLength);
2908 MPT_2_HOST32(sge, u.Address64.Low);
2909 MPT_2_HOST32(sge, u.Address64.High);
2913 mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *rp)
2916 MPT_2_HOST16(rp, MsgVersion);
2917 MPT_2_HOST16(rp, HeaderVersion);
2918 MPT_2_HOST32(rp, MsgContext);
2919 MPT_2_HOST16(rp, IOCExceptions);
2920 MPT_2_HOST16(rp, IOCStatus);
2921 MPT_2_HOST32(rp, IOCLogInfo);
2922 MPT_2_HOST16(rp, ReplyQueueDepth);
2923 MPT_2_HOST16(rp, RequestFrameSize);
2924 MPT_2_HOST16(rp, Reserved_0101_FWVersion);
2925 MPT_2_HOST16(rp, ProductID);
2926 MPT_2_HOST32(rp, CurrentHostMfaHighAddr);
2927 MPT_2_HOST16(rp, GlobalCredits);
2928 MPT_2_HOST32(rp, CurrentSenseBufferHighAddr);
2929 MPT_2_HOST16(rp, CurReplyFrameSize);
2930 MPT_2_HOST32(rp, FWImageSize);
2931 MPT_2_HOST32(rp, IOCCapabilities);
2932 MPT_2_HOST32(rp, FWVersion.Word);
2933 MPT_2_HOST16(rp, HighPriorityQueueDepth);
2934 MPT_2_HOST16(rp, Reserved2);
2935 mpt2host_sge_simple_union(&rp->HostPageBufferSGE);
2936 MPT_2_HOST32(rp, ReplyFifoHostSignalingAddr);
2940 mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *pfp)
2943 MPT_2_HOST16(pfp, Reserved);
2944 MPT_2_HOST16(pfp, Reserved1);
2945 MPT_2_HOST32(pfp, MsgContext);
2946 MPT_2_HOST16(pfp, Reserved2);
2947 MPT_2_HOST16(pfp, IOCStatus);
2948 MPT_2_HOST32(pfp, IOCLogInfo);
2949 MPT_2_HOST16(pfp, MaxDevices);
2950 MPT_2_HOST16(pfp, PortSCSIID);
2951 MPT_2_HOST16(pfp, ProtocolFlags);
2952 MPT_2_HOST16(pfp, MaxPostedCmdBuffers);
2953 MPT_2_HOST16(pfp, MaxPersistentIDs);
2954 MPT_2_HOST16(pfp, MaxLanBuckets);
2955 MPT_2_HOST16(pfp, Reserved4);
2956 MPT_2_HOST32(pfp, Reserved5);
2960 mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *ioc2)
2964 MPT_2_HOST32(ioc2, CapabilitiesFlags);
2965 for (i = 0; i < MPI_IOC_PAGE_2_RAID_VOLUME_MAX; i++) {
2966 MPT_2_HOST16(ioc2, RaidVolume[i].Reserved3);
2971 mpt2host_config_page_ioc3(CONFIG_PAGE_IOC_3 *ioc3)
2974 MPT_2_HOST16(ioc3, Reserved2);
2978 mpt2host_config_page_scsi_port_0(CONFIG_PAGE_SCSI_PORT_0 *sp0)
2981 MPT_2_HOST32(sp0, Capabilities);
2982 MPT_2_HOST32(sp0, PhysicalInterface);
2986 mpt2host_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
2989 MPT_2_HOST32(sp1, Configuration);
2990 MPT_2_HOST32(sp1, OnBusTimerValue);
2991 MPT_2_HOST16(sp1, IDConfig);
2995 host2mpt_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
2998 HOST_2_MPT32(sp1, Configuration);
2999 HOST_2_MPT32(sp1, OnBusTimerValue);
3000 HOST_2_MPT16(sp1, IDConfig);
3004 mpt2host_config_page_scsi_port_2(CONFIG_PAGE_SCSI_PORT_2 *sp2)
3008 MPT_2_HOST32(sp2, PortFlags);
3009 MPT_2_HOST32(sp2, PortSettings);
3010 for (i = 0; i < sizeof(sp2->DeviceSettings) /
3011 sizeof(*sp2->DeviceSettings); i++) {
3012 MPT_2_HOST16(sp2, DeviceSettings[i].DeviceFlags);
3017 mpt2host_config_page_scsi_device_0(CONFIG_PAGE_SCSI_DEVICE_0 *sd0)
3020 MPT_2_HOST32(sd0, NegotiatedParameters);
3021 MPT_2_HOST32(sd0, Information);
3025 mpt2host_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
3028 MPT_2_HOST32(sd1, RequestedParameters);
3029 MPT_2_HOST32(sd1, Reserved);
3030 MPT_2_HOST32(sd1, Configuration);
3034 host2mpt_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
3037 HOST_2_MPT32(sd1, RequestedParameters);
3038 HOST_2_MPT32(sd1, Reserved);
3039 HOST_2_MPT32(sd1, Configuration);
3043 mpt2host_config_page_fc_port_0(CONFIG_PAGE_FC_PORT_0 *fp0)
3046 MPT_2_HOST32(fp0, Flags);
3047 MPT_2_HOST32(fp0, PortIdentifier);
3048 MPT_2_HOST32(fp0, WWNN.Low);
3049 MPT_2_HOST32(fp0, WWNN.High);
3050 MPT_2_HOST32(fp0, WWPN.Low);
3051 MPT_2_HOST32(fp0, WWPN.High);
3052 MPT_2_HOST32(fp0, SupportedServiceClass);
3053 MPT_2_HOST32(fp0, SupportedSpeeds);
3054 MPT_2_HOST32(fp0, CurrentSpeed);
3055 MPT_2_HOST32(fp0, MaxFrameSize);
3056 MPT_2_HOST32(fp0, FabricWWNN.Low);
3057 MPT_2_HOST32(fp0, FabricWWNN.High);
3058 MPT_2_HOST32(fp0, FabricWWPN.Low);
3059 MPT_2_HOST32(fp0, FabricWWPN.High);
3060 MPT_2_HOST32(fp0, DiscoveredPortsCount);
3061 MPT_2_HOST32(fp0, MaxInitiators);
3065 mpt2host_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
3068 MPT_2_HOST32(fp1, Flags);
3069 MPT_2_HOST32(fp1, NoSEEPROMWWNN.Low);
3070 MPT_2_HOST32(fp1, NoSEEPROMWWNN.High);
3071 MPT_2_HOST32(fp1, NoSEEPROMWWPN.Low);
3072 MPT_2_HOST32(fp1, NoSEEPROMWWPN.High);
3076 host2mpt_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
3079 HOST_2_MPT32(fp1, Flags);
3080 HOST_2_MPT32(fp1, NoSEEPROMWWNN.Low);
3081 HOST_2_MPT32(fp1, NoSEEPROMWWNN.High);
3082 HOST_2_MPT32(fp1, NoSEEPROMWWPN.Low);
3083 HOST_2_MPT32(fp1, NoSEEPROMWWPN.High);
3087 mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *volp)
3091 MPT_2_HOST16(volp, VolumeStatus.Reserved);
3092 MPT_2_HOST16(volp, VolumeSettings.Settings);
3093 MPT_2_HOST32(volp, MaxLBA);
3094 MPT_2_HOST32(volp, MaxLBAHigh);
3095 MPT_2_HOST32(volp, StripeSize);
3096 MPT_2_HOST32(volp, Reserved2);
3097 MPT_2_HOST32(volp, Reserved3);
3098 for (i = 0; i < MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX; i++) {
3099 MPT_2_HOST16(volp, PhysDisk[i].Reserved);
3104 mpt2host_config_page_raid_phys_disk_0(CONFIG_PAGE_RAID_PHYS_DISK_0 *rpd0)
3107 MPT_2_HOST32(rpd0, Reserved1);
3108 MPT_2_HOST16(rpd0, PhysDiskStatus.Reserved);
3109 MPT_2_HOST32(rpd0, MaxLBA);
3110 MPT_2_HOST16(rpd0, ErrorData.Reserved);
3111 MPT_2_HOST16(rpd0, ErrorData.ErrorCount);
3112 MPT_2_HOST16(rpd0, ErrorData.SmartCount);
3116 mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *vi)
3119 MPT_2_HOST16(vi, TotalBlocks.High);
3120 MPT_2_HOST16(vi, TotalBlocks.Low);
3121 MPT_2_HOST16(vi, BlocksRemaining.High);
3122 MPT_2_HOST16(vi, BlocksRemaining.Low);