Remove inclusion of <sys/cdefs.h> from kernel .c files.
[dragonfly.git] sys/dev/disk/mpt/mpt.c
1/*-
2 * Generic routines for LSI Fusion adapters.
3 * FreeBSD Version.
4 *
5 * Copyright (c) 2000, 2001 by Greg Ansley
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice immediately at the beginning of the file, without modification,
12 * this list of conditions, and the following disclaimer.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28/*-
29 * Copyright (c) 2002, 2006 by Matthew Jacob
30 * All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions are
34 * met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38 * substantially similar to the "NO WARRANTY" disclaimer below
39 * ("Disclaimer") and any redistribution must be conditioned upon including
40 * a substantially similar Disclaimer requirement for further binary
41 * redistribution.
42 * 3. Neither the names of the above listed copyright holders nor the names
43 * of any contributors may be used to endorse or promote products derived
44 * from this software without specific prior written permission.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57 *
58 * Support from Chris Ellsworth in order to make SAS adapters work
59 * is gratefully acknowledged.
60 *
61 *
62 * Support from LSI-Logic has also gone a great deal toward making this a
63 * workable subsystem and is gratefully acknowledged.
64 */
65/*-
66 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
67 * Copyright (c) 2005, WHEEL Sp. z o.o.
68 * Copyright (c) 2004, 2005 Justin T. Gibbs
69 * All rights reserved.
70 *
71 * Redistribution and use in source and binary forms, with or without
72 * modification, are permitted provided that the following conditions are
73 * met:
74 * 1. Redistributions of source code must retain the above copyright
75 * notice, this list of conditions and the following disclaimer.
76 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
77 * substantially similar to the "NO WARRANTY" disclaimer below
78 * ("Disclaimer") and any redistribution must be conditioned upon including
79 * a substantially similar Disclaimer requirement for further binary
80 * redistribution.
81 * 3. Neither the names of the above listed copyright holders nor the names
82 * of any contributors may be used to endorse or promote products derived
83 * from this software without specific prior written permission.
84 *
85 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
86 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
87 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
88 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
89 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
90 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
91 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
92 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
93 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
94 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
95 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
96 *
97 * $FreeBSD: src/sys/dev/mpt/mpt.c,v 1.49 2009/01/07 21:52:47 marius Exp $
98 */
99
100#include <dev/disk/mpt/mpt.h>
101#include <dev/disk/mpt/mpt_cam.h> /* XXX For static handler registration */
102#include <dev/disk/mpt/mpt_raid.h> /* XXX For static handler registration */
103
104#include <dev/disk/mpt/mpilib/mpi.h>
105#include <dev/disk/mpt/mpilib/mpi_ioc.h>
106#include <dev/disk/mpt/mpilib/mpi_fc.h>
107#include <dev/disk/mpt/mpilib/mpi_targ.h>
108
109#include <sys/sysctl.h>
110
111#define MPT_MAX_TRYS 3
112#define MPT_MAX_WAIT 300000
113
114static int maxwait_ack = 0;
115static int maxwait_int = 0;
116static int maxwait_state = 0;
117
118static TAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq);
119mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS];
120
121static mpt_reply_handler_t mpt_default_reply_handler;
122static mpt_reply_handler_t mpt_config_reply_handler;
123static mpt_reply_handler_t mpt_handshake_reply_handler;
124static mpt_reply_handler_t mpt_event_reply_handler;
125static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
126 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
127static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
128static int mpt_soft_reset(struct mpt_softc *mpt);
129static void mpt_hard_reset(struct mpt_softc *mpt);
130static int mpt_configure_ioc(struct mpt_softc *mpt, int, int);
131static int mpt_enable_ioc(struct mpt_softc *mpt, int);
132
133/************************* Personality Module Support *************************/
134/*
135 * We include one extra entry that is guaranteed to be NULL
136 * to simplify our iterator.
137 */
138static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
139static __inline struct mpt_personality*
140 mpt_pers_find(struct mpt_softc *, u_int);
141static __inline struct mpt_personality*
142 mpt_pers_find_reverse(struct mpt_softc *, u_int);
143
144static __inline struct mpt_personality *
145mpt_pers_find(struct mpt_softc *mpt, u_int start_at)
146{
147 KASSERT(start_at <= MPT_MAX_PERSONALITIES,
148 ("mpt_pers_find: starting position out of range\n"));
149
150 while (start_at < MPT_MAX_PERSONALITIES
151 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
152 start_at++;
153 }
154 return (mpt_personalities[start_at]);
155}
156
157/*
158 * Used infrequently, so no need to optimize like the forward
159 * traversal, which relies on the MAX+1 entry being guaranteed
160 * to be NULL.
161 */
162static __inline struct mpt_personality *
163mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
164{
165 while (start_at < MPT_MAX_PERSONALITIES
166 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
167 start_at--;
168 }
169 if (start_at < MPT_MAX_PERSONALITIES)
170 return (mpt_personalities[start_at]);
171 return (NULL);
172}
173
174#define MPT_PERS_FOREACH(mpt, pers) \
175 for (pers = mpt_pers_find(mpt, /*start_at*/0); \
176 pers != NULL; \
177 pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))
178
179#define MPT_PERS_FOREACH_REVERSE(mpt, pers) \
180 for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
181 pers != NULL; \
182 pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))
183
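/*
 * Illustrative use of the iterators above (this is what
 * mpt_postattach() below does): the guaranteed-NULL sentinel entry
 * means a forward walk needs no explicit bound check at the call site.
 *
 *	struct mpt_personality *pers;
 *
 *	MPT_PERS_FOREACH(mpt, pers)
 *		pers->ready(mpt);
 */
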
184static mpt_load_handler_t mpt_stdload;
185static mpt_probe_handler_t mpt_stdprobe;
186static mpt_attach_handler_t mpt_stdattach;
187static mpt_enable_handler_t mpt_stdenable;
188static mpt_ready_handler_t mpt_stdready;
189static mpt_event_handler_t mpt_stdevent;
190static mpt_reset_handler_t mpt_stdreset;
191static mpt_shutdown_handler_t mpt_stdshutdown;
192static mpt_detach_handler_t mpt_stddetach;
193static mpt_unload_handler_t mpt_stdunload;
194static struct mpt_personality mpt_default_personality =
195{
196 .load = mpt_stdload,
197 .probe = mpt_stdprobe,
198 .attach = mpt_stdattach,
199 .enable = mpt_stdenable,
200 .ready = mpt_stdready,
201 .event = mpt_stdevent,
202 .reset = mpt_stdreset,
203 .shutdown = mpt_stdshutdown,
204 .detach = mpt_stddetach,
205 .unload = mpt_stdunload
206};
207
208static mpt_load_handler_t mpt_core_load;
209static mpt_attach_handler_t mpt_core_attach;
210static mpt_enable_handler_t mpt_core_enable;
211static mpt_reset_handler_t mpt_core_ioc_reset;
212static mpt_event_handler_t mpt_core_event;
213static mpt_shutdown_handler_t mpt_core_shutdown;
214static mpt_shutdown_handler_t mpt_core_detach;
215static mpt_unload_handler_t mpt_core_unload;
216static struct mpt_personality mpt_core_personality =
217{
218 .name = "mpt_core",
219 .load = mpt_core_load,
220// .attach = mpt_core_attach,
221// .enable = mpt_core_enable,
222 .event = mpt_core_event,
223 .reset = mpt_core_ioc_reset,
224 .shutdown = mpt_core_shutdown,
225 .detach = mpt_core_detach,
226 .unload = mpt_core_unload,
227};
228
229/*
230 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
231 * ordering information. We want the core to always register FIRST.
232 * Other modules are set to SI_ORDER_SECOND.
233 */
234static moduledata_t mpt_core_mod = {
235 "mpt_core", mpt_modevent, &mpt_core_personality
236};
237DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
238MODULE_VERSION(mpt_core, 1);
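
/*
 * Sketch of how a non-core personality would register itself (all
 * "example" names are hypothetical); note SI_ORDER_SECOND so that
 * mpt_core always registers first:
 *
 *	static struct mpt_personality example_personality = {
 *		.name = "mpt_example",
 *		.load = example_load,
 *	};
 *	static moduledata_t example_mod = {
 *		"mpt_example", mpt_modevent, &example_personality
 *	};
 *	DECLARE_MODULE(mpt_example, example_mod, SI_SUB_DRIVERS,
 *	    SI_ORDER_SECOND);
 *	MODULE_VERSION(mpt_example, 1);
 *	MODULE_DEPEND(mpt_example, mpt_core, 1, 1, 1);
 */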
239
240#define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))
241
242int
243mpt_modevent(module_t mod, int type, void *data)
244{
245 struct mpt_personality *pers;
246 int error;
247
248 pers = (struct mpt_personality *)data;
249
250 error = 0;
251 switch (type) {
252 case MOD_LOAD:
253 {
254 mpt_load_handler_t **def_handler;
255 mpt_load_handler_t **pers_handler;
256 int i;
257
258 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
259 if (mpt_personalities[i] == NULL)
260 break;
261 }
262 if (i >= MPT_MAX_PERSONALITIES) {
263 error = ENOMEM;
264 break;
265 }
266 pers->id = i;
267 mpt_personalities[i] = pers;
268
269 /* Install standard/noop handlers for any NULL entries. */
270 def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality);
271 pers_handler = MPT_PERS_FIRST_HANDLER(pers);
272 while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) {
273 if (*pers_handler == NULL)
274 *pers_handler = *def_handler;
275 pers_handler++;
276 def_handler++;
277 }
278
279 error = (pers->load(pers));
280 if (error != 0)
281 mpt_personalities[i] = NULL;
282 break;
283 }
284 case MOD_SHUTDOWN:
285 break;
286#if __FreeBSD_version >= 500000
287 case MOD_QUIESCE:
288 break;
289#endif
290 case MOD_UNLOAD:
291 error = pers->unload(pers);
292 mpt_personalities[pers->id] = NULL;
293 break;
294 default:
295 error = EINVAL;
296 break;
297 }
298 return (error);
299}
300
301int
302mpt_stdload(struct mpt_personality *pers)
303{
304	/* Load is always successful. */
305 return (0);
306}
307
308int
309mpt_stdprobe(struct mpt_softc *mpt)
310{
311	/* Probe is always successful. */
312 return (0);
313}
314
315int
316mpt_stdattach(struct mpt_softc *mpt)
317{
318	/* Attach is always successful. */
319 return (0);
320}
321
322int
323mpt_stdenable(struct mpt_softc *mpt)
324{
325	/* Enable is always successful. */
326 return (0);
327}
328
329void
330mpt_stdready(struct mpt_softc *mpt)
331{
332}
333
334
335int
336mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
337{
338 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n",
339 msg->Event & 0xFF);
340 /* Event was not for us. */
341 return (0);
342}
343
344void
345mpt_stdreset(struct mpt_softc *mpt, int type)
346{
347}
348
349void
350mpt_stdshutdown(struct mpt_softc *mpt)
351{
352}
353
354void
355mpt_stddetach(struct mpt_softc *mpt)
356{
357}
358
359int
360mpt_stdunload(struct mpt_personality *pers)
361{
362	/* Unload is always successful. */
363 return (0);
364}
365
366/*
367 * Post driver attachment, we may want to perform some global actions.
368 * Here is the hook to do so.
369 */
370
371static void
372mpt_postattach(void *unused)
373{
374 struct mpt_softc *mpt;
375 struct mpt_personality *pers;
376
377 TAILQ_FOREACH(mpt, &mpt_tailq, links) {
378 MPT_PERS_FOREACH(mpt, pers)
379 pers->ready(mpt);
380 }
381}
382SYSINIT(mptdev, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE, mpt_postattach, NULL);
383
384
385/******************************* Bus DMA Support ******************************/
386void
387mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
388{
389 struct mpt_map_info *map_info;
390
391 map_info = (struct mpt_map_info *)arg;
392 map_info->error = error;
393 map_info->phys = segs->ds_addr;
394}
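
/*
 * Sketch of how this callback is used (assumes a synchronous
 * bus_dmamap_load(), i.e. no EINPROGRESS handling):
 *
 *	struct mpt_map_info mi;
 *
 *	mi.error = 0;
 *	bus_dmamap_load(dmat, dmap, vaddr, size, mpt_map_rquest, &mi, 0);
 *	if (mi.error == 0)
 *		paddr = mi.phys;
 */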
395
396/**************************** Reply/Event Handling ****************************/
397int
398mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
399 mpt_handler_t handler, uint32_t *phandler_id)
400{
401
402 switch (type) {
403 case MPT_HANDLER_REPLY:
404 {
405 u_int cbi;
406 u_int free_cbi;
407
408 if (phandler_id == NULL)
409 return (EINVAL);
410
411 free_cbi = MPT_HANDLER_ID_NONE;
412 for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) {
413 /*
414 * If the same handler is registered multiple
415 * times, don't error out. Just return the
416 * index of the original registration.
417 */
418 if (mpt_reply_handlers[cbi] == handler.reply_handler) {
419 *phandler_id = MPT_CBI_TO_HID(cbi);
420 return (0);
421 }
422
423 /*
424 * Fill from the front in the hope that
425 * all registered handlers consume only a
426 * single cache line.
427 *
428 * We don't break on the first empty slot so
429 * that the full table is checked to see if
430 * this handler was previously registered.
431 */
432 if (free_cbi == MPT_HANDLER_ID_NONE &&
433 (mpt_reply_handlers[cbi]
434 == mpt_default_reply_handler))
435 free_cbi = cbi;
436 }
437 if (free_cbi == MPT_HANDLER_ID_NONE) {
438 return (ENOMEM);
439 }
440 mpt_reply_handlers[free_cbi] = handler.reply_handler;
441 *phandler_id = MPT_CBI_TO_HID(free_cbi);
442 break;
443 }
444 default:
445 mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
446 return (EINVAL);
447 }
448 return (0);
449}
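
/*
 * Registration sketch (the handler and id variable are illustrative;
 * the CAM and RAID personalities do the equivalent at attach time):
 *
 *	static uint32_t example_handler_id = MPT_HANDLER_ID_NONE;
 *	mpt_handler_t h;
 *
 *	h.reply_handler = example_reply_handler;
 *	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, h,
 *	    &example_handler_id);
 */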
450
451int
452mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
453 mpt_handler_t handler, uint32_t handler_id)
454{
455
456 switch (type) {
457 case MPT_HANDLER_REPLY:
458 {
459 u_int cbi;
460
461 cbi = MPT_CBI(handler_id);
462 if (cbi >= MPT_NUM_REPLY_HANDLERS
463 || mpt_reply_handlers[cbi] != handler.reply_handler)
464 return (ENOENT);
465 mpt_reply_handlers[cbi] = mpt_default_reply_handler;
466 break;
467 }
468 default:
469 mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
470 return (EINVAL);
471 }
472 return (0);
473}
474
475static int
476mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
477 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
478{
479 mpt_prt(mpt,
480 "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n",
481 req, req->serno, reply_desc, reply_frame);
482
483 if (reply_frame != NULL)
484 mpt_dump_reply_frame(mpt, reply_frame);
485
486 mpt_prt(mpt, "Reply Frame Ignored\n");
487
488 return (/*free_reply*/TRUE);
489}
490
491static int
492mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
493 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
494{
495 if (req != NULL) {
496
497 if (reply_frame != NULL) {
498 MSG_CONFIG *cfgp;
499 MSG_CONFIG_REPLY *reply;
500
501 cfgp = (MSG_CONFIG *)req->req_vbuf;
502 reply = (MSG_CONFIG_REPLY *)reply_frame;
503 req->IOCStatus = le16toh(reply_frame->IOCStatus);
504 bcopy(&reply->Header, &cfgp->Header,
505 sizeof(cfgp->Header));
506 cfgp->ExtPageLength = reply->ExtPageLength;
507 cfgp->ExtPageType = reply->ExtPageType;
508 }
509 req->state &= ~REQ_STATE_QUEUED;
510 req->state |= REQ_STATE_DONE;
511 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
512 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
513 wakeup(req);
514 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
515 /*
516 * Whew- we can free this request (late completion)
517 */
518 mpt_free_request(mpt, req);
519 }
520 }
521
522 return (TRUE);
523}
524
525static int
526mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
527 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
528{
529 /* Nothing to be done. */
530 return (TRUE);
531}
532
533static int
534mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
535 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
536{
537 int free_reply;
538
539 KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler"));
540 KASSERT(req != NULL, ("null request in mpt_event_reply_handler"));
541
542 free_reply = TRUE;
543 switch (reply_frame->Function) {
544 case MPI_FUNCTION_EVENT_NOTIFICATION:
545 {
546 MSG_EVENT_NOTIFY_REPLY *msg;
547 struct mpt_personality *pers;
548 u_int handled;
549
550 handled = 0;
551 msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
552 msg->EventDataLength = le16toh(msg->EventDataLength);
553 msg->IOCStatus = le16toh(msg->IOCStatus);
554 msg->IOCLogInfo = le32toh(msg->IOCLogInfo);
555 msg->Event = le32toh(msg->Event);
556 MPT_PERS_FOREACH(mpt, pers)
557 handled += pers->event(mpt, req, msg);
558
559 if (handled == 0 && mpt->mpt_pers_mask == 0) {
560 mpt_lprt(mpt, MPT_PRT_INFO,
561 "No Handlers For Any Event Notify Frames. "
562 "Event %#x (ACK %sequired).\n",
563 (unsigned)msg->Event,
564 msg->AckRequired? "r" : "not r");
565 } else if (handled == 0) {
566 mpt_lprt(mpt,
567 msg->AckRequired? MPT_PRT_WARN : MPT_PRT_INFO,
568 "Unhandled Event Notify Frame. Event %#x "
569 "(ACK %sequired).\n",
570 (unsigned)msg->Event,
571 msg->AckRequired? "r" : "not r");
572 }
573
574 if (msg->AckRequired) {
575 request_t *ack_req;
576 uint32_t context;
577
578 context = req->index | MPT_REPLY_HANDLER_EVENTS;
579 ack_req = mpt_get_request(mpt, FALSE);
580 if (ack_req == NULL) {
581 struct mpt_evtf_record *evtf;
582
583 evtf = (struct mpt_evtf_record *)reply_frame;
584 evtf->context = context;
585 LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
586 free_reply = FALSE;
587 break;
588 }
589 mpt_send_event_ack(mpt, ack_req, msg, context);
590 /*
591 * Don't check for CONTINUATION_REPLY here
592 */
593 return (free_reply);
594 }
595 break;
596 }
597 case MPI_FUNCTION_PORT_ENABLE:
598 mpt_lprt(mpt, MPT_PRT_DEBUG , "enable port reply\n");
599 break;
600 case MPI_FUNCTION_EVENT_ACK:
601 break;
602 default:
603 mpt_prt(mpt, "unknown event function: %x\n",
604 reply_frame->Function);
605 break;
606 }
607
608 /*
609 * I'm not sure that this continuation stuff works as it should.
610 *
611 * I've had FC async events occur that free the frame up because
612 * the continuation bit isn't set, and then additional async events
613 * occur using the same context. As you might imagine, this
614 * leads to Very Bad Things.
615 *
616 * Let's just be safe for now and not free them up until we figure
617 * out what's actually happening here.
618 */
619#if 0
620 if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) {
621 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
622 mpt_free_request(mpt, req);
623 mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation",
624 reply_frame->Function, req, req->serno);
625 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
626 MSG_EVENT_NOTIFY_REPLY *msg =
627 (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
628 mpt_prtc(mpt, " Event=0x%x AckReq=%d",
629 msg->Event, msg->AckRequired);
630 }
631 } else {
632 mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation",
633 reply_frame->Function, req, req->serno);
634 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
635 MSG_EVENT_NOTIFY_REPLY *msg =
636 (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
637 mpt_prtc(mpt, " Event=0x%x AckReq=%d",
638 msg->Event, msg->AckRequired);
639 }
640 mpt_prtc(mpt, "\n");
641 }
642#endif
643 return (free_reply);
644}
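
/*
 * Note on the deferred-ACK path above: when no request is available
 * for the ACK, the reply frame itself is stashed on mpt->ack_frames
 * and free_reply is forced FALSE so the frame stays valid; the ACK
 * is replayed later from mpt_free_request() once a request frees up.
 */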
645
646/*
647 * Process an asynchronous event from the IOC.
648 */
649static int
650mpt_core_event(struct mpt_softc *mpt, request_t *req,
651 MSG_EVENT_NOTIFY_REPLY *msg)
652{
653 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
654 (unsigned)(msg->Event & 0xFF));
655 switch(msg->Event & 0xFF) {
656 case MPI_EVENT_NONE:
657 break;
658 case MPI_EVENT_LOG_DATA:
659 {
660 int i;
661
662		/* Some error occurred that LSI wants logged */
663 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
664 (unsigned)msg->IOCLogInfo);
665 mpt_prt(mpt, "\tEvtLogData: Event Data:");
666 for (i = 0; i < msg->EventDataLength; i++)
667 mpt_prtc(mpt, " %08x", (unsigned)msg->Data[i]);
668 mpt_prtc(mpt, "\n");
669 break;
670 }
671 case MPI_EVENT_EVENT_CHANGE:
672 /*
673 * This is just an acknowledgement
674 * of our mpt_send_event_request.
675 */
676 break;
677 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
678 break;
679 default:
680 return (0);
681 break;
682 }
683 return (1);
684}
685
686static void
687mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
688 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
689{
690 MSG_EVENT_ACK *ackp;
691
692 ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
693 memset(ackp, 0, sizeof (*ackp));
694 ackp->Function = MPI_FUNCTION_EVENT_ACK;
695 ackp->Event = htole32(msg->Event);
696 ackp->EventContext = htole32(msg->EventContext);
697 ackp->MsgContext = htole32(context);
698 mpt_check_doorbell(mpt);
699 mpt_send_cmd(mpt, ack_req);
700}
701
702/***************************** Interrupt Handling *****************************/
703void
704mpt_intr(void *arg)
705{
706 struct mpt_softc *mpt;
707 uint32_t reply_desc;
708 int ntrips = 0;
709
710 mpt = (struct mpt_softc *)arg;
711 mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n");
712 MPT_LOCK_ASSERT(mpt);
713
714 while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
715 request_t *req;
716 MSG_DEFAULT_REPLY *reply_frame;
717 uint32_t reply_baddr;
718 uint32_t ctxt_idx;
719 u_int cb_index;
720 u_int req_index;
721 int free_rf;
722
723 req = NULL;
724 reply_frame = NULL;
725 reply_baddr = 0;
726 if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
727 u_int offset;
728 /*
729			 * Ensure that the reply frame is coherent.
730 */
731 reply_baddr = MPT_REPLY_BADDR(reply_desc);
732 offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
733 bus_dmamap_sync_range(mpt->reply_dmat,
734 mpt->reply_dmap, offset, MPT_REPLY_SIZE,
735 BUS_DMASYNC_POSTREAD);
736 reply_frame = MPT_REPLY_OTOV(mpt, offset);
737 ctxt_idx = le32toh(reply_frame->MsgContext);
738 } else {
739 uint32_t type;
740
741 type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
742 ctxt_idx = reply_desc;
743 mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
744 reply_desc);
745
746 switch (type) {
747 case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
748 ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
749 break;
750 case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
751 ctxt_idx = GET_IO_INDEX(reply_desc);
752 if (mpt->tgt_cmd_ptrs == NULL) {
753 mpt_prt(mpt,
754 "mpt_intr: no target cmd ptrs\n");
755 reply_desc = MPT_REPLY_EMPTY;
756 break;
757 }
758 if (ctxt_idx >= mpt->tgt_cmds_allocated) {
759 mpt_prt(mpt,
760 "mpt_intr: bad tgt cmd ctxt %u\n",
761 ctxt_idx);
762 reply_desc = MPT_REPLY_EMPTY;
763 ntrips = 1000;
764 break;
765 }
766 req = mpt->tgt_cmd_ptrs[ctxt_idx];
767 if (req == NULL) {
768 mpt_prt(mpt, "no request backpointer "
769 "at index %u", ctxt_idx);
770 reply_desc = MPT_REPLY_EMPTY;
771 ntrips = 1000;
772 break;
773 }
774 /*
775 * Reformulate ctxt_idx to be just as if
776 * it were another type of context reply
777 * so the code below will find the request
778 * via indexing into the pool.
779 */
780 ctxt_idx =
781 req->index | mpt->scsi_tgt_handler_id;
782 req = NULL;
783 break;
784 case MPI_CONTEXT_REPLY_TYPE_LAN:
785 mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
786 reply_desc);
787 reply_desc = MPT_REPLY_EMPTY;
788 break;
789 default:
790 mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
791 reply_desc = MPT_REPLY_EMPTY;
792 break;
793 }
794 if (reply_desc == MPT_REPLY_EMPTY) {
795 if (ntrips++ > 1000) {
796 break;
797 }
798 continue;
799 }
800 }
801
802 cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
803 req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
804 if (req_index < MPT_MAX_REQUESTS(mpt)) {
805 req = &mpt->request_pool[req_index];
806 } else {
807 mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc =="
808 " 0x%x)\n", req_index, reply_desc);
809 }
810
811 free_rf = mpt_reply_handlers[cb_index](mpt, req,
812 reply_desc, reply_frame);
813
814 if (reply_frame != NULL && free_rf) {
815 mpt_free_reply(mpt, reply_baddr);
816 }
817
818 /*
819 * If we got ourselves disabled, don't get stuck in a loop
820 */
821 if (mpt->disabled) {
822 mpt_disable_ints(mpt);
823 break;
824 }
825 if (ntrips++ > 1000) {
826 break;
827 }
828 }
829 mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n");
830}
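
/*
 * Sketch of the message-context decomposition used above: a context
 * carries the callback (handler) index in its upper bits and the
 * request index in its low bits, so
 *
 *	ctxt_idx  = req->index | MPT_CBI_TO_HID(cb_index);
 *	cb_index  = MPT_CONTEXT_TO_CBI(ctxt_idx);
 *	req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
 */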
831
832/******************************* Error Recovery *******************************/
833void
834mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
835 u_int iocstatus)
836{
837 MSG_DEFAULT_REPLY ioc_status_frame;
838 request_t *req;
839
840 memset(&ioc_status_frame, 0, sizeof(ioc_status_frame));
841 ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
842 ioc_status_frame.IOCStatus = iocstatus;
843 while((req = TAILQ_FIRST(chain)) != NULL) {
844 MSG_REQUEST_HEADER *msg_hdr;
845 u_int cb_index;
846
847 TAILQ_REMOVE(chain, req, links);
848 msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
849 ioc_status_frame.Function = msg_hdr->Function;
850 ioc_status_frame.MsgContext = msg_hdr->MsgContext;
851 cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
852 mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
853 &ioc_status_frame);
854 }
855}
856
857/********************************* Diagnostics ********************************/
858/*
859 * Perform a diagnostic dump of a reply frame.
860 */
861void
862mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
863{
864 mpt_prt(mpt, "Address Reply:\n");
865 mpt_print_reply(reply_frame);
866}
867
868/******************************* Doorbell Access ******************************/
869static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
870static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);
871
872static __inline uint32_t
873mpt_rd_db(struct mpt_softc *mpt)
874{
875 return mpt_read(mpt, MPT_OFFSET_DOORBELL);
876}
877
878static __inline uint32_t
879mpt_rd_intr(struct mpt_softc *mpt)
880{
881 return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
882}
883
884/* Busy wait for a doorbell to be read by the IOC */
885static int
886mpt_wait_db_ack(struct mpt_softc *mpt)
887{
888 int i;
889 for (i=0; i < MPT_MAX_WAIT; i++) {
890 if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
891 maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
892 return (MPT_OK);
893 }
894 DELAY(200);
895 }
896 return (MPT_FAIL);
897}
898
899/* Busy wait for a doorbell interrupt */
900static int
901mpt_wait_db_int(struct mpt_softc *mpt)
902{
903 int i;
904 for (i = 0; i < MPT_MAX_WAIT; i++) {
905 if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
906 maxwait_int = i > maxwait_int ? i : maxwait_int;
907 return MPT_OK;
908 }
909 DELAY(100);
910 }
911 return (MPT_FAIL);
912}
913
914/* Check the doorbell and complain if the IOC is not running */
915void
916mpt_check_doorbell(struct mpt_softc *mpt)
917{
918 uint32_t db = mpt_rd_db(mpt);
919 if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
920 mpt_prt(mpt, "Device not running\n");
921 mpt_print_db(db);
922 }
923}
924
925/* Wait for the IOC to transition to a given state */
926static int
927mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
928{
929 int i;
930
931 for (i = 0; i < MPT_MAX_WAIT; i++) {
932 uint32_t db = mpt_rd_db(mpt);
933 if (MPT_STATE(db) == state) {
934 maxwait_state = i > maxwait_state ? i : maxwait_state;
935 return (MPT_OK);
936 }
937 DELAY(100);
938 }
939 return (MPT_FAIL);
940}
941
942
943/************************ Initialization/Configuration ************************/
944static int mpt_download_fw(struct mpt_softc *mpt);
945
946/* Issue the reset COMMAND to the IOC */
947static int
948mpt_soft_reset(struct mpt_softc *mpt)
949{
950 mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");
951
952 /* Have to use hard reset if we are not in Running state */
953 if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
954 mpt_prt(mpt, "soft reset failed: device not running\n");
955 return (MPT_FAIL);
956 }
957
958	/* If the doorbell is in use we don't have a chance of getting
959	 * a word in, since the IOC probably crashed in message
960 * processing. So don't waste our time.
961 */
962 if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
963 mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
964 return (MPT_FAIL);
965 }
966
967 /* Send the reset request to the IOC */
968 mpt_write(mpt, MPT_OFFSET_DOORBELL,
969 MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
970 if (mpt_wait_db_ack(mpt) != MPT_OK) {
971 mpt_prt(mpt, "soft reset failed: ack timeout\n");
972 return (MPT_FAIL);
973 }
974
975 /* Wait for the IOC to reload and come out of reset state */
976 if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
977 mpt_prt(mpt, "soft reset failed: device did not restart\n");
978 return (MPT_FAIL);
979 }
980
981 return MPT_OK;
982}
983
984static int
985mpt_enable_diag_mode(struct mpt_softc *mpt)
986{
987 int try;
988
989 try = 20;
990 while (--try) {
991
992 if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
993 break;
994
995 /* Enable diagnostic registers */
996 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
997 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
998 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
999 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
1000 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
1001 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);
1002
1003 DELAY(100000);
1004 }
1005 if (try == 0)
1006 return (EIO);
1007 return (0);
1008}
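
/*
 * The writes above step the WriteSequence register through the
 * five-key unlock sequence; once the IOC accepts it, MPI_DIAG_DRWE
 * (diagnostic register write enable) latches on, which is what the
 * loop polls for. The leading 0xFF write appears to restart the
 * sequence from a known state.
 */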
1009
1010static void
1011mpt_disable_diag_mode(struct mpt_softc *mpt)
1012{
1013 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
1014}
1015
1016/* This is a magic diagnostic reset that resets all the ARM
1017 * processors in the chip.
1018 */
1019static void
1020mpt_hard_reset(struct mpt_softc *mpt)
1021{
1022 int error;
1023 int wait;
1024 uint32_t diagreg;
1025
1026 mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");
1027
1028 error = mpt_enable_diag_mode(mpt);
1029 if (error) {
1030 mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n");
1031 mpt_prt(mpt, "Trying to reset anyway.\n");
1032 }
1033
1034 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
1035
1036 /*
1037 * This appears to be a workaround required for some
1038 * firmware or hardware revs.
1039 */
1040 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
1041 DELAY(1000);
1042
1043 /* Diag. port is now active so we can now hit the reset bit */
1044 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);
1045
1046 /*
1047 * Ensure that the reset has finished. We delay 1ms
1048 * prior to reading the register to make sure the chip
1049 * has sufficiently completed its reset to handle register
1050 * accesses.
1051 */
1052 wait = 5000;
1053 do {
1054 DELAY(1000);
1055 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
1056 } while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);
1057
1058 if (wait == 0) {
1059 mpt_prt(mpt, "WARNING - Failed hard reset! "
1060 "Trying to initialize anyway.\n");
1061 }
1062
1063 /*
1064 * If we have firmware to download, it must be loaded before
1065 * the controller will become operational. Do so now.
1066 */
1067 if (mpt->fw_image != NULL) {
1068
1069 error = mpt_download_fw(mpt);
1070
1071 if (error) {
1072 mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
1073 mpt_prt(mpt, "Trying to initialize anyway.\n");
1074 }
1075 }
1076
1077 /*
1078	 * Resetting the controller should have disabled write
1079 * access to the diagnostic registers, but disable
1080 * manually to be sure.
1081 */
1082 mpt_disable_diag_mode(mpt);
1083}
1084
1085static void
1086mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
1087{
1088 /*
1089 * Complete all pending requests with a status
1090 * appropriate for an IOC reset.
1091 */
1092 mpt_complete_request_chain(mpt, &mpt->request_pending_list,
1093 MPI_IOCSTATUS_INVALID_STATE);
1094}
1095
1096
1097/*
1098 * Reset the IOC when needed. Try a soft reset first and, if needed,
1099 * poke at the magic diagnostic reset. Note that a hard reset resets
1100 * *both* IOCs on dual function chips (FC929 && LSI1030) and also
1101 * fouls up the PCI configuration registers.
1102 */
1103int
1104mpt_reset(struct mpt_softc *mpt, int reinit)
1105{
1106 struct mpt_personality *pers;
1107 int ret;
1108 int retry_cnt = 0;
1109
1110 /*
1111 * Try a soft reset. If that fails, get out the big hammer.
1112 */
1113 again:
1114 if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
1115 int cnt;
1116 for (cnt = 0; cnt < 5; cnt++) {
1117 /* Failed; do a hard reset */
1118 mpt_hard_reset(mpt);
1119
1120 /*
1121 * Wait for the IOC to reload
1122 * and come out of reset state
1123 */
1124 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
1125 if (ret == MPT_OK) {
1126 break;
1127 }
1128 /*
1129 * Okay- try to check again...
1130 */
1131 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
1132 if (ret == MPT_OK) {
1133 break;
1134 }
1135 mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
1136 retry_cnt, cnt);
1137 }
1138 }
1139
1140 if (retry_cnt == 0) {
1141 /*
1142 * Invoke reset handlers. We bump the reset count so
1143 * that mpt_wait_req() understands that regardless of
1144 * the specified wait condition, it should stop its wait.
1145 */
1146 mpt->reset_cnt++;
1147 MPT_PERS_FOREACH(mpt, pers)
1148 pers->reset(mpt, ret);
1149 }
1150
1151 if (reinit) {
1152 ret = mpt_enable_ioc(mpt, 1);
1153 if (ret == MPT_OK) {
1154 mpt_enable_ints(mpt);
1155 }
1156 }
1157 if (ret != MPT_OK && retry_cnt++ < 2) {
1158 goto again;
1159 }
1160 return ret;
1161}
1162
1163/* Return a command buffer to the free queue */
1164void
1165mpt_free_request(struct mpt_softc *mpt, request_t *req)
1166{
1167 request_t *nxt;
1168 struct mpt_evtf_record *record;
1169 uint32_t reply_baddr;
1170
1171 if (req == NULL || req != &mpt->request_pool[req->index]) {
1172 panic("mpt_free_request bad req ptr\n");
1173 return;
1174 }
1175 if ((nxt = req->chain) != NULL) {
1176 req->chain = NULL;
1177 mpt_free_request(mpt, nxt); /* NB: recursion */
1178 }
1179 KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
1180 KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));
1181 MPT_LOCK_ASSERT(mpt);
1182 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
1183 ("mpt_free_request: req %p:%u func %x already on freelist",
1184 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1185 KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
1186 ("mpt_free_request: req %p:%u func %x on pending list",
1187 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1188#ifdef INVARIANTS
1189 mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__);
1190#endif
1191
1192 req->ccb = NULL;
1193 if (LIST_EMPTY(&mpt->ack_frames)) {
1194 /*
1195 * Insert free ones at the tail
1196 */
1197 req->serno = 0;
1198 req->state = REQ_STATE_FREE;
1199#ifdef INVARIANTS
1200 memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER));
1201#endif
1202 TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
1203 if (mpt->getreqwaiter != 0) {
1204 mpt->getreqwaiter = 0;
1205 wakeup(&mpt->request_free_list);
1206 }
1207 return;
1208 }
1209
1210 /*
1211 * Process an ack frame deferred due to resource shortage.
1212 */
1213 record = LIST_FIRST(&mpt->ack_frames);
1214 LIST_REMOVE(record, links);
1215 req->state = REQ_STATE_ALLOCATED;
1216 mpt_assign_serno(mpt, req);
1217 mpt_send_event_ack(mpt, req, &record->reply, record->context);
1218 reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply)
1219 + (mpt->reply_phys & 0xFFFFFFFF);
1220 mpt_free_reply(mpt, reply_baddr);
1221}
1222
1223/* Get a command buffer from the free queue */
1224request_t *
1225mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
1226{
1227 request_t *req;
1228
1229retry:
1230 MPT_LOCK_ASSERT(mpt);
1231 req = TAILQ_FIRST(&mpt->request_free_list);
1232 if (req != NULL) {
1233 KASSERT(req == &mpt->request_pool[req->index],
1234 ("mpt_get_request: corrupted request free list\n"));
1235 KASSERT(req->state == REQ_STATE_FREE,
1236 ("req %p:%u not free on free list %x index %d function %x",
1237 req, req->serno, req->state, req->index,
1238 ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1239 TAILQ_REMOVE(&mpt->request_free_list, req, links);
1240 req->state = REQ_STATE_ALLOCATED;
1241 req->chain = NULL;
1242 mpt_assign_serno(mpt, req);
1243 mpt_callout_init(&req->callout);
1244 } else if (sleep_ok != 0) {
1245 mpt->getreqwaiter = 1;
1246 mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
1247 goto retry;
1248 }
1249 return (req);
1250}
1251
1252/* Pass the command to the IOC */
1253void
1254mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
1255{
1256 if (mpt->verbose > MPT_PRT_DEBUG2) {
1257 mpt_dump_request(mpt, req);
1258 }
1259 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
1260 BUS_DMASYNC_PREWRITE);
1261 req->state |= REQ_STATE_QUEUED;
1262 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
1263 ("req %p:%u func %x on freelist list in mpt_send_cmd",
1264 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1265 KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
1266 ("req %p:%u func %x already on pending list in mpt_send_cmd",
1267 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1268 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
1269 mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
1270}
1271
1272/*
1273 * Wait for a request to complete.
1274 *
1275 * Inputs:
1276 * mpt softc of controller executing request
1277 * req request to wait for
1278 * sleep_ok nonzero implies may sleep in this context
1279 * time_ms timeout in ms. 0 implies no timeout.
1280 *
1281 * Return Values:
1282 * 0 Request completed
1283 * non-0	Timeout fired or the IOC was reset before completion.
1284 */
1285int
1286mpt_wait_req(struct mpt_softc *mpt, request_t *req,
1287 mpt_req_state_t state, mpt_req_state_t mask,
1288 int sleep_ok, int time_ms)
1289{
1290 int error;
1291 int timeout;
1292 u_int saved_cnt;
1293
1294 /*
1295 * timeout is in ms. 0 indicates infinite wait.
1296 * Convert to ticks or 500us units depending on
1297 * our sleep mode.
1298 */
1299 if (sleep_ok != 0) {
1300 timeout = (time_ms * hz) / 1000;
1301 } else {
1302 timeout = time_ms * 2;
1303 }
1304 req->state |= REQ_STATE_NEED_WAKEUP;
1305 mask &= ~REQ_STATE_NEED_WAKEUP;
1306 saved_cnt = mpt->reset_cnt;
1307 while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
1308 if (sleep_ok != 0) {
1309 error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
1310 if (error == EWOULDBLOCK) {
1311 timeout = 0;
1312 break;
1313 }
1314 } else {
1315 if (time_ms != 0 && --timeout == 0) {
1316 break;
1317 }
1318 DELAY(500);
1319 mpt_intr(mpt);
1320 }
1321 }
1322 req->state &= ~REQ_STATE_NEED_WAKEUP;
1323 if (mpt->reset_cnt != saved_cnt) {
1324 return (EIO);
1325 }
1326 if (time_ms && timeout <= 0) {
1327 MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf;
1328 req->state |= REQ_STATE_TIMEDOUT;
1329 mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
1330 return (ETIMEDOUT);
1331 }
1332 return (0);
1333}
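
/*
 * Typical request lifecycle around this routine (the config-page
 * helpers below follow exactly this pattern):
 *
 *	req = mpt_get_request(mpt, sleep_ok);
 *	... build the message in req->req_vbuf ...
 *	mpt_check_doorbell(mpt);
 *	mpt_send_cmd(mpt, req);
 *	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
 *	    sleep_ok, timeout_ms);
 *	if (error != ETIMEDOUT)
 *		mpt_free_request(mpt, req);
 */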
1334
1335/*
1336 * Send a command to the IOC via the handshake register.
1337 *
1338 * Only done at initialization time and for certain unusual
1339 * commands such as device/bus reset as specified by LSI.
1340 */
1341int
1342mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
1343{
1344 int i;
1345 uint32_t data, *data32;
1346
1347 /* Check condition of the IOC */
1348 data = mpt_rd_db(mpt);
1349 if ((MPT_STATE(data) != MPT_DB_STATE_READY
1350 && MPT_STATE(data) != MPT_DB_STATE_RUNNING
1351 && MPT_STATE(data) != MPT_DB_STATE_FAULT)
1352 || MPT_DB_IS_IN_USE(data)) {
1353 mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
1354 mpt_print_db(data);
1355 return (EBUSY);
1356 }
1357
1358 /* We move things in 32 bit chunks */
1359 len = (len + 3) >> 2;
1360 data32 = cmd;
1361
1362 /* Clear any left over pending doorbell interrupts */
1363 if (MPT_DB_INTR(mpt_rd_intr(mpt)))
1364 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1365
1366 /*
1367 * Tell the handshake reg. we are going to send a command
1368 * and how long it is going to be.
1369 */
1370 data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
1371 (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
1372 mpt_write(mpt, MPT_OFFSET_DOORBELL, data);
1373
1374 /* Wait for the chip to notice */
1375 if (mpt_wait_db_int(mpt) != MPT_OK) {
1376 mpt_prt(mpt, "mpt_send_handshake_cmd: db ignored\n");
1377 return (ETIMEDOUT);
1378 }
1379
1380 /* Clear the interrupt */
1381 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1382
1383 if (mpt_wait_db_ack(mpt) != MPT_OK) {
1384 mpt_prt(mpt, "mpt_send_handshake_cmd: db ack timed out\n");
1385 return (ETIMEDOUT);
1386 }
1387
1388 /* Send the command */
1389 for (i = 0; i < len; i++) {
1390 mpt_write(mpt, MPT_OFFSET_DOORBELL, htole32(*data32++));
1391 if (mpt_wait_db_ack(mpt) != MPT_OK) {
1392 mpt_prt(mpt,
1393 "mpt_send_handshake_cmd: timeout @ index %d\n", i);
1394 return (ETIMEDOUT);
1395 }
1396 }
1397 return MPT_OK;
1398}
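
/*
 * Illustrative use of the handshake pair (mpt_get_iocfacts() below
 * is the real thing):
 *
 *	MSG_IOC_FACTS req;
 *	MSG_IOC_FACTS_REPLY reply;
 *
 *	memset(&req, 0, sizeof req);
 *	req.Function = MPI_FUNCTION_IOC_FACTS;
 *	req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
 *	if (mpt_send_handshake_cmd(mpt, sizeof req, &req) == MPT_OK)
 *		error = mpt_recv_handshake_reply(mpt, sizeof reply,
 *		    &reply);
 */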
1399
1400/* Get the response from the handshake register */
1401int
1402mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
1403{
1404 int left, reply_left;
1405 u_int16_t *data16;
1406 uint32_t data;
1407 MSG_DEFAULT_REPLY *hdr;
1408
1409 /* We move things out in 16 bit chunks */
1410 reply_len >>= 1;
1411 data16 = (u_int16_t *)reply;
1412
1413 hdr = (MSG_DEFAULT_REPLY *)reply;
1414
1415 /* Get first word */
1416 if (mpt_wait_db_int(mpt) != MPT_OK) {
1417 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
1418 return ETIMEDOUT;
1419 }
1420 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1421 *data16++ = le16toh(data & MPT_DB_DATA_MASK);
1422 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1423
1424 /* Get Second Word */
1425 if (mpt_wait_db_int(mpt) != MPT_OK) {
1426 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
1427 return ETIMEDOUT;
1428 }
1429 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1430 *data16++ = le16toh(data & MPT_DB_DATA_MASK);
1431 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1432
1433 /*
1434 * With the second word, we can now look at the length.
1435 * Warn about a reply that's too short (except for IOC FACTS REPLY)
1436 */
1437 if ((reply_len >> 1) != hdr->MsgLength &&
1438 (hdr->Function != MPI_FUNCTION_IOC_FACTS)){
1439	mpt_prt(mpt, "reply length does not match message length: "
1440	    "got %x; expected %zx for function %x\n",
1441	    hdr->MsgLength << 2, reply_len << 1, hdr->Function);
1448 }
1449
1450 /* Get rest of the reply; but don't overflow the provided buffer */
1451 left = (hdr->MsgLength << 1) - 2;
1452 reply_left = reply_len - 2;
1453 while (left--) {
1454 u_int16_t datum;
1455
1456 if (mpt_wait_db_int(mpt) != MPT_OK) {
1457 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
1458 return ETIMEDOUT;
1459 }
1460 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1461 datum = le16toh(data & MPT_DB_DATA_MASK);
1462
1463 if (reply_left-- > 0)
1464 *data16++ = datum;
1465
1466 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1467 }
1468
1469 /* One more wait & clear at the end */
1470 if (mpt_wait_db_int(mpt) != MPT_OK) {
1471 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
1472 return ETIMEDOUT;
1473 }
1474 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1475
1476 if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1477 if (mpt->verbose >= MPT_PRT_TRACE)
1478 mpt_print_reply(hdr);
1479 return (MPT_FAIL | hdr->IOCStatus);
1480 }
1481
1482 return (0);
1483}
1484
1485static int
1486mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
1487{
1488 MSG_IOC_FACTS f_req;
1489 int error;
1490
1491 memset(&f_req, 0, sizeof f_req);
1492 f_req.Function = MPI_FUNCTION_IOC_FACTS;
1493 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1494 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
1495 if (error) {
1496 return(error);
1497 }
1498 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
1499 return (error);
1500}
1501
1502static int
1503mpt_get_portfacts(struct mpt_softc *mpt, U8 port, MSG_PORT_FACTS_REPLY *freplp)
1504{
1505 MSG_PORT_FACTS f_req;
1506 int error;
1507
1508 memset(&f_req, 0, sizeof f_req);
1509 f_req.Function = MPI_FUNCTION_PORT_FACTS;
1510 f_req.PortNumber = port;
1511 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1512 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
1513 if (error) {
1514 return(error);
1515 }
1516 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
1517 return (error);
1518}
1519
1520/*
1521 * Send the initialization request. This is where we specify how many
1522 * SCSI busses and how many devices per bus we wish to emulate.
1523 * This is also the command that specifies the max size of the reply
1524 * frames from the IOC that we will be allocating.
1525 */
1526static int
1527mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
1528{
1529 int error = 0;
1530 MSG_IOC_INIT init;
1531 MSG_IOC_INIT_REPLY reply;
1532
1533 memset(&init, 0, sizeof init);
1534 init.WhoInit = who;
1535 init.Function = MPI_FUNCTION_IOC_INIT;
1536 init.MaxDevices = 0; /* at least 256 devices per bus */
1537 init.MaxBuses = 16; /* at least 16 busses */
1538
1539 init.MsgVersion = htole16(MPI_VERSION);
1540 init.HeaderVersion = htole16(MPI_HEADER_VERSION);
1541 init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
1542 init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1543
1544 if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
1545 return(error);
1546 }
1547
1548 error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
1549 return (error);
1550}
1551
1552
1553/*
1554 * Utility routine to read configuration headers and pages
1555 */
1556int
1557mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, cfgparms_t *params,
1558 bus_addr_t addr, bus_size_t len, int sleep_ok, int timeout_ms)
1559{
1560 MSG_CONFIG *cfgp;
1561 SGE_SIMPLE32 *se;
1562
1563 cfgp = req->req_vbuf;
1564 memset(cfgp, 0, sizeof *cfgp);
1565 cfgp->Action = params->Action;
1566 cfgp->Function = MPI_FUNCTION_CONFIG;
1567 cfgp->Header.PageVersion = params->PageVersion;
1568 cfgp->Header.PageNumber = params->PageNumber;
1569 cfgp->PageAddress = htole32(params->PageAddress);
1570 if ((params->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
1571 MPI_CONFIG_PAGETYPE_EXTENDED) {
1572 cfgp->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
1573 cfgp->Header.PageLength = 0;
1574 cfgp->ExtPageLength = htole16(params->ExtPageLength);
1575 cfgp->ExtPageType = params->ExtPageType;
1576 } else {
1577 cfgp->Header.PageType = params->PageType;
1578 cfgp->Header.PageLength = params->PageLength;
1579 }
1580 se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
1581 se->Address = htole32(addr);
1582 MPI_pSGE_SET_LENGTH(se, len);
1583 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1584 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1585 MPI_SGE_FLAGS_END_OF_LIST |
1586 ((params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
1587 || params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
1588 ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
1589 se->FlagsLength = htole32(se->FlagsLength);
1590 cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
1591
1592 mpt_check_doorbell(mpt);
1593 mpt_send_cmd(mpt, req);
1594 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
1595 sleep_ok, timeout_ms));
1596}
1597
1598int
1599mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion, int PageNumber,
1600 uint32_t PageAddress, int ExtPageType,
1601 CONFIG_EXTENDED_PAGE_HEADER *rslt,
1602 int sleep_ok, int timeout_ms)
1603{
1604 request_t *req;
1605 cfgparms_t params;
1606 MSG_CONFIG_REPLY *cfgp;
1607 int error;
1608
1609 req = mpt_get_request(mpt, sleep_ok);
1610 if (req == NULL) {
1611		mpt_prt(mpt, "mpt_read_extcfg_header: Get request failed!\n");
1612 return (ENOMEM);
1613 }
1614
1615 params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
1616 params.PageVersion = PageVersion;
1617 params.PageLength = 0;
1618 params.PageNumber = PageNumber;
1619 params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
1620 params.PageAddress = PageAddress;
1621 params.ExtPageType = ExtPageType;
1622 params.ExtPageLength = 0;
1623 error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
1624 sleep_ok, timeout_ms);
1625 if (error != 0) {
1626 /*
1627 * Leave the request. Without resetting the chip, it's
1628		 * still owned by the chip and we'll just get into trouble
1629 * freeing it now. Mark it as abandoned so that if it
1630 * shows up later it can be freed.
1631 */
1632 mpt_prt(mpt, "read_extcfg_header timed out\n");
1633 return (ETIMEDOUT);
1634 }
1635
1636 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
1637 case MPI_IOCSTATUS_SUCCESS:
1638 cfgp = req->req_vbuf;
1639 rslt->PageVersion = cfgp->Header.PageVersion;
1640 rslt->PageNumber = cfgp->Header.PageNumber;
1641 rslt->PageType = cfgp->Header.PageType;
1642 rslt->ExtPageLength = le16toh(cfgp->ExtPageLength);
1643 rslt->ExtPageType = cfgp->ExtPageType;
1644 error = 0;
1645 break;
1646 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
1647 mpt_lprt(mpt, MPT_PRT_DEBUG,
1648 "Invalid Page Type %d Number %d Addr 0x%0x\n",
1649 MPI_CONFIG_PAGETYPE_EXTENDED, PageNumber, PageAddress);
1650 error = EINVAL;
1651 break;
1652 default:
1653 mpt_prt(mpt, "mpt_read_extcfg_header: Config Info Status %x\n",
1654 req->IOCStatus);
1655 error = EIO;
1656 break;
1657 }
1658 mpt_free_request(mpt, req);
1659 return (error);
1660}
1661
1662int
1663mpt_read_extcfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1664 CONFIG_EXTENDED_PAGE_HEADER *hdr, void *buf, size_t len,
1665 int sleep_ok, int timeout_ms)
1666{
1667 request_t *req;
1668 cfgparms_t params;
1669 int error;
1670
1671 req = mpt_get_request(mpt, sleep_ok);
1672 if (req == NULL) {
1673 mpt_prt(mpt, "mpt_read_extcfg_page: Get request failed!\n");
1674 return (-1);
1675 }
1676
1677 params.Action = Action;
1678 params.PageVersion = hdr->PageVersion;
1679 params.PageLength = 0;
1680 params.PageNumber = hdr->PageNumber;
1681 params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
1682 params.PageAddress = PageAddress;
1683 params.ExtPageType = hdr->ExtPageType;
1684 params.ExtPageLength = hdr->ExtPageLength;
1685 error = mpt_issue_cfg_req(mpt, req, &params,
1686 req->req_pbuf + MPT_RQSL(mpt),
1687 len, sleep_ok, timeout_ms);
1688 if (error != 0) {
1689 mpt_prt(mpt, "read_extcfg_page(%d) timed out\n", Action);
1690 return (-1);
1691 }
1692
1693 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1694 mpt_prt(mpt, "mpt_read_extcfg_page: Config Info Status %x\n",
1695 req->IOCStatus);
1696 mpt_free_request(mpt, req);
1697 return (-1);
1698 }
1699 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
1700 BUS_DMASYNC_POSTREAD);
1701 memcpy(buf, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
1702 mpt_free_request(mpt, req);
1703 return (0);
1704}
1705
1706int
1707mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
1708 uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
1709 int sleep_ok, int timeout_ms)
1710{
1711 request_t *req;
1712 cfgparms_t params;
1713 MSG_CONFIG *cfgp;
1714 int error;
1715
1716 req = mpt_get_request(mpt, sleep_ok);
1717 if (req == NULL) {
1718 mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
1719 return (ENOMEM);
1720 }
1721
1722 params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
1723 params.PageVersion = 0;
1724 params.PageLength = 0;
1725 params.PageNumber = PageNumber;
1726 params.PageType = PageType;
1727 params.PageAddress = PageAddress;
1728 error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
1729 sleep_ok, timeout_ms);
1730 if (error != 0) {
1731 /*
1732 * Leave the request. Without resetting the chip, it's
1733		 * still owned by the chip and we'll just get into trouble
1734 * freeing it now. Mark it as abandoned so that if it
1735 * shows up later it can be freed.
1736 */
1737 mpt_prt(mpt, "read_cfg_header timed out\n");
1738 return (ETIMEDOUT);
1739 }
1740
1741 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
1742 case MPI_IOCSTATUS_SUCCESS:
1743 cfgp = req->req_vbuf;
1744 bcopy(&cfgp->Header, rslt, sizeof(*rslt));
1745 error = 0;
1746 break;
1747 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
1748 mpt_lprt(mpt, MPT_PRT_DEBUG,
1749 "Invalid Page Type %d Number %d Addr 0x%0x\n",
1750 PageType, PageNumber, PageAddress);
1751 error = EINVAL;
1752 break;
1753 default:
1754 mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
1755 req->IOCStatus);
1756 error = EIO;
1757 break;
1758 }
1759 mpt_free_request(mpt, req);
1760 return (error);
1761}
1762
1763int
1764mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1765 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1766 int timeout_ms)
1767{
1768 request_t *req;
1769 cfgparms_t params;
1770 int error;
1771
1772 req = mpt_get_request(mpt, sleep_ok);
1773 if (req == NULL) {
1774 mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
1775 return (-1);
1776 }
1777
1778 params.Action = Action;
1779 params.PageVersion = hdr->PageVersion;
1780 params.PageLength = hdr->PageLength;
1781 params.PageNumber = hdr->PageNumber;
1782 params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
1783 params.PageAddress = PageAddress;
1784 error = mpt_issue_cfg_req(mpt, req, &params,
1785 req->req_pbuf + MPT_RQSL(mpt),
1786 len, sleep_ok, timeout_ms);
1787 if (error != 0) {
1788 mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
1789 return (-1);
1790 }
1791
1792 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1793 mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
1794 req->IOCStatus);
1795 mpt_free_request(mpt, req);
1796 return (-1);
1797 }
1798 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
1799 BUS_DMASYNC_POSTREAD);
1800 memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
1801 mpt_free_request(mpt, req);
1802 return (0);
1803}
1804
1805int
1806mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1807 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1808 int timeout_ms)
1809{
1810 request_t *req;
1811 cfgparms_t params;
1812 u_int hdr_attr;
1813 int error;
1814
1815 hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
1816 if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
1817 hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
1818 mpt_prt(mpt, "page type 0x%x not changeable\n",
1819 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
1820 return (-1);
1821 }
1822
1823#if 0
1824 /*
1825 * We shouldn't mask off other bits here.
1826 */
1827 hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK;
1828#endif
1829
1830 req = mpt_get_request(mpt, sleep_ok);
1831 if (req == NULL)
1832 return (-1);
1833
1834 memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt), hdr, len);
1835
1836 /*
1837 * There isn't any point in restoring stripped out attributes
1838 * if you then mask them going down to issue the request.
1839 */
1840
1841 params.Action = Action;
1842 params.PageVersion = hdr->PageVersion;
1843 params.PageLength = hdr->PageLength;
1844 params.PageNumber = hdr->PageNumber;
1845 params.PageAddress = PageAddress;
1846#if 0
1847 /* Restore stripped out attributes */
1848 hdr->PageType |= hdr_attr;
1849 params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
1850#else
1851 params.PageType = hdr->PageType;
1852#endif
1853 error = mpt_issue_cfg_req(mpt, req, &params,
1854 req->req_pbuf + MPT_RQSL(mpt),
1855 len, sleep_ok, timeout_ms);
1856 if (error != 0) {
1857 mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
1858 return (-1);
1859 }
1860
1861 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1862 mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
1863 req->IOCStatus);
1864 mpt_free_request(mpt, req);
1865 return (-1);
1866 }
1867 mpt_free_request(mpt, req);
1868 return (0);
1869}
1870
1871/*
1872 * Read IOC configuration information
1873 */
1874static int
1875mpt_read_config_info_ioc(struct mpt_softc *mpt)
1876{
1877 CONFIG_PAGE_HEADER hdr;
1878 struct mpt_raid_volume *mpt_raid;
1879 int rv;
1880 int i;
1881 size_t len;
1882
1883 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
1884 2, 0, &hdr, FALSE, 5000);
1885 /*
1886 * If it's an invalid page, so what? Not a supported function....
1887 */
1888 if (rv == EINVAL) {
1889 return (0);
1890 }
1891 if (rv) {
1892 return (rv);
1893 }
1894
1895 mpt_lprt(mpt, MPT_PRT_DEBUG,
1896 "IOC Page 2 Header: Version %x len %x PageNumber %x PageType %x\n",
1897 hdr.PageVersion, hdr.PageLength << 2,
1898 hdr.PageNumber, hdr.PageType);
1899
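	/* PageLength is in units of 32-bit words; convert to bytes. */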
1900 len = hdr.PageLength * sizeof(uint32_t);
1901 mpt->ioc_page2 = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1902 if (mpt->ioc_page2 == NULL) {
1903 mpt_prt(mpt, "unable to allocate memory for IOC page 2\n");
1904 mpt_raid_free_mem(mpt);
1905 return (ENOMEM);
1906 }
1907 memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
1908 rv = mpt_read_cur_cfg_page(mpt, 0,
1909 &mpt->ioc_page2->Header, len, FALSE, 5000);
1910 if (rv) {
1911 mpt_prt(mpt, "failed to read IOC Page 2\n");
1912 mpt_raid_free_mem(mpt);
1913 return (EIO);
1914 }
1915 mpt2host_config_page_ioc2(mpt->ioc_page2);
1916
1917 if (mpt->ioc_page2->CapabilitiesFlags != 0) {
1918 uint32_t mask;
1919
1920 mpt_prt(mpt, "Capabilities: (");
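		/* Walk every single-bit mask until the shift wraps to zero. */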
1921 for (mask = 1; mask != 0; mask <<= 1) {
1922 if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) {
1923 continue;
1924 }
1925 switch (mask) {
1926 case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
1927 mpt_prtc(mpt, " RAID-0");
1928 break;
1929 case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT:
1930 mpt_prtc(mpt, " RAID-1E");
1931 break;
1932 case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT:
1933 mpt_prtc(mpt, " RAID-1");
1934 break;
1935 case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT:
1936 mpt_prtc(mpt, " SES");
1937 break;
1938 case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT:
1939 mpt_prtc(mpt, " SAFTE");
1940 break;
1941 case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT:
1942 				mpt_prtc(mpt, " Multi-Channel-Arrays");
				break;
1943 			default:
1944 break;
1945 }
1946 }
1947 mpt_prtc(mpt, " )\n");
1948 if ((mpt->ioc_page2->CapabilitiesFlags
1949 & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
1950 | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
1951 | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) {
1952 mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
1953 mpt->ioc_page2->NumActiveVolumes,
1954 mpt->ioc_page2->NumActiveVolumes != 1
1955 ? "s " : " ",
1956 mpt->ioc_page2->MaxVolumes);
1957 mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n",
1958 mpt->ioc_page2->NumActivePhysDisks,
1959 mpt->ioc_page2->NumActivePhysDisks != 1
1960 ? "s " : " ",
1961 mpt->ioc_page2->MaxPhysDisks);
1962 }
1963 }
1964
1965 len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
1966 mpt->raid_volumes = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1967 if (mpt->raid_volumes == NULL) {
1968 mpt_prt(mpt, "Could not allocate RAID volume data\n");
1969 mpt_raid_free_mem(mpt);
1970 return (ENOMEM);
1971 }
1972
1973 /*
1974 * Copy critical data out of ioc_page2 so that we can
1975 * safely refresh the page without windows of unreliable
1976 * data.
1977 */
1978 mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes;
1979
1980 len = sizeof(*mpt->raid_volumes->config_page) +
1981 (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1));
1982 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1983 mpt_raid = &mpt->raid_volumes[i];
1984 mpt_raid->config_page =
1985 kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1986 if (mpt_raid->config_page == NULL) {
1987 mpt_prt(mpt, "Could not allocate RAID page data\n");
1988 mpt_raid_free_mem(mpt);
1989 return (ENOMEM);
1990 }
1991 }
1992 mpt->raid_page0_len = len;
1993
1994 len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
1995 mpt->raid_disks = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1996 if (mpt->raid_disks == NULL) {
1997 mpt_prt(mpt, "Could not allocate RAID disk data\n");
1998 mpt_raid_free_mem(mpt);
1999 return (ENOMEM);
2000 }
2001 mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks;
2002
2003 /*
2004 * Load page 3.
2005 */
2006 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
2007 3, 0, &hdr, FALSE, 5000);
2008 if (rv) {
2009 mpt_raid_free_mem(mpt);
2010 return (EIO);
2011 }
2012
2013 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
2014 hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);
2015
2016 len = hdr.PageLength * sizeof(uint32_t);
2017 mpt->ioc_page3 = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
2018 if (mpt->ioc_page3 == NULL) {
2019 mpt_prt(mpt, "unable to allocate memory for IOC page 3\n");
2020 mpt_raid_free_mem(mpt);
2021 return (ENOMEM);
2022 }
2023 memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
2024 rv = mpt_read_cur_cfg_page(mpt, 0,
2025 &mpt->ioc_page3->Header, len, FALSE, 5000);
2026 if (rv) {
2027 mpt_raid_free_mem(mpt);
2028 return (EIO);
2029 }
2030 mpt2host_config_page_ioc3(mpt->ioc_page3);
2031 mpt_raid_wakeup(mpt);
2032 return (0);
2033}
2034
2035/*
2036 * Enable IOC port
2037 */
2038static int
2039mpt_send_port_enable(struct mpt_softc *mpt, int port)
2040{
2041 request_t *req;
2042 MSG_PORT_ENABLE *enable_req;
2043 int error;
2044
2045 req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
2046 if (req == NULL)
2047 return (-1);
2048
2049 enable_req = req->req_vbuf;
2050 memset(enable_req, 0, MPT_RQSL(mpt));
2051
2052 enable_req->Function = MPI_FUNCTION_PORT_ENABLE;
2053 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
2054 enable_req->PortNumber = port;
2055
2056 mpt_check_doorbell(mpt);
2057 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);
2058
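	/* SAS and FC ports get a longer enable timeout (30s vs 3s). */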
2059 mpt_send_cmd(mpt, req);
2060 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
2061 FALSE, (mpt->is_sas || mpt->is_fc)? 30000 : 3000);
2062 if (error != 0) {
2063 mpt_prt(mpt, "port %d enable timed out\n", port);
2064 return (-1);
2065 }
2066 mpt_free_request(mpt, req);
2067 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port);
2068 return (0);
2069}
2070
2071/*
2072 * Enable/Disable asynchronous event reporting.
2073 */
2074static int
2075mpt_send_event_request(struct mpt_softc *mpt, int onoff)
2076{
2077 request_t *req;
2078 MSG_EVENT_NOTIFY *enable_req;
2079
2080 req = mpt_get_request(mpt, FALSE);
2081 if (req == NULL) {
2082 return (ENOMEM);
2083 }
2084 enable_req = req->req_vbuf;
2085 memset(enable_req, 0, sizeof *enable_req);
2086
2087 enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION;
2088 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
2089 enable_req->Switch = onoff;
2090
2091 mpt_check_doorbell(mpt);
2092 mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n",
2093 onoff ? "en" : "dis");
2094 /*
2095 * Send the command off, but don't wait for it.
2096 */
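	/* The eventual reply is fielded by mpt_event_reply_handler. */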
2097 mpt_send_cmd(mpt, req);
2098 return (0);
2099}
2100
2101/*
2102 * Un-mask the interrupts on the chip.
2103 */
2104void
2105mpt_enable_ints(struct mpt_softc *mpt)
2106{
2107 	/* Unmask everything except the doorbell interrupt */
2108 mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK);
2109}
2110
2111/*
2112 * Mask the interrupts on the chip.
2113 */
2114void
2115mpt_disable_ints(struct mpt_softc *mpt)
2116{
2117 /* Mask all interrupts */
2118 mpt_write(mpt, MPT_OFFSET_INTR_MASK,
2119 MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
2120}
2121
2122static void
2123mpt_sysctl_attach(struct mpt_softc *mpt)
2124{
2125#if __FreeBSD_version >= 500000
2126 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
2127 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
2128
2129 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2130 "debug", CTLFLAG_RW, &mpt->verbose, 0,
2131 "Debugging/Verbose level");
2132 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2133 "role", CTLFLAG_RD, &mpt->role, 0,
2134 "HBA role");
2135#ifdef MPT_TEST_MULTIPATH
2136 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2137 "failure_id", CTLFLAG_RW, &mpt->failure_id, -1,
2138 "Next Target to Fail");
2139#endif
2140#endif
2141}
2142
2143int
2144mpt_attach(struct mpt_softc *mpt)
2145{
2146 struct mpt_personality *pers;
2147 int i;
2148 int error;
2149
2150 mpt_core_attach(mpt);
2151 mpt_core_enable(mpt);
2152
2153 TAILQ_INSERT_TAIL(&mpt_tailq, mpt, links);
2154 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
2155 pers = mpt_personalities[i];
2156 if (pers == NULL) {
2157 continue;
2158 }
2159 if (pers->probe(mpt) == 0) {
2160 error = pers->attach(mpt);
2161 if (error != 0) {
2162 mpt_detach(mpt);
2163 return (error);
2164 }
2165 mpt->mpt_pers_mask |= (0x1 << pers->id);
2166 pers->use_count++;
2167 }
2168 }
2169
2170 /*
2171 * Now that we've attached everything, do the enable function
2172 * for all of the personalities. This allows the personalities
2173 * to do setups that are appropriate for them prior to enabling
2174 * any ports.
2175 */
2176 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
2177 pers = mpt_personalities[i];
2178 if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) {
2179 error = pers->enable(mpt);
2180 if (error != 0) {
2181 mpt_prt(mpt, "personality %s attached but would"
2182 " not enable (%d)\n", pers->name, error);
2183 mpt_detach(mpt);
2184 return (error);
2185 }
2186 }
2187 }
2188 return (0);
2189}
2190
2191int
2192mpt_shutdown(struct mpt_softc *mpt)
2193{
2194 struct mpt_personality *pers;
2195
2196 MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2197 pers->shutdown(mpt);
2198 }
2199 return (0);
2200}
2201
2202int
2203mpt_detach(struct mpt_softc *mpt)
2204{
2205 struct mpt_personality *pers;
2206
2207 MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2208 pers->detach(mpt);
2209 mpt->mpt_pers_mask &= ~(0x1 << pers->id);
2210 pers->use_count--;
2211 }
2212 TAILQ_REMOVE(&mpt_tailq, mpt, links);
2213 return (0);
2214}
2215
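/*
 * Reply dispatch is by callback index: each request encodes a handler
 * index in its MsgContext (e.g. MPT_REPLY_HANDLER_CONFIG), and
 * mpt_core_load() below seeds the mpt_reply_handlers[] table that is
 * consulted when the corresponding reply arrives.
 */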
2216int
2217mpt_core_load(struct mpt_personality *pers)
2218{
2219 int i;
2220
2221 /*
2222 * Setup core handlers and insert the default handler
2223 * into all "empty slots".
2224 */
2225 for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) {
2226 mpt_reply_handlers[i] = mpt_default_reply_handler;
2227 }
2228
2229 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
2230 mpt_event_reply_handler;
2231 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
2232 mpt_config_reply_handler;
2233 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
2234 mpt_handshake_reply_handler;
2235 return (0);
2236}
2237
2238/*
2239 * Initialize per-instance driver data and perform
2240 * initial controller configuration.
2241 */
2242int
2243mpt_core_attach(struct mpt_softc *mpt)
2244{
2245 int val, error;
2246
2247 LIST_INIT(&mpt->ack_frames);
2248 /* Put all request buffers on the free list */
2249 TAILQ_INIT(&mpt->request_pending_list);
2250 TAILQ_INIT(&mpt->request_free_list);
2251 TAILQ_INIT(&mpt->request_timeout_list);
2252 MPT_LOCK(mpt);
2253 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
2254 request_t *req = &mpt->request_pool[val];
2255 req->state = REQ_STATE_ALLOCATED;
2256 mpt_free_request(mpt, req);
2257 }
2258 MPT_UNLOCK(mpt);
2259 for (val = 0; val < MPT_MAX_LUNS; val++) {
2260 STAILQ_INIT(&mpt->trt[val].atios);
2261 STAILQ_INIT(&mpt->trt[val].inots);
2262 }
2263 STAILQ_INIT(&mpt->trt_wildcard.atios);
2264 STAILQ_INIT(&mpt->trt_wildcard.inots);
2265#ifdef MPT_TEST_MULTIPATH
2266 mpt->failure_id = -1;
2267#endif
2268 mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE;
2269 mpt_sysctl_attach(mpt);
2270 mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
2271 mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));
2272
2273 MPT_LOCK(mpt);
2274 error = mpt_configure_ioc(mpt, 0, 0);
2275 MPT_UNLOCK(mpt);
2276
2277 return (error);
2278}
2279
2280int
2281mpt_core_enable(struct mpt_softc *mpt)
2282{
2283 	/*
2284 	 * We enter with the IOC enabled, but with async events,
2285 	 * ports and interrupts not yet enabled.
2286 	 */
2288 MPT_LOCK(mpt);
2289
2290 	/*
2291 	 * Enable asynchronous event reporting; all personalities
2292 	 * have attached, so they should now be able to field
2293 	 * async events.
2294 	 */
2295 mpt_send_event_request(mpt, 1);
2296
2297 /*
2298 * Catch any pending interrupts
2299 *
2300 	 * This seems to be crucial; otherwise
2301 	 * the port enable below times out.
2302 */
2303 mpt_intr(mpt);
2304
2305 /*
2306 * Enable Interrupts
2307 */
2308 mpt_enable_ints(mpt);
2309
2310 /*
2311 * Catch any pending interrupts
2312 *
2313 	 * This seems to be crucial; otherwise
2314 	 * the port enable below times out.
2315 */
2316 mpt_intr(mpt);
2317
2318 /*
2319 * Enable the port.
2320 */
2321 if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2322 mpt_prt(mpt, "failed to enable port 0\n");
2323 MPT_UNLOCK(mpt);
2324 return (ENXIO);
2325 }
2326 MPT_UNLOCK(mpt);
2327 return (0);
2328}
2329
2330void
2331mpt_core_shutdown(struct mpt_softc *mpt)
2332{
2333 mpt_disable_ints(mpt);
2334}
2335
2336void
2337mpt_core_detach(struct mpt_softc *mpt)
2338{
2339 /*
2340 * XXX: FREE MEMORY
2341 */
2342 mpt_disable_ints(mpt);
2343}
2344
2345int
2346mpt_core_unload(struct mpt_personality *pers)
2347{
2348 	/* Unload is always successful. */
2349 return (0);
2350}
2351
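/*
 * A firmware upload request fits in a single frame: the fixed
 * MSG_FW_UPLOAD header with its SGL union replaced by one
 * transaction-context element plus one 32-bit simple SGE that
 * describes the host buffer.
 */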
2352#define FW_UPLOAD_REQ_SIZE \
2353 (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION) \
2354 + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32))
2355
2356static int
2357mpt_upload_fw(struct mpt_softc *mpt)
2358{
2359 uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE];
2360 MSG_FW_UPLOAD_REPLY fw_reply;
2361 MSG_FW_UPLOAD *fw_req;
2362 FW_UPLOAD_TCSGE *tsge;
2363 SGE_SIMPLE32 *sge;
2364 uint32_t flags;
2365 int error;
2366
2367 memset(&fw_req_buf, 0, sizeof(fw_req_buf));
2368 fw_req = (MSG_FW_UPLOAD *)fw_req_buf;
2369 fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
2370 fw_req->Function = MPI_FUNCTION_FW_UPLOAD;
2371 fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
2372 tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL;
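	/*
	 * 12 bytes of transaction details; presumably the three 32-bit
	 * words that follow the TCSGE header word.
	 */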
2373 tsge->DetailsLength = 12;
2374 tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
2375 tsge->ImageSize = htole32(mpt->fw_image_size);
2376 sge = (SGE_SIMPLE32 *)(tsge + 1);
2377 flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER
2378 | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT
2379 | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST);
2380 flags <<= MPI_SGE_FLAGS_SHIFT;
2381 sge->FlagsLength = htole32(flags | mpt->fw_image_size);
2382 sge->Address = htole32(mpt->fw_phys);
2383 error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf);
2384 if (error)
2385 return(error);
2386 error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply);
2387 return (error);
2388}
2389
2390static void
2391mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
2392 uint32_t *data, bus_size_t len)
2393{
2394 uint32_t *data_end;
2395
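	/* Round len up to whole words; the diag port is written 32 bits at a time. */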
2396 data_end = data + (roundup2(len, sizeof(uint32_t)) / 4);
2397 if (mpt->is_sas) {
2398 pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2399 }
2400 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
2401 while (data != data_end) {
2402 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
2403 data++;
2404 }
2405 if (mpt->is_sas) {
2406 pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2407 }
2408}
2409
2410static int
2411mpt_download_fw(struct mpt_softc *mpt)
2412{
2413 MpiFwHeader_t *fw_hdr;
2414 int error;
2415 uint32_t ext_offset;
2416 uint32_t data;
2417
2418 mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
2419 mpt->fw_image_size);
2420
2421 error = mpt_enable_diag_mode(mpt);
2422 if (error != 0) {
2423 mpt_prt(mpt, "Could not enter diagnostic mode!\n");
2424 return (EIO);
2425 }
2426
2427 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
2428 MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM);
2429
2430 fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
2431 mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr,
2432 fw_hdr->ImageSize);
2433
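	/*
	 * Chase the chain of extended images; each offset is relative
	 * to the start of the main firmware header.
	 */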
2434 ext_offset = fw_hdr->NextImageHeaderOffset;
2435 while (ext_offset != 0) {
2436 MpiExtImageHeader_t *ext;
2437
2438 ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset);
2439 ext_offset = ext->NextImageHeaderOffset;
2440
2441 mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext,
2442 ext->ImageSize);
2443 }
2444
2445 if (mpt->is_sas) {
2446 pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2447 }
2448 /* Setup the address to jump to on reset. */
2449 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
2450 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);
2451
2452 /*
2453 * The controller sets the "flash bad" status after attempting
2454 * to auto-boot from flash. Clear the status so that the controller
2455 * will continue the boot process with our newly installed firmware.
2456 */
2457 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2458 data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
2459 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2460 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);
2461
2462 if (mpt->is_sas) {
2463 pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2464 }
2465
2466 /*
2467 * Re-enable the processor and clear the boot halt flag.
2468 */
2469 data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
2470 data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM);
2471 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);
2472
2473 mpt_disable_diag_mode(mpt);
2474 return (0);
2475}
2476
2477/*
2478 * Allocate/Initialize data structures for the controller. Called
2479 * once at instance startup.
2480 */
2481static int
2482mpt_configure_ioc(struct mpt_softc *mpt, int tn, int needreset)
2483{
2484 PTR_MSG_PORT_FACTS_REPLY pfp;
2485 int error, port;
2486 size_t len;
2487
2488 if (tn == MPT_MAX_TRYS) {
2489 return (-1);
2490 }
2491
2492 /*
2493 * No need to reset if the IOC is already in the READY state.
2494 *
2495 * Force reset if initialization failed previously.
2496 * Note that a hard_reset of the second channel of a '929
2497 * will stop operation of the first channel. Hopefully, if the
2498 * first channel is ok, the second will not require a hard
2499 * reset.
2500 */
2501 if (needreset || MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_READY) {
2502 if (mpt_reset(mpt, FALSE) != MPT_OK) {
2503 			return (mpt_configure_ioc(mpt, tn + 1, 1));
2504 }
2505 needreset = 0;
2506 }
2507
2508 if (mpt_get_iocfacts(mpt, &mpt->ioc_facts) != MPT_OK) {
2509 mpt_prt(mpt, "mpt_get_iocfacts failed\n");
2510 		return (mpt_configure_ioc(mpt, tn + 1, 1));
2511 }
2512 mpt2host_iocfacts_reply(&mpt->ioc_facts);
2513
2514 mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
2515 mpt->ioc_facts.MsgVersion >> 8,
2516 mpt->ioc_facts.MsgVersion & 0xFF,
2517 mpt->ioc_facts.HeaderVersion >> 8,
2518 mpt->ioc_facts.HeaderVersion & 0xFF);
2519
2520 /*
2521 * Now that we know request frame size, we can calculate
2522 * the actual (reasonable) segment limit for read/write I/O.
2523 *
2524 * This limit is constrained by:
2525 *
2526 * + The size of each area we allocate per command (and how
2527 * many chain segments we can fit into it).
2528 * + The total number of areas we've set up.
2529 * + The actual chain depth the card will allow.
2530 *
2531 * The first area's segment count is limited by the I/O request
2532 * at the head of it. We cannot allocate realistically more
2533 * than MPT_MAX_REQUESTS areas. Therefore, to account for both
2534 * conditions, we'll just start out with MPT_MAX_REQUESTS-2.
2535 *
2536 */
2537 /* total number of request areas we (can) allocate */
2538 mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;
2539
2540 /* converted to the number of chain areas possible */
2541 mpt->max_seg_cnt *= MPT_NRFM(mpt);
2542
2543 /* limited by the number of chain areas the card will support */
2544 if (mpt->max_seg_cnt > mpt->ioc_facts.MaxChainDepth) {
2545 mpt_lprt(mpt, MPT_PRT_DEBUG,
2546 "chain depth limited to %u (from %u)\n",
2547 mpt->ioc_facts.MaxChainDepth, mpt->max_seg_cnt);
2548 mpt->max_seg_cnt = mpt->ioc_facts.MaxChainDepth;
2549 }
2550
2551 /* converted to the number of simple sges in chain segments. */
2552 mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);
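	/*
	 * Worked example with hypothetical values: MPT_MAX_REQUESTS = 256,
	 * MPT_NRFM = 2, MaxChainDepth = 128 and MPT_NSGL = 8 would give
	 * max_seg_cnt = min((256 - 2) * 2, 128) * (8 - 1) = 896.
	 */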
2553
2554 mpt_lprt(mpt, MPT_PRT_DEBUG, "Maximum Segment Count: %u\n",
2555 mpt->max_seg_cnt);
2556 mpt_lprt(mpt, MPT_PRT_DEBUG, "MsgLength=%u IOCNumber = %d\n",
2557 mpt->ioc_facts.MsgLength, mpt->ioc_facts.IOCNumber);
2558 mpt_lprt(mpt, MPT_PRT_DEBUG,
2559 "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
2560 "Request Frame Size %u bytes Max Chain Depth %u\n",
2561 mpt->ioc_facts.GlobalCredits, mpt->ioc_facts.BlockSize,
2562 mpt->ioc_facts.RequestFrameSize << 2,
2563 mpt->ioc_facts.MaxChainDepth);
2564 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOCFACTS: Num Ports %d, FWImageSize %d, "
2565 "Flags=%#x\n", mpt->ioc_facts.NumberOfPorts,
2566 (int)mpt->ioc_facts.FWImageSize, mpt->ioc_facts.Flags);
2567
2568 len = mpt->ioc_facts.NumberOfPorts * sizeof (MSG_PORT_FACTS_REPLY);
2569 mpt->port_facts = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
2570 if (mpt->port_facts == NULL) {
2571 mpt_prt(mpt, "unable to allocate memory for port facts\n");
2572 return (ENOMEM);
2573 }
2574
2576 if ((mpt->ioc_facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) &&
2577 (mpt->fw_uploaded == 0)) {
2578 struct mpt_map_info mi;
2579
2580 /*
2581 * In some configurations, the IOC's firmware is
2582 * stored in a shared piece of system NVRAM that
2583 		 * is only accessible via the BIOS.  In this
2584 		 * case, the IOC keeps a copy of the firmware in
2585 * RAM until the OS driver retrieves it. Once
2586 * retrieved, we are responsible for re-downloading
2587 * the firmware after any hard-reset.
2588 */
2589 mpt->fw_image_size = mpt->ioc_facts.FWImageSize;
2590 error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
2591 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2592 mpt->fw_image_size, 1, mpt->fw_image_size, 0,
2593 &mpt->fw_dmat);
2594 if (error != 0) {
2595 			mpt_prt(mpt, "cannot create firmware dma tag\n");
2596 return (ENOMEM);
2597 }
2598 error = bus_dmamem_alloc(mpt->fw_dmat,
2599 (void **)&mpt->fw_image, BUS_DMA_NOWAIT, &mpt->fw_dmap);
2600 if (error != 0) {
2601 mpt_prt(mpt, "cannot allocate firmware memory\n");
2602 bus_dma_tag_destroy(mpt->fw_dmat);
2603 return (ENOMEM);
2604 }
2605 mi.mpt = mpt;
2606 mi.error = 0;
2607 bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
2608 mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, &mi, 0);
2609 mpt->fw_phys = mi.phys;
2610
2611 error = mpt_upload_fw(mpt);
2612 if (error != 0) {
2613 mpt_prt(mpt, "firmware upload failed.\n");
2614 bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
2615 bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
2616 mpt->fw_dmap);
2617 bus_dma_tag_destroy(mpt->fw_dmat);
2618 mpt->fw_image = NULL;
2619 return (EIO);
2620 }
2621 mpt->fw_uploaded = 1;
2622 }
2623
2624 for (port = 0; port < mpt->ioc_facts.NumberOfPorts; port++) {
2625 pfp = &mpt->port_facts[port];
2626 error = mpt_get_portfacts(mpt, 0, pfp);
2627 if (error != MPT_OK) {
2628 mpt_prt(mpt,
2629 "mpt_get_portfacts on port %d failed\n", port);
2630 kfree(mpt->port_facts, M_DEVBUF);
2631 mpt->port_facts = NULL;
2632 			return (mpt_configure_ioc(mpt, tn + 1, 1));
2633 }
2634 mpt2host_portfacts_reply(pfp);
2635
2636 if (port > 0) {
2637 error = MPT_PRT_INFO;
2638 } else {
2639 error = MPT_PRT_DEBUG;
2640 }
2641 mpt_lprt(mpt, error,
2642 "PORTFACTS[%d]: Type %x PFlags %x IID %d MaxDev %d\n",
2643 port, pfp->PortType, pfp->ProtocolFlags, pfp->PortSCSIID,
2644 pfp->MaxDevices);
2645
2647
2648 /*
2649 * XXX: Not yet supporting more than port 0
2650 */
2651 pfp = &mpt->port_facts[0];
2652 if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_FC) {
2653 mpt->is_fc = 1;
2654 mpt->is_sas = 0;
2655 mpt->is_spi = 0;
2656 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
2657 mpt->is_fc = 0;
2658 mpt->is_sas = 1;
2659 mpt->is_spi = 0;
2660 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SCSI) {
2661 mpt->is_fc = 0;
2662 mpt->is_sas = 0;
2663 mpt->is_spi = 1;
2664 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_ISCSI) {
2665 mpt_prt(mpt, "iSCSI not supported yet\n");
2666 return (ENXIO);
2667 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_INACTIVE) {
2668 mpt_prt(mpt, "Inactive Port\n");
2669 return (ENXIO);
2670 } else {
2671 mpt_prt(mpt, "unknown Port Type %#x\n", pfp->PortType);
2672 return (ENXIO);
2673 }
2674
2675 /*
2676 * Set our role with what this port supports.
2677 *
2678 * Note this might be changed later in different modules
2679 * if this is different from what is wanted.
2680 */
2681 mpt->role = MPT_ROLE_NONE;
2682 if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
2683 mpt->role |= MPT_ROLE_INITIATOR;
2684 }
2685 if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
2686 mpt->role |= MPT_ROLE_TARGET;
2687 }
2688
2689 /*
2690 * Enable the IOC
2691 */
2692 if (mpt_enable_ioc(mpt, 1) != MPT_OK) {
2693 mpt_prt(mpt, "unable to initialize IOC\n");
2694 return (ENXIO);
2695 }
2696
2697 /*
2698 * Read IOC configuration information.
2699 *
2700 * We need this to determine whether or not we have certain
2701 * settings for Integrated Mirroring (e.g.).
2702 */
2703 mpt_read_config_info_ioc(mpt);
2704
2705 return (0);
2706}
2707
2708static int
2709mpt_enable_ioc(struct mpt_softc *mpt, int portenable)
2710{
2711 uint32_t pptr;
2712 int val;
2713
2714 if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
2715 mpt_prt(mpt, "mpt_send_ioc_init failed\n");
2716 return (EIO);
2717 }
2718
2719 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n");
2720
2721 if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) {
2722 mpt_prt(mpt, "IOC failed to go to run state\n");
2723 return (ENXIO);
2724 }
2725 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");
2726
2727 /*
2728 * Give it reply buffers
2729 *
2730 * Do *not* exceed global credits.
2731 */
2732 for (val = 0, pptr = mpt->reply_phys;
2733 (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
2734 pptr += MPT_REPLY_SIZE) {
2735 mpt_free_reply(mpt, pptr);
2736 if (++val == mpt->ioc_facts.GlobalCredits - 1)
2737 break;
2738 }
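	/* One page's worth of reply frames, capped at GlobalCredits - 1. */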
2739
2741 /*
2742 * Enable the port if asked. This is only done if we're resetting
2743 * the IOC after initial startup.
2744 */
2745 if (portenable) {
2746 /*
2747 * Enable asynchronous event reporting
2748 */
2749 mpt_send_event_request(mpt, 1);
2750
2751 if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2752 mpt_prt(mpt, "failed to enable port 0\n");
2753 return (ENXIO);
2754 }
2755 }
2756 return (MPT_OK);
2757}
2758
2759/*
2760  * Endian Conversion Functions; only used on Big Endian machines
2761 */
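/*
 * On little-endian hosts the MPT_2_HOST/HOST_2_MPT macros presumably
 * expand to nothing and this entire block is compiled out.
 */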
2762#if _BYTE_ORDER == _BIG_ENDIAN
2763void
2764mpt2host_sge_simple_union(SGE_SIMPLE_UNION *sge)
2765{
2766
2767 MPT_2_HOST32(sge, FlagsLength);
2768 MPT_2_HOST32(sge, u.Address64.Low);
2769 MPT_2_HOST32(sge, u.Address64.High);
2770}
2771
2772void
2773mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *rp)
2774{
2775
2776 MPT_2_HOST16(rp, MsgVersion);
2777 MPT_2_HOST16(rp, HeaderVersion);
2778 MPT_2_HOST32(rp, MsgContext);
2779 MPT_2_HOST16(rp, IOCExceptions);
2780 MPT_2_HOST16(rp, IOCStatus);
2781 MPT_2_HOST32(rp, IOCLogInfo);
2782 MPT_2_HOST16(rp, ReplyQueueDepth);
2783 MPT_2_HOST16(rp, RequestFrameSize);
2784 MPT_2_HOST16(rp, Reserved_0101_FWVersion);
2785 MPT_2_HOST16(rp, ProductID);
2786 MPT_2_HOST32(rp, CurrentHostMfaHighAddr);
2787 MPT_2_HOST16(rp, GlobalCredits);
2788 MPT_2_HOST32(rp, CurrentSenseBufferHighAddr);
2789 MPT_2_HOST16(rp, CurReplyFrameSize);
2790 MPT_2_HOST32(rp, FWImageSize);
2791 MPT_2_HOST32(rp, IOCCapabilities);
2792 MPT_2_HOST32(rp, FWVersion.Word);
2793 MPT_2_HOST16(rp, HighPriorityQueueDepth);
2794 MPT_2_HOST16(rp, Reserved2);
2795 mpt2host_sge_simple_union(&rp->HostPageBufferSGE);
2796 MPT_2_HOST32(rp, ReplyFifoHostSignalingAddr);
2797}
2798
2799void
2800mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *pfp)
2801{
2802
2803 MPT_2_HOST16(pfp, Reserved);
2804 MPT_2_HOST16(pfp, Reserved1);
2805 MPT_2_HOST32(pfp, MsgContext);
2806 MPT_2_HOST16(pfp, Reserved2);
2807 MPT_2_HOST16(pfp, IOCStatus);
2808 MPT_2_HOST32(pfp, IOCLogInfo);
2809 MPT_2_HOST16(pfp, MaxDevices);
2810 MPT_2_HOST16(pfp, PortSCSIID);
2811 MPT_2_HOST16(pfp, ProtocolFlags);
2812 MPT_2_HOST16(pfp, MaxPostedCmdBuffers);
2813 MPT_2_HOST16(pfp, MaxPersistentIDs);
2814 MPT_2_HOST16(pfp, MaxLanBuckets);
2815 MPT_2_HOST16(pfp, Reserved4);
2816 MPT_2_HOST32(pfp, Reserved5);
2817}
2818
2819void
2820mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *ioc2)
2821{
2822 int i;
2823
2824 MPT_2_HOST32(ioc2, CapabilitiesFlags);
2825 for (i = 0; i < MPI_IOC_PAGE_2_RAID_VOLUME_MAX; i++) {
2826 MPT_2_HOST16(ioc2, RaidVolume[i].Reserved3);
2827 }
2828}
2829
2830void
2831mpt2host_config_page_ioc3(CONFIG_PAGE_IOC_3 *ioc3)
2832{
2833
2834 MPT_2_HOST16(ioc3, Reserved2);
2835}
2836
2837void
2838mpt2host_config_page_scsi_port_0(CONFIG_PAGE_SCSI_PORT_0 *sp0)
2839{
2840
2841 MPT_2_HOST32(sp0, Capabilities);
2842 MPT_2_HOST32(sp0, PhysicalInterface);
2843}
2844
2845void
2846mpt2host_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
2847{
2848
2849 MPT_2_HOST32(sp1, Configuration);
2850 MPT_2_HOST32(sp1, OnBusTimerValue);
2851 MPT_2_HOST16(sp1, IDConfig);
2852}
2853
2854void
2855host2mpt_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
2856{
2857
2858 HOST_2_MPT32(sp1, Configuration);
2859 HOST_2_MPT32(sp1, OnBusTimerValue);
2860 HOST_2_MPT16(sp1, IDConfig);
2861}
2862
2863void
2864mpt2host_config_page_scsi_port_2(CONFIG_PAGE_SCSI_PORT_2 *sp2)
2865{
2866 int i;
2867
2868 MPT_2_HOST32(sp2, PortFlags);
2869 MPT_2_HOST32(sp2, PortSettings);
2870 for (i = 0; i < sizeof(sp2->DeviceSettings) /
2871 sizeof(*sp2->DeviceSettings); i++) {
2872 MPT_2_HOST16(sp2, DeviceSettings[i].DeviceFlags);
2873 }
2874}
2875
2876void
2877mpt2host_config_page_scsi_device_0(CONFIG_PAGE_SCSI_DEVICE_0 *sd0)
2878{
2879
2880 MPT_2_HOST32(sd0, NegotiatedParameters);
2881 MPT_2_HOST32(sd0, Information);
2882}
2883
2884void
2885mpt2host_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
2886{
2887
2888 MPT_2_HOST32(sd1, RequestedParameters);
2889 MPT_2_HOST32(sd1, Reserved);
2890 MPT_2_HOST32(sd1, Configuration);
2891}
2892
2893void
2894host2mpt_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
2895{
2896
2897 HOST_2_MPT32(sd1, RequestedParameters);
2898 HOST_2_MPT32(sd1, Reserved);
2899 HOST_2_MPT32(sd1, Configuration);
2900}
2901
2902void
2903mpt2host_config_page_fc_port_0(CONFIG_PAGE_FC_PORT_0 *fp0)
2904{
2905
2906 MPT_2_HOST32(fp0, Flags);
2907 MPT_2_HOST32(fp0, PortIdentifier);
2908 MPT_2_HOST32(fp0, WWNN.Low);
2909 MPT_2_HOST32(fp0, WWNN.High);
2910 MPT_2_HOST32(fp0, WWPN.Low);
2911 MPT_2_HOST32(fp0, WWPN.High);
2912 MPT_2_HOST32(fp0, SupportedServiceClass);
2913 MPT_2_HOST32(fp0, SupportedSpeeds);
2914 MPT_2_HOST32(fp0, CurrentSpeed);
2915 MPT_2_HOST32(fp0, MaxFrameSize);
2916 MPT_2_HOST32(fp0, FabricWWNN.Low);
2917 MPT_2_HOST32(fp0, FabricWWNN.High);
2918 MPT_2_HOST32(fp0, FabricWWPN.Low);
2919 MPT_2_HOST32(fp0, FabricWWPN.High);
2920 MPT_2_HOST32(fp0, DiscoveredPortsCount);
2921 MPT_2_HOST32(fp0, MaxInitiators);
2922}
2923
2924void
2925mpt2host_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
2926{
2927
2928 MPT_2_HOST32(fp1, Flags);
2929 MPT_2_HOST32(fp1, NoSEEPROMWWNN.Low);
2930 MPT_2_HOST32(fp1, NoSEEPROMWWNN.High);
2931 MPT_2_HOST32(fp1, NoSEEPROMWWPN.Low);
2932 MPT_2_HOST32(fp1, NoSEEPROMWWPN.High);
2933}
2934
2935void
2936host2mpt_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
2937{
2938
2939 HOST_2_MPT32(fp1, Flags);
2940 HOST_2_MPT32(fp1, NoSEEPROMWWNN.Low);
2941 HOST_2_MPT32(fp1, NoSEEPROMWWNN.High);
2942 HOST_2_MPT32(fp1, NoSEEPROMWWPN.Low);
2943 HOST_2_MPT32(fp1, NoSEEPROMWWPN.High);
2944}
2945
2946void
2947mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *volp)
2948{
2949 int i;
2950
2951 MPT_2_HOST16(volp, VolumeStatus.Reserved);
2952 MPT_2_HOST16(volp, VolumeSettings.Settings);
2953 MPT_2_HOST32(volp, MaxLBA);
2954 MPT_2_HOST32(volp, MaxLBAHigh);
2955 MPT_2_HOST32(volp, StripeSize);
2956 MPT_2_HOST32(volp, Reserved2);
2957 MPT_2_HOST32(volp, Reserved3);
2958 for (i = 0; i < MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX; i++) {
2959 MPT_2_HOST16(volp, PhysDisk[i].Reserved);
2960 }
2961}
2962
2963void
2964mpt2host_config_page_raid_phys_disk_0(CONFIG_PAGE_RAID_PHYS_DISK_0 *rpd0)
2965{
2966
2967 MPT_2_HOST32(rpd0, Reserved1);
2968 MPT_2_HOST16(rpd0, PhysDiskStatus.Reserved);
2969 MPT_2_HOST32(rpd0, MaxLBA);
2970 MPT_2_HOST16(rpd0, ErrorData.Reserved);
2971 MPT_2_HOST16(rpd0, ErrorData.ErrorCount);
2972 MPT_2_HOST16(rpd0, ErrorData.SmartCount);
2973}
2974
2975void
2976mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *vi)
2977{
2978
2979 MPT_2_HOST16(vi, TotalBlocks.High);
2980 MPT_2_HOST16(vi, TotalBlocks.Low);
2981 MPT_2_HOST16(vi, BlocksRemaining.High);
2982 MPT_2_HOST16(vi, BlocksRemaining.Low);
2983}
2984#endif