MPT - fix all compiler warnings
[dragonfly.git] / sys / dev / disk / mpt / mpt.c
/*-
 * Generic routines for LSI Fusion adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * $FreeBSD: src/sys/dev/mpt/mpt.c,v 1.49 2009/01/07 21:52:47 marius Exp $
 */

#include <sys/cdefs.h>

#include <dev/disk/mpt/mpt.h>
#include <dev/disk/mpt/mpt_cam.h> /* XXX For static handler registration */
#include <dev/disk/mpt/mpt_raid.h> /* XXX For static handler registration */

#include <dev/disk/mpt/mpilib/mpi.h>
#include <dev/disk/mpt/mpilib/mpi_ioc.h>
#include <dev/disk/mpt/mpilib/mpi_fc.h>
#include <dev/disk/mpt/mpilib/mpi_targ.h>

#include <sys/sysctl.h>

#define MPT_MAX_TRYS 3
#define MPT_MAX_WAIT 300000

static int maxwait_ack = 0;
static int maxwait_int = 0;
static int maxwait_state = 0;

static TAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq);
mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS];

static mpt_reply_handler_t mpt_default_reply_handler;
static mpt_reply_handler_t mpt_config_reply_handler;
static mpt_reply_handler_t mpt_handshake_reply_handler;
static mpt_reply_handler_t mpt_event_reply_handler;
static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
    MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
static int mpt_soft_reset(struct mpt_softc *mpt);
static void mpt_hard_reset(struct mpt_softc *mpt);
static int mpt_configure_ioc(struct mpt_softc *mpt, int, int);
static int mpt_enable_ioc(struct mpt_softc *mpt, int);

/************************* Personality Module Support *************************/
/*
 * We include one extra entry that is guaranteed to be NULL
 * to simplify our iterator.
 */
static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
static __inline struct mpt_personality*
        mpt_pers_find(struct mpt_softc *, u_int);
static __inline struct mpt_personality*
        mpt_pers_find_reverse(struct mpt_softc *, u_int);

static __inline struct mpt_personality *
mpt_pers_find(struct mpt_softc *mpt, u_int start_at)
{
        KASSERT(start_at <= MPT_MAX_PERSONALITIES,
            ("mpt_pers_find: starting position out of range\n"));

        while (start_at < MPT_MAX_PERSONALITIES
            && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
                start_at++;
        }
        return (mpt_personalities[start_at]);
}

/*
 * Used infrequently, so no need to optimize like a forward
 * traversal, where we use the "MAX+1 is guaranteed to be NULL"
 * trick.
 */
static __inline struct mpt_personality *
mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
{
        while (start_at < MPT_MAX_PERSONALITIES
            && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
                start_at--;
        }
        if (start_at < MPT_MAX_PERSONALITIES)
                return (mpt_personalities[start_at]);
        return (NULL);
}

#define MPT_PERS_FOREACH(mpt, pers)                             \
        for (pers = mpt_pers_find(mpt, /*start_at*/0);          \
             pers != NULL;                                      \
             pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))

#define MPT_PERS_FOREACH_REVERSE(mpt, pers)                             \
        for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
             pers != NULL;                                              \
             pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))

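/*
 * Usage sketch: walking every personality attached to a unit.  This is
 * exactly the pattern mpt_postattach() uses below with the ready()
 * hook; it is repeated here only to make the macro's intent concrete.
 *
 *      struct mpt_personality *pers;
 *
 *      MPT_PERS_FOREACH(mpt, pers)
 *              pers->shutdown(mpt);
 */
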
static mpt_load_handler_t mpt_stdload;
static mpt_probe_handler_t mpt_stdprobe;
static mpt_attach_handler_t mpt_stdattach;
static mpt_enable_handler_t mpt_stdenable;
static mpt_ready_handler_t mpt_stdready;
static mpt_event_handler_t mpt_stdevent;
static mpt_reset_handler_t mpt_stdreset;
static mpt_shutdown_handler_t mpt_stdshutdown;
static mpt_detach_handler_t mpt_stddetach;
static mpt_unload_handler_t mpt_stdunload;
static struct mpt_personality mpt_default_personality =
{
        .load = mpt_stdload,
        .probe = mpt_stdprobe,
        .attach = mpt_stdattach,
        .enable = mpt_stdenable,
        .ready = mpt_stdready,
        .event = mpt_stdevent,
        .reset = mpt_stdreset,
        .shutdown = mpt_stdshutdown,
        .detach = mpt_stddetach,
        .unload = mpt_stdunload
};

static mpt_load_handler_t mpt_core_load;
static mpt_attach_handler_t mpt_core_attach;
static mpt_enable_handler_t mpt_core_enable;
static mpt_reset_handler_t mpt_core_ioc_reset;
static mpt_event_handler_t mpt_core_event;
static mpt_shutdown_handler_t mpt_core_shutdown;
static mpt_shutdown_handler_t mpt_core_detach;
static mpt_unload_handler_t mpt_core_unload;
static struct mpt_personality mpt_core_personality =
{
        .name = "mpt_core",
        .load = mpt_core_load,
//      .attach = mpt_core_attach,
//      .enable = mpt_core_enable,
        .event = mpt_core_event,
        .reset = mpt_core_ioc_reset,
        .shutdown = mpt_core_shutdown,
        .detach = mpt_core_detach,
        .unload = mpt_core_unload,
};

/*
 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
 * ordering information.  We want the core to always register FIRST.
 * Other modules are set to SI_ORDER_SECOND.
 */
static moduledata_t mpt_core_mod = {
        "mpt_core", mpt_modevent, &mpt_core_personality
};
DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(mpt_core, 1);
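
/*
 * A personality other than the core hooks in the same way, but at
 * SI_ORDER_SECOND so that the core always registers first.  A minimal
 * sketch follows; the "mpt_foo" names are hypothetical, not part of
 * this driver (the CAM and RAID personalities are the real examples).
 * Any handler left NULL is filled in with the matching mpt_std* noop
 * by mpt_modevent() at MOD_LOAD time.
 *
 *      static struct mpt_personality mpt_foo_personality = {
 *              .name = "mpt_foo",
 *              .attach = mpt_foo_attach,
 *              .event = mpt_foo_event,
 *      };
 *      static moduledata_t mpt_foo_mod = {
 *              "mpt_foo", mpt_modevent, &mpt_foo_personality
 *      };
 *      DECLARE_MODULE(mpt_foo, mpt_foo_mod, SI_SUB_DRIVERS, SI_ORDER_SECOND);
 *      MODULE_VERSION(mpt_foo, 1);
 *      MODULE_DEPEND(mpt_foo, mpt_core, 1, 1, 1);
 */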

#define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))

int
mpt_modevent(module_t mod, int type, void *data)
{
        struct mpt_personality *pers;
        int error;

        pers = (struct mpt_personality *)data;

        error = 0;
        switch (type) {
        case MOD_LOAD:
        {
                mpt_load_handler_t **def_handler;
                mpt_load_handler_t **pers_handler;
                int i;

                for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
                        if (mpt_personalities[i] == NULL)
                                break;
                }
                if (i >= MPT_MAX_PERSONALITIES) {
                        error = ENOMEM;
                        break;
                }
                pers->id = i;
                mpt_personalities[i] = pers;

                /* Install standard/noop handlers for any NULL entries. */
                def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality);
                pers_handler = MPT_PERS_FIRST_HANDLER(pers);
                while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) {
                        if (*pers_handler == NULL)
                                *pers_handler = *def_handler;
                        pers_handler++;
                        def_handler++;
                }

                error = (pers->load(pers));
                if (error != 0)
                        mpt_personalities[i] = NULL;
                break;
        }
        case MOD_SHUTDOWN:
                break;
#if __FreeBSD_version >= 500000
        case MOD_QUIESCE:
                break;
#endif
        case MOD_UNLOAD:
                error = pers->unload(pers);
                mpt_personalities[pers->id] = NULL;
                break;
        default:
                error = EINVAL;
                break;
        }
        return (error);
}

int
mpt_stdload(struct mpt_personality *pers)
{
        /* Load is always successful. */
        return (0);
}

int
mpt_stdprobe(struct mpt_softc *mpt)
{
        /* Probe is always successful. */
        return (0);
}

int
mpt_stdattach(struct mpt_softc *mpt)
{
        /* Attach is always successful. */
        return (0);
}

int
mpt_stdenable(struct mpt_softc *mpt)
{
        /* Enable is always successful. */
        return (0);
}

void
mpt_stdready(struct mpt_softc *mpt)
{
}


int
mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
{
        mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n",
            (unsigned)(msg->Event & 0xFF));
        /* Event was not for us. */
        return (0);
}

void
mpt_stdreset(struct mpt_softc *mpt, int type)
{
}

void
mpt_stdshutdown(struct mpt_softc *mpt)
{
}

void
mpt_stddetach(struct mpt_softc *mpt)
{
}

int
mpt_stdunload(struct mpt_personality *pers)
{
        /* Unload is always successful. */
        return (0);
}

/*
 * Post driver attachment, we may want to perform some global actions.
 * Here is the hook to do so.
 */

static void
mpt_postattach(void *unused)
{
        struct mpt_softc *mpt;
        struct mpt_personality *pers;

        TAILQ_FOREACH(mpt, &mpt_tailq, links) {
                MPT_PERS_FOREACH(mpt, pers)
                        pers->ready(mpt);
        }
}
SYSINIT(mptdev, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE, mpt_postattach, NULL);


/******************************* Bus DMA Support ******************************/
void
mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        struct mpt_map_info *map_info;

        map_info = (struct mpt_map_info *)arg;
        map_info->error = error;
        map_info->phys = segs->ds_addr;
}

/**************************** Reply/Event Handling ****************************/
int
mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
    mpt_handler_t handler, uint32_t *phandler_id)
{

        switch (type) {
        case MPT_HANDLER_REPLY:
        {
                u_int cbi;
                u_int free_cbi;

                if (phandler_id == NULL)
                        return (EINVAL);

                free_cbi = MPT_HANDLER_ID_NONE;
                for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) {
                        /*
                         * If the same handler is registered multiple
                         * times, don't error out.  Just return the
                         * index of the original registration.
                         */
                        if (mpt_reply_handlers[cbi] == handler.reply_handler) {
                                *phandler_id = MPT_CBI_TO_HID(cbi);
                                return (0);
                        }

                        /*
                         * Fill from the front in the hope that
                         * all registered handlers consume only a
                         * single cache line.
                         *
                         * We don't break on the first empty slot so
                         * that the full table is checked to see if
                         * this handler was previously registered.
                         */
                        if (free_cbi == MPT_HANDLER_ID_NONE &&
                            (mpt_reply_handlers[cbi]
                             == mpt_default_reply_handler))
                                free_cbi = cbi;
                }
                if (free_cbi == MPT_HANDLER_ID_NONE) {
                        return (ENOMEM);
                }
                mpt_reply_handlers[free_cbi] = handler.reply_handler;
                *phandler_id = MPT_CBI_TO_HID(free_cbi);
                break;
        }
        default:
                mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
                return (EINVAL);
        }
        return (0);
}

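/*
 * Usage sketch: how a personality obtains a reply handler id and then
 * builds message contexts from it.  Hypothetical illustration only
 * (mpt_foo_reply_handler is not a real function in this driver); the
 * CAM and RAID personalities follow this pattern with their own
 * handlers.
 *
 *      mpt_handler_t handler;
 *      uint32_t handler_id;
 *
 *      handler.reply_handler = mpt_foo_reply_handler;
 *      if (mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
 *          &handler_id) != 0)
 *              return (EIO);
 *      ...
 *      msg->MsgContext = htole32(req->index | handler_id);
 */
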
984263bc 452int
d751f32e
MD
453mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
454 mpt_handler_t handler, uint32_t handler_id)
984263bc 455{
984263bc 456
d751f32e
MD
457 switch (type) {
458 case MPT_HANDLER_REPLY:
459 {
460 u_int cbi;
984263bc 461
d751f32e
MD
462 cbi = MPT_CBI(handler_id);
463 if (cbi >= MPT_NUM_REPLY_HANDLERS
464 || mpt_reply_handlers[cbi] != handler.reply_handler)
465 return (ENOENT);
466 mpt_reply_handlers[cbi] = mpt_default_reply_handler;
467 break;
984263bc 468 }
d751f32e
MD
469 default:
470 mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
471 return (EINVAL);
984263bc 472 }
d751f32e
MD
473 return (0);
474}
984263bc 475
static int
mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
        mpt_prt(mpt,
            "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n",
            req, req->serno, reply_desc, reply_frame);

        if (reply_frame != NULL)
                mpt_dump_reply_frame(mpt, reply_frame);

        mpt_prt(mpt, "Reply Frame Ignored\n");

        return (/*free_reply*/TRUE);
}

static int
mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
        if (req != NULL) {

                if (reply_frame != NULL) {
                        MSG_CONFIG *cfgp;
                        MSG_CONFIG_REPLY *reply;

                        cfgp = (MSG_CONFIG *)req->req_vbuf;
                        reply = (MSG_CONFIG_REPLY *)reply_frame;
                        req->IOCStatus = le16toh(reply_frame->IOCStatus);
                        bcopy(&reply->Header, &cfgp->Header,
                            sizeof(cfgp->Header));
                        cfgp->ExtPageLength = reply->ExtPageLength;
                        cfgp->ExtPageType = reply->ExtPageType;
                }
                req->state &= ~REQ_STATE_QUEUED;
                req->state |= REQ_STATE_DONE;
                TAILQ_REMOVE(&mpt->request_pending_list, req, links);
                if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
                        wakeup(req);
                } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
                        /*
                         * Whew- we can free this request (late completion)
                         */
                        mpt_free_request(mpt, req);
                }
        }

        return (TRUE);
}

static int
mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
        /* Nothing to be done. */
        return (TRUE);
}

static int
mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
        int free_reply;

        KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler"));
        KASSERT(req != NULL, ("null request in mpt_event_reply_handler"));

        free_reply = TRUE;
        switch (reply_frame->Function) {
        case MPI_FUNCTION_EVENT_NOTIFICATION:
        {
                MSG_EVENT_NOTIFY_REPLY *msg;
                struct mpt_personality *pers;
                u_int handled;

                handled = 0;
                msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
                msg->EventDataLength = le16toh(msg->EventDataLength);
                msg->IOCStatus = le16toh(msg->IOCStatus);
                msg->IOCLogInfo = le32toh(msg->IOCLogInfo);
                msg->Event = le32toh(msg->Event);
                MPT_PERS_FOREACH(mpt, pers)
                        handled += pers->event(mpt, req, msg);

                if (handled == 0 && mpt->mpt_pers_mask == 0) {
                        mpt_lprt(mpt, MPT_PRT_INFO,
                            "No Handlers For Any Event Notify Frames. "
                            "Event %#x (ACK %sequired).\n",
                            (unsigned)msg->Event,
                            msg->AckRequired? "r" : "not r");
                } else if (handled == 0) {
                        mpt_lprt(mpt,
                            msg->AckRequired? MPT_PRT_WARN : MPT_PRT_INFO,
                            "Unhandled Event Notify Frame. Event %#x "
                            "(ACK %sequired).\n",
                            (unsigned)msg->Event,
                            msg->AckRequired? "r" : "not r");
                }

                if (msg->AckRequired) {
                        request_t *ack_req;
                        uint32_t context;

                        context = req->index | MPT_REPLY_HANDLER_EVENTS;
                        ack_req = mpt_get_request(mpt, FALSE);
                        if (ack_req == NULL) {
                                struct mpt_evtf_record *evtf;

                                evtf = (struct mpt_evtf_record *)reply_frame;
                                evtf->context = context;
                                LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
                                free_reply = FALSE;
                                break;
                        }
                        mpt_send_event_ack(mpt, ack_req, msg, context);
                        /*
                         * Don't check for CONTINUATION_REPLY here
                         */
                        return (free_reply);
                }
                break;
        }
        case MPI_FUNCTION_PORT_ENABLE:
                mpt_lprt(mpt, MPT_PRT_DEBUG, "enable port reply\n");
                break;
        case MPI_FUNCTION_EVENT_ACK:
                break;
        default:
                mpt_prt(mpt, "unknown event function: %x\n",
                    reply_frame->Function);
                break;
        }

        /*
         * I'm not sure that this continuation stuff works as it should.
         *
         * I've had FC async events occur that free the frame up because
         * the continuation bit isn't set, and then additional async events
         * occur using the same context.  As you might imagine, this leads
         * to a Very Bad Thing.
         *
         * Let's just be safe for now and not free them up until we figure
         * out what's actually happening here.
         */
#if 0
        if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) {
                TAILQ_REMOVE(&mpt->request_pending_list, req, links);
                mpt_free_request(mpt, req);
                mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation",
                    reply_frame->Function, req, req->serno);
                if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
                        MSG_EVENT_NOTIFY_REPLY *msg =
                            (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
                        mpt_prtc(mpt, " Event=0x%x AckReq=%d",
                            msg->Event, msg->AckRequired);
                }
        } else {
                mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation",
                    reply_frame->Function, req, req->serno);
                if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
                        MSG_EVENT_NOTIFY_REPLY *msg =
                            (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
                        mpt_prtc(mpt, " Event=0x%x AckReq=%d",
                            msg->Event, msg->AckRequired);
                }
                mpt_prtc(mpt, "\n");
        }
#endif
        return (free_reply);
}

/*
 * Process an asynchronous event from the IOC.
 */
static int
mpt_core_event(struct mpt_softc *mpt, request_t *req,
    MSG_EVENT_NOTIFY_REPLY *msg)
{
        mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
            (unsigned)(msg->Event & 0xFF));
        switch(msg->Event & 0xFF) {
        case MPI_EVENT_NONE:
                break;
        case MPI_EVENT_LOG_DATA:
        {
                int i;

                /* Some error occurred that LSI wants logged */
                mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
                    (unsigned)msg->IOCLogInfo);
                mpt_prt(mpt, "\tEvtLogData: Event Data:");
                for (i = 0; i < msg->EventDataLength; i++)
                        mpt_prtc(mpt, " %08x", (unsigned)msg->Data[i]);
                mpt_prtc(mpt, "\n");
                break;
        }
        case MPI_EVENT_EVENT_CHANGE:
                /*
                 * This is just an acknowledgement
                 * of our mpt_send_event_request.
                 */
                break;
        case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
                break;
        default:
                return (0);
                break;
        }
        return (1);
}

static void
mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
    MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
{
        MSG_EVENT_ACK *ackp;

        ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
        memset(ackp, 0, sizeof (*ackp));
        ackp->Function = MPI_FUNCTION_EVENT_ACK;
        ackp->Event = htole32(msg->Event);
        ackp->EventContext = htole32(msg->EventContext);
        ackp->MsgContext = htole32(context);
        mpt_check_doorbell(mpt);
        mpt_send_cmd(mpt, ack_req);
}

/***************************** Interrupt Handling *****************************/
void
mpt_intr(void *arg)
{
        struct mpt_softc *mpt;
        uint32_t reply_desc;
        int ntrips = 0;

        mpt = (struct mpt_softc *)arg;
        mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n");
        MPT_LOCK_ASSERT(mpt);

        while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
                request_t *req;
                MSG_DEFAULT_REPLY *reply_frame;
                uint32_t reply_baddr;
                uint32_t ctxt_idx;
                u_int cb_index;
                u_int req_index;
                int free_rf;

                req = NULL;
                reply_frame = NULL;
                reply_baddr = 0;
                if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
                        u_int offset;
                        /*
                         * Ensure that the reply frame is coherent.
                         */
                        reply_baddr = MPT_REPLY_BADDR(reply_desc);
                        offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
                        bus_dmamap_sync_range(mpt->reply_dmat,
                            mpt->reply_dmap, offset, MPT_REPLY_SIZE,
                            BUS_DMASYNC_POSTREAD);
                        reply_frame = MPT_REPLY_OTOV(mpt, offset);
                        ctxt_idx = le32toh(reply_frame->MsgContext);
                } else {
                        uint32_t type;

                        type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
                        ctxt_idx = reply_desc;
                        mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
                            reply_desc);

                        switch (type) {
                        case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
                                ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
                                break;
                        case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
                                ctxt_idx = GET_IO_INDEX(reply_desc);
                                if (mpt->tgt_cmd_ptrs == NULL) {
                                        mpt_prt(mpt,
                                            "mpt_intr: no target cmd ptrs\n");
                                        reply_desc = MPT_REPLY_EMPTY;
                                        break;
                                }
                                if (ctxt_idx >= mpt->tgt_cmds_allocated) {
                                        mpt_prt(mpt,
                                            "mpt_intr: bad tgt cmd ctxt %u\n",
                                            ctxt_idx);
                                        reply_desc = MPT_REPLY_EMPTY;
                                        ntrips = 1000;
                                        break;
                                }
                                req = mpt->tgt_cmd_ptrs[ctxt_idx];
                                if (req == NULL) {
                                        mpt_prt(mpt, "no request backpointer "
                                            "at index %u", ctxt_idx);
                                        reply_desc = MPT_REPLY_EMPTY;
                                        ntrips = 1000;
                                        break;
                                }
                                /*
                                 * Reformulate ctxt_idx to be just as if
                                 * it were another type of context reply
                                 * so the code below will find the request
                                 * via indexing into the pool.
                                 */
                                ctxt_idx =
                                    req->index | mpt->scsi_tgt_handler_id;
                                req = NULL;
                                break;
                        case MPI_CONTEXT_REPLY_TYPE_LAN:
                                mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
                                    reply_desc);
                                reply_desc = MPT_REPLY_EMPTY;
                                break;
                        default:
                                mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
                                reply_desc = MPT_REPLY_EMPTY;
                                break;
                        }
                        if (reply_desc == MPT_REPLY_EMPTY) {
                                if (ntrips++ > 1000) {
                                        break;
                                }
                                continue;
                        }
                }

                cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
                req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
                if (req_index < MPT_MAX_REQUESTS(mpt)) {
                        req = &mpt->request_pool[req_index];
                } else {
                        mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc =="
                            " 0x%x)\n", req_index, reply_desc);
                }

                free_rf = mpt_reply_handlers[cb_index](mpt, req,
                    reply_desc, reply_frame);

                if (reply_frame != NULL && free_rf) {
                        mpt_free_reply(mpt, reply_baddr);
                }

                /*
                 * If we got ourselves disabled, don't get stuck in a loop
                 */
                if (mpt->disabled) {
                        mpt_disable_ints(mpt);
                        break;
                }
                if (ntrips++ > 1000) {
                        break;
                }
        }
        mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n");
}

/******************************* Error Recovery *******************************/
void
mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
    u_int iocstatus)
{
        MSG_DEFAULT_REPLY ioc_status_frame;
        request_t *req;

        memset(&ioc_status_frame, 0, sizeof(ioc_status_frame));
        ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
        ioc_status_frame.IOCStatus = iocstatus;
        while ((req = TAILQ_FIRST(chain)) != NULL) {
                MSG_REQUEST_HEADER *msg_hdr;
                u_int cb_index;

                TAILQ_REMOVE(chain, req, links);
                msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
                ioc_status_frame.Function = msg_hdr->Function;
                ioc_status_frame.MsgContext = msg_hdr->MsgContext;
                cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
                mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
                    &ioc_status_frame);
        }
}

/********************************* Diagnostics ********************************/
/*
 * Perform a diagnostic dump of a reply frame.
 */
void
mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
{
        mpt_prt(mpt, "Address Reply:\n");
        mpt_print_reply(reply_frame);
}

/******************************* Doorbell Access ******************************/
static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);

static __inline uint32_t
mpt_rd_db(struct mpt_softc *mpt)
{
        return mpt_read(mpt, MPT_OFFSET_DOORBELL);
}

static __inline uint32_t
mpt_rd_intr(struct mpt_softc *mpt)
{
        return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
}

/* Busy wait for a doorbell to be read by the IOC */
static int
mpt_wait_db_ack(struct mpt_softc *mpt)
{
        int i;

        for (i = 0; i < MPT_MAX_WAIT; i++) {
                if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
                        maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
                        return (MPT_OK);
                }
                DELAY(200);
        }
        return (MPT_FAIL);
}

/* Busy wait for a doorbell interrupt */
static int
mpt_wait_db_int(struct mpt_softc *mpt)
{
        int i;

        for (i = 0; i < MPT_MAX_WAIT; i++) {
                if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
                        maxwait_int = i > maxwait_int ? i : maxwait_int;
                        return MPT_OK;
                }
                DELAY(100);
        }
        return (MPT_FAIL);
}

/* Sanity check the doorbell and complain if the IOC is not running */
void
mpt_check_doorbell(struct mpt_softc *mpt)
{
        uint32_t db = mpt_rd_db(mpt);

        if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
                mpt_prt(mpt, "Device not running\n");
                mpt_print_db(db);
        }
}

/* Wait for the IOC to transition to a given state */
static int
mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
{
        int i;

        for (i = 0; i < MPT_MAX_WAIT; i++) {
                uint32_t db = mpt_rd_db(mpt);

                if (MPT_STATE(db) == state) {
                        maxwait_state = i > maxwait_state ? i : maxwait_state;
                        return (MPT_OK);
                }
                DELAY(100);
        }
        return (MPT_FAIL);
}


/************************* Initialization/Configuration ************************/
static int mpt_download_fw(struct mpt_softc *mpt);

/* Issue the reset COMMAND to the IOC */
static int
mpt_soft_reset(struct mpt_softc *mpt)
{
        mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");

        /* Have to use hard reset if we are not in Running state */
        if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
                mpt_prt(mpt, "soft reset failed: device not running\n");
                return (MPT_FAIL);
        }

        /*
         * If the doorbell is in use we don't have a chance of getting
         * a word in since the IOC probably crashed in message
         * processing.  So don't waste our time.
         */
        if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
                mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
                return (MPT_FAIL);
        }

        /* Send the reset request to the IOC */
        mpt_write(mpt, MPT_OFFSET_DOORBELL,
            MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
        if (mpt_wait_db_ack(mpt) != MPT_OK) {
                mpt_prt(mpt, "soft reset failed: ack timeout\n");
                return (MPT_FAIL);
        }

        /* Wait for the IOC to reload and come out of reset state */
        if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
                mpt_prt(mpt, "soft reset failed: device did not restart\n");
                return (MPT_FAIL);
        }

        return MPT_OK;
}

static int
mpt_enable_diag_mode(struct mpt_softc *mpt)
{
        int try;

        try = 20;
        while (--try) {

                if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
                        break;

                /* Enable diagnostic registers */
                mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
                mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
                mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
                mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
                mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
                mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);

                DELAY(100000);
        }
        if (try == 0)
                return (EIO);
        return (0);
}

static void
mpt_disable_diag_mode(struct mpt_softc *mpt)
{
        mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
}

/*
 * This is a magic diagnostic reset that resets all the ARM
 * processors in the chip.
 */
static void
mpt_hard_reset(struct mpt_softc *mpt)
{
        int error;
        int wait;
        uint32_t diagreg;

        mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");

        error = mpt_enable_diag_mode(mpt);
        if (error) {
                mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n");
                mpt_prt(mpt, "Trying to reset anyway.\n");
        }

        diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);

        /*
         * This appears to be a workaround required for some
         * firmware or hardware revs.
         */
        mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
        DELAY(1000);

        /* Diag. port is now active so we can now hit the reset bit */
        mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);

        /*
         * Ensure that the reset has finished.  We delay 1ms
         * prior to reading the register to make sure the chip
         * has sufficiently completed its reset to handle register
         * accesses.
         */
        wait = 5000;
        do {
                DELAY(1000);
                diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
        } while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);

        if (wait == 0) {
                mpt_prt(mpt, "WARNING - Failed hard reset! "
                    "Trying to initialize anyway.\n");
        }

        /*
         * If we have firmware to download, it must be loaded before
         * the controller will become operational.  Do so now.
         */
        if (mpt->fw_image != NULL) {

                error = mpt_download_fw(mpt);

                if (error) {
                        mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
                        mpt_prt(mpt, "Trying to initialize anyway.\n");
                }
        }

        /*
         * Resetting the controller should have disabled write
         * access to the diagnostic registers, but disable
         * manually to be sure.
         */
        mpt_disable_diag_mode(mpt);
}

static void
mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
{
        /*
         * Complete all pending requests with a status
         * appropriate for an IOC reset.
         */
        mpt_complete_request_chain(mpt, &mpt->request_pending_list,
            MPI_IOCSTATUS_INVALID_STATE);
}


/*
 * Reset the IOC when needed.  Try software command first, then if needed
 * poke at the magic diagnostic reset.  Note that a hard reset resets
 * *both* IOCs on dual function chips (FC929 && LSI1030), as well as
 * fouling up the PCI configuration registers.
 */
int
mpt_reset(struct mpt_softc *mpt, int reinit)
{
        struct mpt_personality *pers;
        int ret;
        int retry_cnt = 0;

        /*
         * Try a soft reset. If that fails, get out the big hammer.
         */
 again:
        if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
                int cnt;
                for (cnt = 0; cnt < 5; cnt++) {
                        /* Failed; do a hard reset */
                        mpt_hard_reset(mpt);

                        /*
                         * Wait for the IOC to reload
                         * and come out of reset state
                         */
                        ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
                        if (ret == MPT_OK) {
                                break;
                        }
                        /*
                         * Okay- try to check again...
                         */
                        ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
                        if (ret == MPT_OK) {
                                break;
                        }
                        mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
                            retry_cnt, cnt);
                }
        }

        if (retry_cnt == 0) {
                /*
                 * Invoke reset handlers.  We bump the reset count so
                 * that mpt_wait_req() understands that regardless of
                 * the specified wait condition, it should stop its wait.
                 */
                mpt->reset_cnt++;
                MPT_PERS_FOREACH(mpt, pers)
                        pers->reset(mpt, ret);
        }

        if (reinit) {
                ret = mpt_enable_ioc(mpt, 1);
                if (ret == MPT_OK) {
                        mpt_enable_ints(mpt);
                }
        }
        if (ret != MPT_OK && retry_cnt++ < 2) {
                goto again;
        }
        return ret;
}

/* Return a command buffer to the free queue */
void
mpt_free_request(struct mpt_softc *mpt, request_t *req)
{
        request_t *nxt;
        struct mpt_evtf_record *record;
        uint32_t reply_baddr;

        if (req == NULL || req != &mpt->request_pool[req->index]) {
                panic("mpt_free_request bad req ptr\n");
                return;
        }
        if ((nxt = req->chain) != NULL) {
                req->chain = NULL;
                mpt_free_request(mpt, nxt);     /* NB: recursion */
        }
        KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
        KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));
        MPT_LOCK_ASSERT(mpt);
        KASSERT(mpt_req_on_free_list(mpt, req) == 0,
            ("mpt_free_request: req %p:%u func %x already on freelist",
            req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
        KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
            ("mpt_free_request: req %p:%u func %x on pending list",
            req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
#ifdef  INVARIANTS
        mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__);
#endif

        req->ccb = NULL;
        if (LIST_EMPTY(&mpt->ack_frames)) {
                /*
                 * Insert free ones at the tail
                 */
                req->serno = 0;
                req->state = REQ_STATE_FREE;
#ifdef  INVARIANTS
                memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER));
#endif
                TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
                if (mpt->getreqwaiter != 0) {
                        mpt->getreqwaiter = 0;
                        wakeup(&mpt->request_free_list);
                }
                return;
        }

        /*
         * Process an ack frame deferred due to resource shortage.
         */
        record = LIST_FIRST(&mpt->ack_frames);
        LIST_REMOVE(record, links);
        req->state = REQ_STATE_ALLOCATED;
        mpt_assign_serno(mpt, req);
        mpt_send_event_ack(mpt, req, &record->reply, record->context);
        reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply)
            + (mpt->reply_phys & 0xFFFFFFFF);
        mpt_free_reply(mpt, reply_baddr);
}

/* Get a command buffer from the free queue */
request_t *
mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
{
        request_t *req;

retry:
        MPT_LOCK_ASSERT(mpt);
        req = TAILQ_FIRST(&mpt->request_free_list);
        if (req != NULL) {
                KASSERT(req == &mpt->request_pool[req->index],
                    ("mpt_get_request: corrupted request free list\n"));
                KASSERT(req->state == REQ_STATE_FREE,
                    ("req %p:%u not free on free list %x index %d function %x",
                    req, req->serno, req->state, req->index,
                    ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
                TAILQ_REMOVE(&mpt->request_free_list, req, links);
                req->state = REQ_STATE_ALLOCATED;
                req->chain = NULL;
                mpt_assign_serno(mpt, req);
                mpt_callout_init(&req->callout);
        } else if (sleep_ok != 0) {
                mpt->getreqwaiter = 1;
                mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
                goto retry;
        }
        return (req);
}

/* Pass the command to the IOC */
void
mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
{
        if (mpt->verbose > MPT_PRT_DEBUG2) {
                mpt_dump_request(mpt, req);
        }
        bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
            BUS_DMASYNC_PREWRITE);
        req->state |= REQ_STATE_QUEUED;
        KASSERT(mpt_req_on_free_list(mpt, req) == 0,
            ("req %p:%u func %x on free list in mpt_send_cmd",
            req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
        KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
            ("req %p:%u func %x already on pending list in mpt_send_cmd",
            req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
        TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
        mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
}
1273/*
1274 * Wait for a request to complete.
1275 *
1276 * Inputs:
1277 * mpt softc of controller executing request
1278 * req request to wait for
1279 * sleep_ok nonzero implies may sleep in this context
1280 * time_ms timeout in ms. 0 implies no timeout.
1281 *
1282 * Return Values:
1283 * 0 Request completed
1284 * non-0 Timeout fired before request completion.
1285 */
1286int
1287mpt_wait_req(struct mpt_softc *mpt, request_t *req,
1288 mpt_req_state_t state, mpt_req_state_t mask,
1289 int sleep_ok, int time_ms)
1290{
1291 int error;
1292 int timeout;
1293 u_int saved_cnt;
1294
1295 /*
1296 * timeout is in ms. 0 indicates infinite wait.
1297 * Convert to ticks or 500us units depending on
1298 * our sleep mode.
1299 */
1300 if (sleep_ok != 0) {
1301 timeout = (time_ms * hz) / 1000;
1302 } else {
1303 timeout = time_ms * 2;
1304 }
1305 req->state |= REQ_STATE_NEED_WAKEUP;
1306 mask &= ~REQ_STATE_NEED_WAKEUP;
1307 saved_cnt = mpt->reset_cnt;
1308 while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
1309 if (sleep_ok != 0) {
1310 error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
1311 if (error == EWOULDBLOCK) {
1312 timeout = 0;
1313 break;
1314 }
1315 } else {
1316 if (time_ms != 0 && --timeout == 0) {
1317 break;
1318 }
1319 DELAY(500);
1320 mpt_intr(mpt);
1321 }
1322 }
1323 req->state &= ~REQ_STATE_NEED_WAKEUP;
1324 if (mpt->reset_cnt != saved_cnt) {
1325 return (EIO);
1326 }
1327 if (time_ms && timeout <= 0) {
1328 MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf;
1329 req->state |= REQ_STATE_TIMEDOUT;
1330 mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
1331 return (ETIMEDOUT);
1332 }
1333 return (0);
1334}
1335
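/*
 * Usage sketch: the typical life cycle of a polled request, as used
 * throughout this file (mpt_issue_cfg_req() below is a real instance).
 * Purely illustrative; error and timeout handling are elided.
 *
 *      request_t *req;
 *
 *      req = mpt_get_request(mpt, FALSE);
 *      ... fill in req->req_vbuf, including a MsgContext built from
 *      ... req->index and a registered handler id ...
 *      mpt_check_doorbell(mpt);
 *      mpt_send_cmd(mpt, req);
 *      if (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
 *          FALSE, 1000) == 0)
 *              mpt_free_request(mpt, req);
 */
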
/*
 * Send a command to the IOC via the handshake register.
 *
 * Only done at initialization time and for certain unusual
 * commands such as device/bus reset as specified by LSI.
 */
int
mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
{
        int i;
        uint32_t data, *data32;

        /* Check condition of the IOC */
        data = mpt_rd_db(mpt);
        if ((MPT_STATE(data) != MPT_DB_STATE_READY
          && MPT_STATE(data) != MPT_DB_STATE_RUNNING
          && MPT_STATE(data) != MPT_DB_STATE_FAULT)
         || MPT_DB_IS_IN_USE(data)) {
                mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
                mpt_print_db(data);
                return (EBUSY);
        }

        /* We move things in 32 bit chunks */
        len = (len + 3) >> 2;
        data32 = cmd;

        /* Clear any left over pending doorbell interrupts */
        if (MPT_DB_INTR(mpt_rd_intr(mpt)))
                mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

        /*
         * Tell the handshake reg. we are going to send a command
         * and how long it is going to be.
         */
        data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
            (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
        mpt_write(mpt, MPT_OFFSET_DOORBELL, data);

        /* Wait for the chip to notice */
        if (mpt_wait_db_int(mpt) != MPT_OK) {
                mpt_prt(mpt, "mpt_send_handshake_cmd: db ignored\n");
                return (ETIMEDOUT);
        }

        /* Clear the interrupt */
        mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

        if (mpt_wait_db_ack(mpt) != MPT_OK) {
                mpt_prt(mpt, "mpt_send_handshake_cmd: db ack timed out\n");
                return (ETIMEDOUT);
        }

        /* Send the command */
        for (i = 0; i < len; i++) {
                mpt_write(mpt, MPT_OFFSET_DOORBELL, htole32(*data32++));
                if (mpt_wait_db_ack(mpt) != MPT_OK) {
                        mpt_prt(mpt,
                            "mpt_send_handshake_cmd: timeout @ index %d\n", i);
                        return (ETIMEDOUT);
                }
        }
        return MPT_OK;
}

/* Get the response from the handshake register */
int
mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
{
        int left, reply_left;
        u_int16_t *data16;
        uint32_t data;
        MSG_DEFAULT_REPLY *hdr;

        /* We move things out in 16 bit chunks */
        reply_len >>= 1;
        data16 = (u_int16_t *)reply;

        hdr = (MSG_DEFAULT_REPLY *)reply;

        /* Get first word */
        if (mpt_wait_db_int(mpt) != MPT_OK) {
                mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
                return ETIMEDOUT;
        }
        data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
        *data16++ = le16toh(data & MPT_DB_DATA_MASK);
        mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

        /* Get second word */
        if (mpt_wait_db_int(mpt) != MPT_OK) {
                mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
                return ETIMEDOUT;
        }
        data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
        *data16++ = le16toh(data & MPT_DB_DATA_MASK);
        mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

        /*
         * With the second word, we can now look at the length.
         * Warn about a reply that's too short (except for IOC FACTS REPLY)
         */
        if ((reply_len >> 1) != hdr->MsgLength &&
            (hdr->Function != MPI_FUNCTION_IOC_FACTS)){
#if __FreeBSD_version >= 500000
                mpt_prt(mpt, "reply length does not match message length: "
                    "got %x; expected %zx for function %x\n",
                    hdr->MsgLength << 2, reply_len << 1, hdr->Function);
#else
                mpt_prt(mpt, "reply length does not match message length: "
                    "got %x; expected %x for function %x\n",
                    hdr->MsgLength << 2, reply_len << 1, hdr->Function);
#endif
        }

        /* Get rest of the reply; but don't overflow the provided buffer */
        left = (hdr->MsgLength << 1) - 2;
        reply_left =  reply_len - 2;
        while (left--) {
                u_int16_t datum;

                if (mpt_wait_db_int(mpt) != MPT_OK) {
                        mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
                        return ETIMEDOUT;
                }
                data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
                datum = le16toh(data & MPT_DB_DATA_MASK);

                if (reply_left-- > 0)
                        *data16++ = datum;

                mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
        }

        /* One more wait & clear at the end */
        if (mpt_wait_db_int(mpt) != MPT_OK) {
                mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
                return ETIMEDOUT;
        }
        mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

        if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
                if (mpt->verbose >= MPT_PRT_TRACE)
                        mpt_print_reply(hdr);
                return (MPT_FAIL | hdr->IOCStatus);
        }

        return (0);
}

static int
mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
{
        MSG_IOC_FACTS f_req;
        int error;

        memset(&f_req, 0, sizeof f_req);
        f_req.Function = MPI_FUNCTION_IOC_FACTS;
        f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
        error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
        if (error) {
                return(error);
        }
        error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
        return (error);
}

static int
mpt_get_portfacts(struct mpt_softc *mpt, U8 port, MSG_PORT_FACTS_REPLY *freplp)
{
        MSG_PORT_FACTS f_req;
        int error;

        memset(&f_req, 0, sizeof f_req);
        f_req.Function = MPI_FUNCTION_PORT_FACTS;
        f_req.PortNumber = port;
        f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
        error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
        if (error) {
                return(error);
        }
        error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
        return (error);
}

/*
 * Send the initialization request.  This is where we specify how many
 * SCSI busses and how many devices per bus we wish to emulate.
 * This is also the command that specifies the max size of the reply
 * frames from the IOC that we will be allocating.
 */
static int
mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
{
        int error = 0;
        MSG_IOC_INIT init;
        MSG_IOC_INIT_REPLY reply;

        memset(&init, 0, sizeof init);
        init.WhoInit = who;
        init.Function = MPI_FUNCTION_IOC_INIT;
        init.MaxDevices = 0;    /* at least 256 devices per bus */
        init.MaxBuses = 16;     /* at least 16 busses */

        init.MsgVersion = htole16(MPI_VERSION);
        init.HeaderVersion = htole16(MPI_HEADER_VERSION);
        init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
        init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);

        if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
                return(error);
        }

        error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
        return (error);
}


/*
 * Utility routine to read configuration headers and pages
 */
int
mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, cfgparms_t *params,
    bus_addr_t addr, bus_size_t len, int sleep_ok, int timeout_ms)
{
        MSG_CONFIG *cfgp;
        SGE_SIMPLE32 *se;

        cfgp = req->req_vbuf;
        memset(cfgp, 0, sizeof *cfgp);
        cfgp->Action = params->Action;
        cfgp->Function = MPI_FUNCTION_CONFIG;
        cfgp->Header.PageVersion = params->PageVersion;
        cfgp->Header.PageNumber = params->PageNumber;
        cfgp->PageAddress = htole32(params->PageAddress);
        if ((params->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
            MPI_CONFIG_PAGETYPE_EXTENDED) {
                cfgp->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
                cfgp->Header.PageLength = 0;
                cfgp->ExtPageLength = htole16(params->ExtPageLength);
                cfgp->ExtPageType = params->ExtPageType;
        } else {
                cfgp->Header.PageType = params->PageType;
                cfgp->Header.PageLength = params->PageLength;
        }
        se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
        se->Address = htole32(addr);
        MPI_pSGE_SET_LENGTH(se, len);
        MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
            MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
            MPI_SGE_FLAGS_END_OF_LIST |
            ((params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
           || params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
            ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
        se->FlagsLength = htole32(se->FlagsLength);
        cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);

        mpt_check_doorbell(mpt);
        mpt_send_cmd(mpt, req);
        return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
            sleep_ok, timeout_ms));
}

int
mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion, int PageNumber,
    uint32_t PageAddress, int ExtPageType,
    CONFIG_EXTENDED_PAGE_HEADER *rslt,
    int sleep_ok, int timeout_ms)
{
        request_t *req;
        cfgparms_t params;
        MSG_CONFIG_REPLY *cfgp;
        int error;

        req = mpt_get_request(mpt, sleep_ok);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_read_extcfg_header: Get request failed!\n");
                return (ENOMEM);
        }

        params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
        params.PageVersion = PageVersion;
        params.PageLength = 0;
        params.PageNumber = PageNumber;
        params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
        params.PageAddress = PageAddress;
        params.ExtPageType = ExtPageType;
        params.ExtPageLength = 0;
        error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
            sleep_ok, timeout_ms);
        if (error != 0) {
                /*
                 * Leave the request.  Without resetting the chip, it's
                 * still owned by it and we'll just get into trouble
                 * freeing it now.  Mark it as abandoned so that if it
                 * shows up later it can be freed.
                 */
                mpt_prt(mpt, "read_extcfg_header timed out\n");
                return (ETIMEDOUT);
        }

        switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
        case MPI_IOCSTATUS_SUCCESS:
                cfgp = req->req_vbuf;
                rslt->PageVersion = cfgp->Header.PageVersion;
                rslt->PageNumber = cfgp->Header.PageNumber;
                rslt->PageType = cfgp->Header.PageType;
                rslt->ExtPageLength = le16toh(cfgp->ExtPageLength);
                rslt->ExtPageType = cfgp->ExtPageType;
                error = 0;
                break;
        case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
                mpt_lprt(mpt, MPT_PRT_DEBUG,
                    "Invalid Page Type %d Number %d Addr 0x%0x\n",
                    MPI_CONFIG_PAGETYPE_EXTENDED, PageNumber, PageAddress);
                error = EINVAL;
                break;
        default:
                mpt_prt(mpt, "mpt_read_extcfg_header: Config Info Status %x\n",
                    req->IOCStatus);
                error = EIO;
                break;
        }
        mpt_free_request(mpt, req);
        return (error);
}

int
mpt_read_extcfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
    CONFIG_EXTENDED_PAGE_HEADER *hdr, void *buf, size_t len,
    int sleep_ok, int timeout_ms)
{
        request_t *req;
        cfgparms_t params;
        int error;

        req = mpt_get_request(mpt, sleep_ok);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_read_extcfg_page: Get request failed!\n");
                return (-1);
        }

        params.Action = Action;
        params.PageVersion = hdr->PageVersion;
        params.PageLength = 0;
        params.PageNumber = hdr->PageNumber;
        params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
        params.PageAddress = PageAddress;
        params.ExtPageType = hdr->ExtPageType;
        params.ExtPageLength = hdr->ExtPageLength;
        error = mpt_issue_cfg_req(mpt, req, &params,
            req->req_pbuf + MPT_RQSL(mpt),
            len, sleep_ok, timeout_ms);
        if (error != 0) {
                mpt_prt(mpt, "read_extcfg_page(%d) timed out\n", Action);
                return (-1);
        }

        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
                mpt_prt(mpt, "mpt_read_extcfg_page: Config Info Status %x\n",
                    req->IOCStatus);
                mpt_free_request(mpt, req);
                return (-1);
        }
        bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
            BUS_DMASYNC_POSTREAD);
        memcpy(buf, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
        mpt_free_request(mpt, req);
        return (0);
}

int
mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
    uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
    int sleep_ok, int timeout_ms)
{
        request_t *req;
        cfgparms_t params;
        MSG_CONFIG *cfgp;
        int error;

        req = mpt_get_request(mpt, sleep_ok);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
                return (ENOMEM);
        }

        params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
        params.PageVersion = 0;
        params.PageLength = 0;
        params.PageNumber = PageNumber;
        params.PageType = PageType;
        params.PageAddress = PageAddress;
        error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
            sleep_ok, timeout_ms);
        if (error != 0) {
                /*
                 * Leave the request.  Without resetting the chip, it's
                 * still owned by it and we'll just get into trouble
                 * freeing it now.  Mark it as abandoned so that if it
                 * shows up later it can be freed.
                 */
                mpt_prt(mpt, "read_cfg_header timed out\n");
                return (ETIMEDOUT);
        }

        switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
        case MPI_IOCSTATUS_SUCCESS:
                cfgp = req->req_vbuf;
                bcopy(&cfgp->Header, rslt, sizeof(*rslt));
                error = 0;
                break;
        case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
                mpt_lprt(mpt, MPT_PRT_DEBUG,
                    "Invalid Page Type %d Number %d Addr 0x%0x\n",
                    PageType, PageNumber, PageAddress);
                error = EINVAL;
                break;
        default:
                mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
                    req->IOCStatus);
                error = EIO;
                break;
        }
        mpt_free_request(mpt, req);
        return (error);
}

int
mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
    CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
    int timeout_ms)
{
        request_t *req;
        cfgparms_t params;
        int error;

        req = mpt_get_request(mpt, sleep_ok);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
                return (-1);
        }

        params.Action = Action;
        params.PageVersion = hdr->PageVersion;
        params.PageLength = hdr->PageLength;
        params.PageNumber = hdr->PageNumber;
        params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
        params.PageAddress = PageAddress;
        error = mpt_issue_cfg_req(mpt, req, &params,
            req->req_pbuf + MPT_RQSL(mpt),
            len, sleep_ok, timeout_ms);
        if (error != 0) {
                mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
                return (-1);
        }

        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
                mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
                    req->IOCStatus);
                mpt_free_request(mpt, req);
                return (-1);
        }
        bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
            BUS_DMASYNC_POSTREAD);
        memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
        mpt_free_request(mpt, req);
        return (0);
}

int
mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
		   CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
		   int timeout_ms)
{
	request_t *req;
	cfgparms_t params;
	u_int hdr_attr;
	int error;

	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
		    hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (-1);
	}

#if 0
	/*
	 * We shouldn't mask off other bits here.
	 */
	hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK;
#endif

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL)
		return (-1);

	memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt), hdr, len);

	/*
	 * There isn't any point in restoring stripped out attributes
	 * if you then mask them going down to issue the request.
	 */

	params.Action = Action;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageAddress = PageAddress;
#if 0
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
	params.PageType = hdr->PageType;
#endif
	error = mpt_issue_cfg_req(mpt, req, &params,
				  req->req_pbuf + MPT_RQSL(mpt),
				  len, sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
			req->IOCStatus);
		mpt_free_request(mpt, req);
		return (-1);
	}
	mpt_free_request(mpt, req);
	return (0);
}

/*
 * Read IOC configuration information
 */
static int
mpt_read_config_info_ioc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_HEADER hdr;
	struct mpt_raid_volume *mpt_raid;
	int rv;
	int i;
	size_t len;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
	    2, 0, &hdr, FALSE, 5000);
	/*
	 * If it's an invalid page, so what? Not a supported function....
	 */
	if (rv == EINVAL) {
		return (0);
	}
	if (rv) {
		return (rv);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "IOC Page 2 Header: Version %x len %x PageNumber %x PageType %x\n",
	    hdr.PageVersion, hdr.PageLength << 2,
	    hdr.PageNumber, hdr.PageType);

	len = hdr.PageLength * sizeof(uint32_t);
	mpt->ioc_page2 = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->ioc_page2 == NULL) {
		mpt_prt(mpt, "unable to allocate memory for IOC page 2\n");
		mpt_raid_free_mem(mpt);
		return (ENOMEM);
	}
	memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
	rv = mpt_read_cur_cfg_page(mpt, 0,
	    &mpt->ioc_page2->Header, len, FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read IOC Page 2\n");
		mpt_raid_free_mem(mpt);
		return (EIO);
	}
	mpt2host_config_page_ioc2(mpt->ioc_page2);

	if (mpt->ioc_page2->CapabilitiesFlags != 0) {
		uint32_t mask;

		mpt_prt(mpt, "Capabilities: (");
		for (mask = 1; mask != 0; mask <<= 1) {
			if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) {
				continue;
			}
			switch (mask) {
			case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
				mpt_prtc(mpt, " RAID-0");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT:
				mpt_prtc(mpt, " RAID-1E");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT:
				mpt_prtc(mpt, " RAID-1");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT:
				mpt_prtc(mpt, " SES");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT:
				mpt_prtc(mpt, " SAFTE");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT:
				mpt_prtc(mpt, " Multi-Channel-Arrays");
				break;
			default:
				break;
			}
		}
		mpt_prtc(mpt, " )\n");
		if ((mpt->ioc_page2->CapabilitiesFlags
		   & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
		    | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
		    | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) {
			mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
			    mpt->ioc_page2->NumActiveVolumes,
			    mpt->ioc_page2->NumActiveVolumes != 1
			    ? "s " : " ",
			    mpt->ioc_page2->MaxVolumes);
			mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n",
			    mpt->ioc_page2->NumActivePhysDisks,
			    mpt->ioc_page2->NumActivePhysDisks != 1
			    ? "s " : " ",
			    mpt->ioc_page2->MaxPhysDisks);
		}
	}

	len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
	mpt->raid_volumes = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->raid_volumes == NULL) {
		mpt_prt(mpt, "Could not allocate RAID volume data\n");
		mpt_raid_free_mem(mpt);
		return (ENOMEM);
	}

	/*
	 * Copy critical data out of ioc_page2 so that we can
	 * safely refresh the page without windows of unreliable
	 * data.
	 */
	mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes;

	len = sizeof(*mpt->raid_volumes->config_page) +
	    (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1));
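	/*
	 * Sizing note (illustrative, with a made-up value, not something
	 * reported by the IOC): the RAID volume page 0 structure already
	 * declares one trailing RAID_VOL0_PHYS_DISK element, so only
	 * MaxPhysDisks - 1 additional elements are added above. With a
	 * hypothetical MaxPhysDisks of 8, the allocation below covers
	 * the fixed page header plus all 8 physical disk slots.
	 */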
	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
		mpt_raid = &mpt->raid_volumes[i];
		mpt_raid->config_page =
		    kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (mpt_raid->config_page == NULL) {
			mpt_prt(mpt, "Could not allocate RAID page data\n");
			mpt_raid_free_mem(mpt);
			return (ENOMEM);
		}
	}
	mpt->raid_page0_len = len;

	len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
	mpt->raid_disks = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->raid_disks == NULL) {
		mpt_prt(mpt, "Could not allocate RAID disk data\n");
		mpt_raid_free_mem(mpt);
		return (ENOMEM);
	}
	mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks;

	/*
	 * Load page 3.
	 */
	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
	    3, 0, &hdr, FALSE, 5000);
	if (rv) {
		mpt_raid_free_mem(mpt);
		return (EIO);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
	    hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);

	len = hdr.PageLength * sizeof(uint32_t);
	mpt->ioc_page3 = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->ioc_page3 == NULL) {
		mpt_prt(mpt, "unable to allocate memory for IOC page 3\n");
		mpt_raid_free_mem(mpt);
		return (ENOMEM);
	}
	memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
	rv = mpt_read_cur_cfg_page(mpt, 0,
	    &mpt->ioc_page3->Header, len, FALSE, 5000);
	if (rv) {
		mpt_raid_free_mem(mpt);
		return (EIO);
	}
	mpt2host_config_page_ioc3(mpt->ioc_page3);
	mpt_raid_wakeup(mpt);
	return (0);
}

/*
 * Enable IOC port
 */
static int
mpt_send_port_enable(struct mpt_softc *mpt, int port)
{
	request_t *req;
	MSG_PORT_ENABLE *enable_req;
	int error;

	req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
	if (req == NULL)
		return (-1);

	enable_req = req->req_vbuf;
	memset(enable_req, 0, MPT_RQSL(mpt));

	enable_req->Function = MPI_FUNCTION_PORT_ENABLE;
	enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
	enable_req->PortNumber = port;

	mpt_check_doorbell(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);

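	/*
	 * The wait below allows 30 seconds for SAS and FC ports but only
	 * 3 seconds for parallel SCSI, presumably because link bring-up
	 * and discovery take longer on fabric-attached transports.
	 */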
	mpt_send_cmd(mpt, req);
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
	    FALSE, (mpt->is_sas || mpt->is_fc) ? 30000 : 3000);
	if (error != 0) {
		mpt_prt(mpt, "port %d enable timed out\n", port);
		return (-1);
	}
	mpt_free_request(mpt, req);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port);
	return (0);
}

/*
 * Enable/Disable asynchronous event reporting.
 */
static int
mpt_send_event_request(struct mpt_softc *mpt, int onoff)
{
	request_t *req;
	MSG_EVENT_NOTIFY *enable_req;

	req = mpt_get_request(mpt, FALSE);
	if (req == NULL) {
		return (ENOMEM);
	}
	enable_req = req->req_vbuf;
	memset(enable_req, 0, sizeof *enable_req);

	enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION;
	enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
	enable_req->Switch = onoff;

	mpt_check_doorbell(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n",
	    onoff ? "en" : "dis");
	/*
	 * Send the command off, but don't wait for it.
	 */
	mpt_send_cmd(mpt, req);
	return (0);
}

/*
 * Un-mask the interrupts on the chip.
 */
void
mpt_enable_ints(struct mpt_softc *mpt)
{
	/* Unmask everything except the doorbell interrupt */
	mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK);
}

/*
 * Mask the interrupts on the chip.
 */
void
mpt_disable_ints(struct mpt_softc *mpt)
{
	/* Mask all interrupts */
	mpt_write(mpt, MPT_OFFSET_INTR_MASK,
	    MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
}

static void
mpt_sysctl_attach(struct mpt_softc *mpt)
{
#if __FreeBSD_version >= 500000
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "debug", CTLFLAG_RW, &mpt->verbose, 0,
	    "Debugging/Verbose level");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "role", CTLFLAG_RD, &mpt->role, 0,
	    "HBA role");
#ifdef MPT_TEST_MULTIPATH
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "failure_id", CTLFLAG_RW, &mpt->failure_id, -1,
	    "Next Target to Fail");
#endif
#endif
}
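/*
 * With the sysctls above in place, per-instance verbosity can be raised
 * at runtime; on a FreeBSD-derived system the OID would look something
 * like the following (unit number assumed):
 *
 *	sysctl dev.mpt.0.debug=3
 */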

int
mpt_attach(struct mpt_softc *mpt)
{
	struct mpt_personality *pers;
	int i;
	int error;

	mpt_core_attach(mpt);
	mpt_core_enable(mpt);

	TAILQ_INSERT_TAIL(&mpt_tailq, mpt, links);
	for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
		pers = mpt_personalities[i];
		if (pers == NULL) {
			continue;
		}
		if (pers->probe(mpt) == 0) {
			error = pers->attach(mpt);
			if (error != 0) {
				mpt_detach(mpt);
				return (error);
			}
			mpt->mpt_pers_mask |= (0x1 << pers->id);
			pers->use_count++;
		}
	}

	/*
	 * Now that we've attached everything, do the enable function
	 * for all of the personalities. This allows the personalities
	 * to do setups that are appropriate for them prior to enabling
	 * any ports.
	 */
	for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
		pers = mpt_personalities[i];
		if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) {
			error = pers->enable(mpt);
			if (error != 0) {
				mpt_prt(mpt, "personality %s attached but would"
				    " not enable (%d)\n", pers->name, error);
				mpt_detach(mpt);
				return (error);
			}
		}
	}
	return (0);
}

int
mpt_shutdown(struct mpt_softc *mpt)
{
	struct mpt_personality *pers;

	MPT_PERS_FOREACH_REVERSE(mpt, pers) {
		pers->shutdown(mpt);
	}
	return (0);
}

int
mpt_detach(struct mpt_softc *mpt)
{
	struct mpt_personality *pers;

	MPT_PERS_FOREACH_REVERSE(mpt, pers) {
		pers->detach(mpt);
		mpt->mpt_pers_mask &= ~(0x1 << pers->id);
		pers->use_count--;
	}
	TAILQ_REMOVE(&mpt_tailq, mpt, links);
	return (0);
}

int
mpt_core_load(struct mpt_personality *pers)
{
	int i;

	/*
	 * Setup core handlers and insert the default handler
	 * into all "empty slots".
	 */
	for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) {
		mpt_reply_handlers[i] = mpt_default_reply_handler;
	}

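	/*
	 * The three core handlers below are installed at slots indexed
	 * by MPT_CBI(), which is assumed here (per its use throughout
	 * this file) to derive a callback-table index from the handler
	 * bits of a request's MsgContext value.
	 */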
	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
	    mpt_event_reply_handler;
	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
	    mpt_config_reply_handler;
	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
	    mpt_handshake_reply_handler;
	return (0);
}

/*
 * Initialize per-instance driver data and perform
 * initial controller configuration.
 */
int
mpt_core_attach(struct mpt_softc *mpt)
{
	int val, error;

	LIST_INIT(&mpt->ack_frames);
	/* Put all request buffers on the free list */
	TAILQ_INIT(&mpt->request_pending_list);
	TAILQ_INIT(&mpt->request_free_list);
	TAILQ_INIT(&mpt->request_timeout_list);
	MPT_LOCK(mpt);
	for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
		request_t *req = &mpt->request_pool[val];
		req->state = REQ_STATE_ALLOCATED;
		mpt_free_request(mpt, req);
	}
	MPT_UNLOCK(mpt);
	for (val = 0; val < MPT_MAX_LUNS; val++) {
		STAILQ_INIT(&mpt->trt[val].atios);
		STAILQ_INIT(&mpt->trt[val].inots);
	}
	STAILQ_INIT(&mpt->trt_wildcard.atios);
	STAILQ_INIT(&mpt->trt_wildcard.inots);
#ifdef MPT_TEST_MULTIPATH
	mpt->failure_id = -1;
#endif
	mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE;
	mpt_sysctl_attach(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
	    mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));

	MPT_LOCK(mpt);
	error = mpt_configure_ioc(mpt, 0, 0);
	MPT_UNLOCK(mpt);

	return (error);
}

int
mpt_core_enable(struct mpt_softc *mpt)
{
	/*
	 * We enter with the IOC enabled, but async events
	 * not enabled, ports not enabled and interrupts
	 * not enabled.
	 */
	MPT_LOCK(mpt);

	/*
	 * Enable asynchronous event reporting. All personalities
	 * have attached by now, so they should be able to field
	 * async events.
	 */
	mpt_send_event_request(mpt, 1);

	/*
	 * Catch any pending interrupts.
	 *
	 * This seems to be crucial; otherwise
	 * the portenable below times out.
	 */
	mpt_intr(mpt);

	/*
	 * Enable Interrupts
	 */
	mpt_enable_ints(mpt);

	/*
	 * Catch any pending interrupts.
	 *
	 * This seems to be crucial; otherwise
	 * the portenable below times out.
	 */
	mpt_intr(mpt);

	/*
	 * Enable the port.
	 */
	if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
		mpt_prt(mpt, "failed to enable port 0\n");
		MPT_UNLOCK(mpt);
		return (ENXIO);
	}
	MPT_UNLOCK(mpt);
	return (0);
}

void
mpt_core_shutdown(struct mpt_softc *mpt)
{
	mpt_disable_ints(mpt);
}

void
mpt_core_detach(struct mpt_softc *mpt)
{
	/*
	 * XXX: FREE MEMORY
	 */
	mpt_disable_ints(mpt);
}

int
mpt_core_unload(struct mpt_personality *pers)
{
	/* Unload is always successful. */
	return (0);
}

#define	FW_UPLOAD_REQ_SIZE				\
	(sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION)	\
	+ sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32))
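
/*
 * In other words (a reading of the macro above, not a separate contract):
 * the upload request is the fixed MSG_FW_UPLOAD header with its embedded
 * SGL union replaced by one transaction-context element plus a single
 * 32-bit simple SGE, which is exactly how mpt_upload_fw() fills it in.
 */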

static int
mpt_upload_fw(struct mpt_softc *mpt)
{
	uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE];
	MSG_FW_UPLOAD_REPLY fw_reply;
	MSG_FW_UPLOAD *fw_req;
	FW_UPLOAD_TCSGE *tsge;
	SGE_SIMPLE32 *sge;
	uint32_t flags;
	int error;

	memset(&fw_req_buf, 0, sizeof(fw_req_buf));
	fw_req = (MSG_FW_UPLOAD *)fw_req_buf;
	fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
	fw_req->Function = MPI_FUNCTION_FW_UPLOAD;
	fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL;
	tsge->DetailsLength = 12;
	tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
	tsge->ImageSize = htole32(mpt->fw_image_size);
	sge = (SGE_SIMPLE32 *)(tsge + 1);
	flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER
	    | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT
	    | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST);
	flags <<= MPI_SGE_FLAGS_SHIFT;
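	/*
	 * Per the MPI SGE format, the FlagsLength word carries the flag
	 * bits in its top byte above a 24-bit length field, so after the
	 * shift the flags can simply be OR'd with the image size.
	 */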
	sge->FlagsLength = htole32(flags | mpt->fw_image_size);
	sge->Address = htole32(mpt->fw_phys);
	error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf);
	if (error)
		return (error);
	error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply);
	return (error);
}

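/*
 * Write a buffer into IOC memory through the diagnostic address/data
 * window: one write to MPT_OFFSET_DIAG_ADDR selects the starting address,
 * after which the chip is assumed to auto-increment as each 32-bit word
 * is pushed through MPT_OFFSET_DIAG_DATA.
 */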
static void
mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
	       uint32_t *data, bus_size_t len)
{
	uint32_t *data_end;

	data_end = data + (roundup2(len, sizeof(uint32_t)) / 4);
	if (mpt->is_sas) {
		pci_enable_io(mpt->dev, SYS_RES_IOPORT);
	}
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
	while (data != data_end) {
		mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
		data++;
	}
	if (mpt->is_sas) {
		pci_disable_io(mpt->dev, SYS_RES_IOPORT);
	}
}

static int
mpt_download_fw(struct mpt_softc *mpt)
{
	MpiFwHeader_t *fw_hdr;
	int error;
	uint32_t ext_offset;
	uint32_t data;

	mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
	    mpt->fw_image_size);

	error = mpt_enable_diag_mode(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Could not enter diagnostic mode!\n");
		return (EIO);
	}

	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
	    MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM);

	fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
	mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr,
	    fw_hdr->ImageSize);

	ext_offset = fw_hdr->NextImageHeaderOffset;
	while (ext_offset != 0) {
		MpiExtImageHeader_t *ext;

		ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset);
		ext_offset = ext->NextImageHeaderOffset;

		mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext,
		    ext->ImageSize);
	}

	if (mpt->is_sas) {
		pci_enable_io(mpt->dev, SYS_RES_IOPORT);
	}
	/* Setup the address to jump to on reset. */
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);

	/*
	 * The controller sets the "flash bad" status after attempting
	 * to auto-boot from flash. Clear the status so that the controller
	 * will continue the boot process with our newly installed firmware.
	 */
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
	data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);

	if (mpt->is_sas) {
		pci_disable_io(mpt->dev, SYS_RES_IOPORT);
	}

	/*
	 * Re-enable the processor and clear the boot halt flag.
	 */
	data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
	data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM);
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);

	mpt_disable_diag_mode(mpt);
	return (0);
}

/*
 * Allocate/Initialize data structures for the controller. Called
 * once at instance startup.
 */
static int
mpt_configure_ioc(struct mpt_softc *mpt, int tn, int needreset)
{
	PTR_MSG_PORT_FACTS_REPLY pfp;
	int error, port;
	size_t len;

	if (tn == MPT_MAX_TRYS) {
		return (-1);
	}

	/*
	 * No need to reset if the IOC is already in the READY state.
	 *
	 * Force reset if initialization failed previously.
	 * Note that a hard_reset of the second channel of a '929
	 * will stop operation of the first channel. Hopefully, if the
	 * first channel is ok, the second will not require a hard
	 * reset.
	 */
	if (needreset || MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_READY) {
		if (mpt_reset(mpt, FALSE) != MPT_OK) {
			return (mpt_configure_ioc(mpt, tn + 1, 1));
		}
		needreset = 0;
	}

	if (mpt_get_iocfacts(mpt, &mpt->ioc_facts) != MPT_OK) {
		mpt_prt(mpt, "mpt_get_iocfacts failed\n");
		return (mpt_configure_ioc(mpt, tn + 1, 1));
	}
	mpt2host_iocfacts_reply(&mpt->ioc_facts);

	mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
	    mpt->ioc_facts.MsgVersion >> 8,
	    mpt->ioc_facts.MsgVersion & 0xFF,
	    mpt->ioc_facts.HeaderVersion >> 8,
	    mpt->ioc_facts.HeaderVersion & 0xFF);

	/*
	 * Now that we know request frame size, we can calculate
	 * the actual (reasonable) segment limit for read/write I/O.
	 *
	 * This limit is constrained by:
	 *
	 *  + The size of each area we allocate per command (and how
	 *    many chain segments we can fit into it).
	 *  + The total number of areas we've set up.
	 *  + The actual chain depth the card will allow.
	 *
	 * The first area's segment count is limited by the I/O request
	 * at the head of it. We cannot allocate realistically more
	 * than MPT_MAX_REQUESTS areas. Therefore, to account for both
	 * conditions, we'll just start out with MPT_MAX_REQUESTS-2.
	 */
	/* total number of request areas we (can) allocate */
	mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;

	/* converted to the number of chain areas possible */
	mpt->max_seg_cnt *= MPT_NRFM(mpt);

	/* limited by the number of chain areas the card will support */
	if (mpt->max_seg_cnt > mpt->ioc_facts.MaxChainDepth) {
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "chain depth limited to %u (from %u)\n",
		    mpt->ioc_facts.MaxChainDepth, mpt->max_seg_cnt);
		mpt->max_seg_cnt = mpt->ioc_facts.MaxChainDepth;
	}

	/* converted to the number of simple sges in chain segments. */
	mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);
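
	/*
	 * Worked example with made-up numbers: 256 request areas give
	 * 254 usable ones; with, say, 3 chain frames per request area
	 * that is 762 chain areas, possibly clipped by MaxChainDepth;
	 * each chain area then contributes MPT_NSGL - 1 simple SGEs,
	 * the remaining slot being needed to link to the next chain.
	 */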

	mpt_lprt(mpt, MPT_PRT_DEBUG, "Maximum Segment Count: %u\n",
	    mpt->max_seg_cnt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "MsgLength=%u IOCNumber = %d\n",
	    mpt->ioc_facts.MsgLength, mpt->ioc_facts.IOCNumber);
	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
	    "Request Frame Size %u bytes Max Chain Depth %u\n",
	    mpt->ioc_facts.GlobalCredits, mpt->ioc_facts.BlockSize,
	    mpt->ioc_facts.RequestFrameSize << 2,
	    mpt->ioc_facts.MaxChainDepth);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOCFACTS: Num Ports %d, FWImageSize %d, "
	    "Flags=%#x\n", mpt->ioc_facts.NumberOfPorts,
	    (int)mpt->ioc_facts.FWImageSize, mpt->ioc_facts.Flags);

	len = mpt->ioc_facts.NumberOfPorts * sizeof (MSG_PORT_FACTS_REPLY);
	mpt->port_facts = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->port_facts == NULL) {
		mpt_prt(mpt, "unable to allocate memory for port facts\n");
		return (ENOMEM);
	}


	if ((mpt->ioc_facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) &&
	    (mpt->fw_uploaded == 0)) {
		struct mpt_map_info mi;

		/*
		 * In some configurations, the IOC's firmware is
		 * stored in a shared piece of system NVRAM that
		 * is only accessible via the BIOS. In this
		 * case, the IOC keeps a copy of the firmware in
		 * RAM until the OS driver retrieves it. Once
		 * retrieved, we are responsible for re-downloading
		 * the firmware after any hard-reset.
		 */
		mpt->fw_image_size = mpt->ioc_facts.FWImageSize;
		error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
		    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
		    mpt->fw_image_size, 1, mpt->fw_image_size, 0,
		    &mpt->fw_dmat);
		if (error != 0) {
			mpt_prt(mpt, "cannot create firmware dma tag\n");
			return (ENOMEM);
		}
		error = bus_dmamem_alloc(mpt->fw_dmat,
		    (void **)&mpt->fw_image, BUS_DMA_NOWAIT, &mpt->fw_dmap);
		if (error != 0) {
			mpt_prt(mpt, "cannot allocate firmware memory\n");
			bus_dma_tag_destroy(mpt->fw_dmat);
			return (ENOMEM);
		}
		mi.mpt = mpt;
		mi.error = 0;
		bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
		    mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, &mi, 0);
		mpt->fw_phys = mi.phys;

		error = mpt_upload_fw(mpt);
		if (error != 0) {
			mpt_prt(mpt, "firmware upload failed.\n");
			bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
			bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
			    mpt->fw_dmap);
			bus_dma_tag_destroy(mpt->fw_dmat);
			mpt->fw_image = NULL;
			return (EIO);
		}
		mpt->fw_uploaded = 1;
	}

	for (port = 0; port < mpt->ioc_facts.NumberOfPorts; port++) {
		pfp = &mpt->port_facts[port];
		error = mpt_get_portfacts(mpt, 0, pfp);
		if (error != MPT_OK) {
			mpt_prt(mpt,
			    "mpt_get_portfacts on port %d failed\n", port);
			kfree(mpt->port_facts, M_DEVBUF);
			mpt->port_facts = NULL;
			return (mpt_configure_ioc(mpt, tn + 1, 1));
		}
		mpt2host_portfacts_reply(pfp);

		if (port > 0) {
			error = MPT_PRT_INFO;
		} else {
			error = MPT_PRT_DEBUG;
		}
		mpt_lprt(mpt, error,
		    "PORTFACTS[%d]: Type %x PFlags %x IID %d MaxDev %d\n",
		    port, pfp->PortType, pfp->ProtocolFlags, pfp->PortSCSIID,
		    pfp->MaxDevices);
	}

	/*
	 * XXX: Not yet supporting more than port 0
	 */
	pfp = &mpt->port_facts[0];
	if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_FC) {
		mpt->is_fc = 1;
		mpt->is_sas = 0;
		mpt->is_spi = 0;
	} else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
		mpt->is_fc = 0;
		mpt->is_sas = 1;
		mpt->is_spi = 0;
	} else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SCSI) {
		mpt->is_fc = 0;
		mpt->is_sas = 0;
		mpt->is_spi = 1;
	} else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_ISCSI) {
		mpt_prt(mpt, "iSCSI not supported yet\n");
		return (ENXIO);
	} else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_INACTIVE) {
		mpt_prt(mpt, "Inactive Port\n");
		return (ENXIO);
	} else {
		mpt_prt(mpt, "unknown Port Type %#x\n", pfp->PortType);
		return (ENXIO);
	}

	/*
	 * Set our role with what this port supports.
	 *
	 * Note this might be changed later in different modules
	 * if this is different from what is wanted.
	 */
	mpt->role = MPT_ROLE_NONE;
	if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
		mpt->role |= MPT_ROLE_INITIATOR;
	}
	if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
		mpt->role |= MPT_ROLE_TARGET;
	}

	/*
	 * Enable the IOC
	 */
	if (mpt_enable_ioc(mpt, 1) != MPT_OK) {
		mpt_prt(mpt, "unable to initialize IOC\n");
		return (ENXIO);
	}

	/*
	 * Read IOC configuration information.
	 *
	 * We need this to determine whether or not we have certain
	 * settings for Integrated Mirroring (e.g.).
	 */
	mpt_read_config_info_ioc(mpt);

	return (0);
}

static int
mpt_enable_ioc(struct mpt_softc *mpt, int portenable)
{
	uint32_t pptr;
	int val;

	if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_ioc_init failed\n");
		return (EIO);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n");

	if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) {
		mpt_prt(mpt, "IOC failed to go to run state\n");
		return (ENXIO);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");

	/*
	 * Give it reply buffers
	 *
	 * Do *not* exceed global credits.
	 */
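	/*
	 * Illustration with assumed sizes: a 4KB reply page and 128-byte
	 * reply frames would let the loop below post at most 31 reply
	 * buffers, and the ++val check stops earlier still if the IOC
	 * advertised fewer than that in GlobalCredits.
	 */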
	for (val = 0, pptr = mpt->reply_phys;
	    (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
	    pptr += MPT_REPLY_SIZE) {
		mpt_free_reply(mpt, pptr);
		if (++val == mpt->ioc_facts.GlobalCredits - 1)
			break;
	}

	/*
	 * Enable the port if asked. This is only done if we're resetting
	 * the IOC after initial startup.
	 */
	if (portenable) {
		/*
		 * Enable asynchronous event reporting
		 */
		mpt_send_event_request(mpt, 1);

		if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
			mpt_prt(mpt, "failed to enable port 0\n");
			return (ENXIO);
		}
	}
	return (MPT_OK);
}

/*
 * Endian Conversion Functions - only used on Big Endian machines.
 */
#if _BYTE_ORDER == _BIG_ENDIAN
void
mpt2host_sge_simple_union(SGE_SIMPLE_UNION *sge)
{

	MPT_2_HOST32(sge, FlagsLength);
	MPT_2_HOST32(sge, u.Address64.Low);
	MPT_2_HOST32(sge, u.Address64.High);
}
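
/*
 * For reference, the conversion macros used throughout this section are
 * assumed to expand to simple byte-swap assignments along these lines
 * (the actual definitions live in the driver headers):
 *
 *	#define MPT_2_HOST32(ptr, tag)	(ptr)->tag = le32toh((ptr)->tag)
 *	#define MPT_2_HOST16(ptr, tag)	(ptr)->tag = le16toh((ptr)->tag)
 *
 * On little-endian hosts the whole section is compiled out by the
 * _BYTE_ORDER test above.
 */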

void
mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *rp)
{

	MPT_2_HOST16(rp, MsgVersion);
	MPT_2_HOST16(rp, HeaderVersion);
	MPT_2_HOST32(rp, MsgContext);
	MPT_2_HOST16(rp, IOCExceptions);
	MPT_2_HOST16(rp, IOCStatus);
	MPT_2_HOST32(rp, IOCLogInfo);
	MPT_2_HOST16(rp, ReplyQueueDepth);
	MPT_2_HOST16(rp, RequestFrameSize);
	MPT_2_HOST16(rp, Reserved_0101_FWVersion);
	MPT_2_HOST16(rp, ProductID);
	MPT_2_HOST32(rp, CurrentHostMfaHighAddr);
	MPT_2_HOST16(rp, GlobalCredits);
	MPT_2_HOST32(rp, CurrentSenseBufferHighAddr);
	MPT_2_HOST16(rp, CurReplyFrameSize);
	MPT_2_HOST32(rp, FWImageSize);
	MPT_2_HOST32(rp, IOCCapabilities);
	MPT_2_HOST32(rp, FWVersion.Word);
	MPT_2_HOST16(rp, HighPriorityQueueDepth);
	MPT_2_HOST16(rp, Reserved2);
	mpt2host_sge_simple_union(&rp->HostPageBufferSGE);
	MPT_2_HOST32(rp, ReplyFifoHostSignalingAddr);
}

void
mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *pfp)
{

	MPT_2_HOST16(pfp, Reserved);
	MPT_2_HOST16(pfp, Reserved1);
	MPT_2_HOST32(pfp, MsgContext);
	MPT_2_HOST16(pfp, Reserved2);
	MPT_2_HOST16(pfp, IOCStatus);
	MPT_2_HOST32(pfp, IOCLogInfo);
	MPT_2_HOST16(pfp, MaxDevices);
	MPT_2_HOST16(pfp, PortSCSIID);
	MPT_2_HOST16(pfp, ProtocolFlags);
	MPT_2_HOST16(pfp, MaxPostedCmdBuffers);
	MPT_2_HOST16(pfp, MaxPersistentIDs);
	MPT_2_HOST16(pfp, MaxLanBuckets);
	MPT_2_HOST16(pfp, Reserved4);
	MPT_2_HOST32(pfp, Reserved5);
}

void
mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *ioc2)
{
	int i;

	MPT_2_HOST32(ioc2, CapabilitiesFlags);
	for (i = 0; i < MPI_IOC_PAGE_2_RAID_VOLUME_MAX; i++) {
		MPT_2_HOST16(ioc2, RaidVolume[i].Reserved3);
	}
}

void
mpt2host_config_page_ioc3(CONFIG_PAGE_IOC_3 *ioc3)
{

	MPT_2_HOST16(ioc3, Reserved2);
}

void
mpt2host_config_page_scsi_port_0(CONFIG_PAGE_SCSI_PORT_0 *sp0)
{

	MPT_2_HOST32(sp0, Capabilities);
	MPT_2_HOST32(sp0, PhysicalInterface);
}

void
mpt2host_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
{

	MPT_2_HOST32(sp1, Configuration);
	MPT_2_HOST32(sp1, OnBusTimerValue);
	MPT_2_HOST16(sp1, IDConfig);
}

void
host2mpt_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
{

	HOST_2_MPT32(sp1, Configuration);
	HOST_2_MPT32(sp1, OnBusTimerValue);
	HOST_2_MPT16(sp1, IDConfig);
}

void
mpt2host_config_page_scsi_port_2(CONFIG_PAGE_SCSI_PORT_2 *sp2)
{
	int i;

	MPT_2_HOST32(sp2, PortFlags);
	MPT_2_HOST32(sp2, PortSettings);
	for (i = 0; i < sizeof(sp2->DeviceSettings) /
	    sizeof(*sp2->DeviceSettings); i++) {
		MPT_2_HOST16(sp2, DeviceSettings[i].DeviceFlags);
	}
}

void
mpt2host_config_page_scsi_device_0(CONFIG_PAGE_SCSI_DEVICE_0 *sd0)
{

	MPT_2_HOST32(sd0, NegotiatedParameters);
	MPT_2_HOST32(sd0, Information);
}

void
mpt2host_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
{

	MPT_2_HOST32(sd1, RequestedParameters);
	MPT_2_HOST32(sd1, Reserved);
	MPT_2_HOST32(sd1, Configuration);
}

void
host2mpt_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
{

	HOST_2_MPT32(sd1, RequestedParameters);
	HOST_2_MPT32(sd1, Reserved);
	HOST_2_MPT32(sd1, Configuration);
}

void
mpt2host_config_page_fc_port_0(CONFIG_PAGE_FC_PORT_0 *fp0)
{

	MPT_2_HOST32(fp0, Flags);
	MPT_2_HOST32(fp0, PortIdentifier);
	MPT_2_HOST32(fp0, WWNN.Low);
	MPT_2_HOST32(fp0, WWNN.High);
	MPT_2_HOST32(fp0, WWPN.Low);
	MPT_2_HOST32(fp0, WWPN.High);
	MPT_2_HOST32(fp0, SupportedServiceClass);
	MPT_2_HOST32(fp0, SupportedSpeeds);
	MPT_2_HOST32(fp0, CurrentSpeed);
	MPT_2_HOST32(fp0, MaxFrameSize);
	MPT_2_HOST32(fp0, FabricWWNN.Low);
	MPT_2_HOST32(fp0, FabricWWNN.High);
	MPT_2_HOST32(fp0, FabricWWPN.Low);
	MPT_2_HOST32(fp0, FabricWWPN.High);
	MPT_2_HOST32(fp0, DiscoveredPortsCount);
	MPT_2_HOST32(fp0, MaxInitiators);
}

void
mpt2host_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
{

	MPT_2_HOST32(fp1, Flags);
	MPT_2_HOST32(fp1, NoSEEPROMWWNN.Low);
	MPT_2_HOST32(fp1, NoSEEPROMWWNN.High);
	MPT_2_HOST32(fp1, NoSEEPROMWWPN.Low);
	MPT_2_HOST32(fp1, NoSEEPROMWWPN.High);
}

void
host2mpt_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
{

	HOST_2_MPT32(fp1, Flags);
	HOST_2_MPT32(fp1, NoSEEPROMWWNN.Low);
	HOST_2_MPT32(fp1, NoSEEPROMWWNN.High);
	HOST_2_MPT32(fp1, NoSEEPROMWWPN.Low);
	HOST_2_MPT32(fp1, NoSEEPROMWWPN.High);
}

void
mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *volp)
{
	int i;

	MPT_2_HOST16(volp, VolumeStatus.Reserved);
	MPT_2_HOST16(volp, VolumeSettings.Settings);
	MPT_2_HOST32(volp, MaxLBA);
	MPT_2_HOST32(volp, MaxLBAHigh);
	MPT_2_HOST32(volp, StripeSize);
	MPT_2_HOST32(volp, Reserved2);
	MPT_2_HOST32(volp, Reserved3);
	for (i = 0; i < MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX; i++) {
		MPT_2_HOST16(volp, PhysDisk[i].Reserved);
	}
}

void
mpt2host_config_page_raid_phys_disk_0(CONFIG_PAGE_RAID_PHYS_DISK_0 *rpd0)
{

	MPT_2_HOST32(rpd0, Reserved1);
	MPT_2_HOST16(rpd0, PhysDiskStatus.Reserved);
	MPT_2_HOST32(rpd0, MaxLBA);
	MPT_2_HOST16(rpd0, ErrorData.Reserved);
	MPT_2_HOST16(rpd0, ErrorData.ErrorCount);
	MPT_2_HOST16(rpd0, ErrorData.SmartCount);
}

void
mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *vi)
{

	MPT_2_HOST16(vi, TotalBlocks.High);
	MPT_2_HOST16(vi, TotalBlocks.Low);
	MPT_2_HOST16(vi, BlocksRemaining.High);
	MPT_2_HOST16(vi, BlocksRemaining.Low);
}
#endif