/*-
 * Generic routines for LSI Fusion adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/mpt/mpt.c,v 1.61 2012/02/11 12:03:44 marius Exp $
 */

#include <dev/disk/mpt/mpt.h>
#include <dev/disk/mpt/mpt_cam.h> /* XXX For static handler registration */
#include <dev/disk/mpt/mpt_raid.h> /* XXX For static handler registration */

#include <dev/disk/mpt/mpilib/mpi.h>
#include <dev/disk/mpt/mpilib/mpi_ioc.h>
#include <dev/disk/mpt/mpilib/mpi_fc.h>
#include <dev/disk/mpt/mpilib/mpi_targ.h>

#include <sys/sysctl.h>

#define MPT_MAX_TRYS 3
#define MPT_MAX_WAIT 300000

static int maxwait_ack = 0;
static int maxwait_int = 0;
static int maxwait_state = 0;

static TAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq);
mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS];

static mpt_reply_handler_t mpt_default_reply_handler;
static mpt_reply_handler_t mpt_config_reply_handler;
static mpt_reply_handler_t mpt_handshake_reply_handler;
static mpt_reply_handler_t mpt_event_reply_handler;
static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
    MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
static int mpt_soft_reset(struct mpt_softc *mpt);
static void mpt_hard_reset(struct mpt_softc *mpt);
static int mpt_dma_buf_alloc(struct mpt_softc *mpt);
static void mpt_dma_buf_free(struct mpt_softc *mpt);
static int mpt_configure_ioc(struct mpt_softc *mpt, int, int);
static int mpt_enable_ioc(struct mpt_softc *mpt, int);

/************************* Personality Module Support *************************/
/*
 * We include one extra entry that is guaranteed to be NULL
 * to simplify our iterator.
 */
static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
static __inline struct mpt_personality*
	mpt_pers_find(struct mpt_softc *, u_int);
static __inline struct mpt_personality*
	mpt_pers_find_reverse(struct mpt_softc *, u_int);

static __inline struct mpt_personality *
mpt_pers_find(struct mpt_softc *mpt, u_int start_at)
{
	KASSERT(start_at <= MPT_MAX_PERSONALITIES,
	    ("mpt_pers_find: starting position out of range"));

	while (start_at < MPT_MAX_PERSONALITIES
	    && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
		start_at++;
	}
	return (mpt_personalities[start_at]);
}

/*
 * Used infrequently, so no need to optimize like a forward
 * traversal, where we rely on the MAX+1 entry being guaranteed
 * to be NULL.
 */
static __inline struct mpt_personality *
mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
{
	while (start_at < MPT_MAX_PERSONALITIES
	    && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
		start_at--;
	}
	if (start_at < MPT_MAX_PERSONALITIES)
		return (mpt_personalities[start_at]);
	return (NULL);
}

#define MPT_PERS_FOREACH(mpt, pers)				\
	for (pers = mpt_pers_find(mpt, /*start_at*/0);		\
	     pers != NULL;					\
	     pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))

#define MPT_PERS_FOREACH_REVERSE(mpt, pers)				\
	for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
	     pers != NULL;						\
	     pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))

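/*
 * Illustrative sketch (not part of the driver logic): both macros visit
 * only those personalities whose bit is set in mpt->mpt_pers_mask, e.g.
 *
 *	struct mpt_personality *pers;
 *
 *	MPT_PERS_FOREACH(mpt, pers)
 *		pers->ready(mpt);
 *
 * The forward walk can dereference mpt_personalities[MPT_MAX_PERSONALITIES]
 * safely because that extra entry is the guaranteed-NULL sentinel above.
 */
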
static mpt_load_handler_t mpt_stdload;
static mpt_probe_handler_t mpt_stdprobe;
static mpt_attach_handler_t mpt_stdattach;
static mpt_enable_handler_t mpt_stdenable;
static mpt_ready_handler_t mpt_stdready;
static mpt_event_handler_t mpt_stdevent;
static mpt_reset_handler_t mpt_stdreset;
static mpt_shutdown_handler_t mpt_stdshutdown;
static mpt_detach_handler_t mpt_stddetach;
static mpt_unload_handler_t mpt_stdunload;
static struct mpt_personality mpt_default_personality =
{
	.load = mpt_stdload,
	.probe = mpt_stdprobe,
	.attach = mpt_stdattach,
	.enable = mpt_stdenable,
	.ready = mpt_stdready,
	.event = mpt_stdevent,
	.reset = mpt_stdreset,
	.shutdown = mpt_stdshutdown,
	.detach = mpt_stddetach,
	.unload = mpt_stdunload
};

static mpt_load_handler_t mpt_core_load;
static mpt_attach_handler_t mpt_core_attach;
static mpt_enable_handler_t mpt_core_enable;
static mpt_reset_handler_t mpt_core_ioc_reset;
static mpt_event_handler_t mpt_core_event;
static mpt_shutdown_handler_t mpt_core_shutdown;
static mpt_shutdown_handler_t mpt_core_detach;
static mpt_unload_handler_t mpt_core_unload;
static struct mpt_personality mpt_core_personality =
{
	.name = "mpt_core",
	.load = mpt_core_load,
//	.attach = mpt_core_attach,
//	.enable = mpt_core_enable,
	.event = mpt_core_event,
	.reset = mpt_core_ioc_reset,
	.shutdown = mpt_core_shutdown,
	.detach = mpt_core_detach,
	.unload = mpt_core_unload,
};

/*
 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
 * ordering information.  We want the core to always register FIRST.
 * Other modules are set to SI_ORDER_SECOND.
 */
static moduledata_t mpt_core_mod = {
	"mpt_core", mpt_modevent, &mpt_core_personality
};
DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(mpt_core, 1);

#define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))

int
mpt_modevent(module_t mod, int type, void *data)
{
	struct mpt_personality *pers;
	int error;

	pers = (struct mpt_personality *)data;

	error = 0;
	switch (type) {
	case MOD_LOAD:
	{
		mpt_load_handler_t **def_handler;
		mpt_load_handler_t **pers_handler;
		int i;

		for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
			if (mpt_personalities[i] == NULL)
				break;
		}
		if (i >= MPT_MAX_PERSONALITIES) {
			error = ENOMEM;
			break;
		}
		pers->id = i;
		mpt_personalities[i] = pers;

		/* Install standard/noop handlers for any NULL entries. */
		def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality);
		pers_handler = MPT_PERS_FIRST_HANDLER(pers);
		while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) {
			if (*pers_handler == NULL)
				*pers_handler = *def_handler;
			pers_handler++;
			def_handler++;
		}

		error = (pers->load(pers));
		if (error != 0)
			mpt_personalities[i] = NULL;
		break;
	}
	case MOD_SHUTDOWN:
		break;
#if __FreeBSD_version >= 500000
	case MOD_QUIESCE:
		break;
#endif
	case MOD_UNLOAD:
		error = pers->unload(pers);
		mpt_personalities[pers->id] = NULL;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

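/*
 * Sketch of how a personality module hooks into mpt_modevent() (the name
 * "mpt_foo" is hypothetical; mpt_cam and mpt_raid are the real examples):
 *
 *	static struct mpt_personality mpt_foo_personality = {
 *		.name = "mpt_foo",
 *		.attach = mpt_foo_attach,
 *	};
 *	static moduledata_t mpt_foo_mod = {
 *		"mpt_foo", mpt_modevent, &mpt_foo_personality
 *	};
 *	DECLARE_MODULE(mpt_foo, mpt_foo_mod, SI_SUB_DRIVERS, SI_ORDER_SECOND);
 *
 * On MOD_LOAD, any handlers the personality leaves NULL are filled in with
 * the no-op defaults from mpt_default_personality.
 */
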
static int
mpt_stdload(struct mpt_personality *pers)
{

	/* Load is always successful. */
	return (0);
}

static int
mpt_stdprobe(struct mpt_softc *mpt)
{

	/* Probe is always successful. */
	return (0);
}

static int
mpt_stdattach(struct mpt_softc *mpt)
{

	/* Attach is always successful. */
	return (0);
}

static int
mpt_stdenable(struct mpt_softc *mpt)
{

	/* Enable is always successful. */
	return (0);
}

static void
mpt_stdready(struct mpt_softc *mpt)
{

}

static int
mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
{

	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
	/* Event was not for us. */
	return (0);
}

static void
mpt_stdreset(struct mpt_softc *mpt, int type)
{

}

static void
mpt_stdshutdown(struct mpt_softc *mpt)
{

}

static void
mpt_stddetach(struct mpt_softc *mpt)
{

}

static int
mpt_stdunload(struct mpt_personality *pers)
{

	/* Unload is always successful. */
	return (0);
}

/*
 * Post driver attachment, we may want to perform some global actions.
 * Here is the hook to do so.
 */

static void
mpt_postattach(void *unused)
{
	struct mpt_softc *mpt;
	struct mpt_personality *pers;

	TAILQ_FOREACH(mpt, &mpt_tailq, links) {
		MPT_PERS_FOREACH(mpt, pers)
			pers->ready(mpt);
	}
}
SYSINIT(mptdev, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE, mpt_postattach, NULL);

/******************************* Bus DMA Support ******************************/
void
mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mpt_map_info *map_info;

	map_info = (struct mpt_map_info *)arg;
	map_info->error = error;
	map_info->phys = segs->ds_addr;
}

/**************************** Reply/Event Handling ****************************/
int
mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
    mpt_handler_t handler, uint32_t *phandler_id)
{

	switch (type) {
	case MPT_HANDLER_REPLY:
	{
		u_int cbi;
		u_int free_cbi;

		if (phandler_id == NULL)
			return (EINVAL);

		free_cbi = MPT_HANDLER_ID_NONE;
		for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) {
			/*
			 * If the same handler is registered multiple
			 * times, don't error out.  Just return the
			 * index of the original registration.
			 */
			if (mpt_reply_handlers[cbi] == handler.reply_handler) {
				*phandler_id = MPT_CBI_TO_HID(cbi);
				return (0);
			}

			/*
			 * Fill from the front in the hope that
			 * all registered handlers consume only a
			 * single cache line.
			 *
			 * We don't break on the first empty slot so
			 * that the full table is checked to see if
			 * this handler was previously registered.
			 */
			if (free_cbi == MPT_HANDLER_ID_NONE &&
			    (mpt_reply_handlers[cbi]
			     == mpt_default_reply_handler))
				free_cbi = cbi;
		}
		if (free_cbi == MPT_HANDLER_ID_NONE) {
			return (ENOMEM);
		}
		mpt_reply_handlers[free_cbi] = handler.reply_handler;
		*phandler_id = MPT_CBI_TO_HID(free_cbi);
		break;
	}
	default:
		mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
		return (EINVAL);
	}
	return (0);
}

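/*
 * Typical registration sketch (handler name hypothetical; the CAM and RAID
 * personalities register their reply handlers this way at attach time):
 *
 *	mpt_handler_t handler;
 *	uint32_t handler_id;
 *
 *	handler.reply_handler = mpt_foo_reply_handler;
 *	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
 *	    &handler_id);
 *
 * The returned id is later folded into a request's MsgContext so that
 * mpt_intr() can dispatch the reply back to its owner.
 */
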
int
mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
    mpt_handler_t handler, uint32_t handler_id)
{

	switch (type) {
	case MPT_HANDLER_REPLY:
	{
		u_int cbi;

		cbi = MPT_CBI(handler_id);
		if (cbi >= MPT_NUM_REPLY_HANDLERS
		    || mpt_reply_handlers[cbi] != handler.reply_handler)
			return (ENOENT);
		mpt_reply_handlers[cbi] = mpt_default_reply_handler;
		break;
	}
	default:
		mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
		return (EINVAL);
	}
	return (0);
}

static int
mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{

	mpt_prt(mpt,
	    "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n",
	    req, req->serno, reply_desc, reply_frame);

	if (reply_frame != NULL)
		mpt_dump_reply_frame(mpt, reply_frame);

	mpt_prt(mpt, "Reply Frame Ignored\n");

	return (/*free_reply*/TRUE);
}

static int
mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{

	if (req != NULL) {
		if (reply_frame != NULL) {
			MSG_CONFIG *cfgp;
			MSG_CONFIG_REPLY *reply;

			cfgp = (MSG_CONFIG *)req->req_vbuf;
			reply = (MSG_CONFIG_REPLY *)reply_frame;
			req->IOCStatus = le16toh(reply_frame->IOCStatus);
			bcopy(&reply->Header, &cfgp->Header,
			    sizeof(cfgp->Header));
			cfgp->ExtPageLength = reply->ExtPageLength;
			cfgp->ExtPageType = reply->ExtPageType;
		}
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
			wakeup(req);
		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
			/*
			 * Whew- we can free this request (late completion)
			 */
			mpt_free_request(mpt, req);
		}
	}

	return (TRUE);
}

static int
mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{

	/* Nothing to be done. */
	return (TRUE);
}

static int
mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_reply;

	KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler"));
	KASSERT(req != NULL, ("null request in mpt_event_reply_handler"));

	free_reply = TRUE;
	switch (reply_frame->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
	{
		MSG_EVENT_NOTIFY_REPLY *msg;
		struct mpt_personality *pers;
		u_int handled;

		handled = 0;
		msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
		msg->EventDataLength = le16toh(msg->EventDataLength);
		msg->IOCStatus = le16toh(msg->IOCStatus);
		msg->IOCLogInfo = le32toh(msg->IOCLogInfo);
		msg->Event = le32toh(msg->Event);
		MPT_PERS_FOREACH(mpt, pers)
			handled += pers->event(mpt, req, msg);

		if (handled == 0 && mpt->mpt_pers_mask == 0) {
			mpt_lprt(mpt, MPT_PRT_INFO,
			    "No Handlers For Any Event Notify Frames. "
			    "Event %#x (ACK %sequired).\n",
			    msg->Event, msg->AckRequired? "r" : "not r");
		} else if (handled == 0) {
			mpt_lprt(mpt,
			    msg->AckRequired? MPT_PRT_WARN : MPT_PRT_INFO,
			    "Unhandled Event Notify Frame. Event %#x "
			    "(ACK %sequired).\n",
			    msg->Event, msg->AckRequired? "r" : "not r");
		}

		if (msg->AckRequired) {
			request_t *ack_req;
			uint32_t context;

			context = req->index | MPT_REPLY_HANDLER_EVENTS;
			ack_req = mpt_get_request(mpt, FALSE);
			if (ack_req == NULL) {
				struct mpt_evtf_record *evtf;

				evtf = (struct mpt_evtf_record *)reply_frame;
				evtf->context = context;
				LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
				free_reply = FALSE;
				break;
			}
			mpt_send_event_ack(mpt, ack_req, msg, context);
			/*
			 * Don't check for CONTINUATION_REPLY here
			 */
			return (free_reply);
		}
		break;
	}
	case MPI_FUNCTION_PORT_ENABLE:
		mpt_lprt(mpt, MPT_PRT_DEBUG, "enable port reply\n");
		break;
	case MPI_FUNCTION_EVENT_ACK:
		break;
	default:
		mpt_prt(mpt, "unknown event function: %x\n",
		    reply_frame->Function);
		break;
	}

	/*
	 * I'm not sure that this continuation stuff works as it should.
	 *
	 * I've had FC async events occur that free the frame up because
	 * the continuation bit isn't set, and then additional async events
	 * then occur using the same context. As you might imagine, this
	 * leads to Very Bad Thing.
	 *
	 * Let's just be safe for now and not free them up until we figure
	 * out what's actually happening here.
	 */
#if 0
	if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation",
		    reply_frame->Function, req, req->serno);
		if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
			MSG_EVENT_NOTIFY_REPLY *msg =
			    (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
			mpt_prtc(mpt, " Event=0x%x AckReq=%d",
			    msg->Event, msg->AckRequired);
		}
	} else {
		mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation",
		    reply_frame->Function, req, req->serno);
		if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
			MSG_EVENT_NOTIFY_REPLY *msg =
			    (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
			mpt_prtc(mpt, " Event=0x%x AckReq=%d",
			    msg->Event, msg->AckRequired);
		}
		mpt_prtc(mpt, "\n");
	}
#endif
	return (free_reply);
}

/*
 * Process an asynchronous event from the IOC.
 */
static int
mpt_core_event(struct mpt_softc *mpt, request_t *req,
    MSG_EVENT_NOTIFY_REPLY *msg)
{

	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
	    msg->Event & 0xFF);
	switch (msg->Event & 0xFF) {
	case MPI_EVENT_NONE:
		break;
	case MPI_EVENT_LOG_DATA:
	{
		int i;

		/* Some error occurred that LSI wants logged */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
		    msg->IOCLogInfo);
		mpt_prt(mpt, "\tEvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++)
			mpt_prtc(mpt, " %08x", msg->Data[i]);
		mpt_prtc(mpt, "\n");
		break;
	}
	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement
		 * of our mpt_send_event_request.
		 */
		break;
	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	default:
		return (0);
		break;
	}
	return (1);
}

static void
mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
    MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
{
	MSG_EVENT_ACK *ackp;

	ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
	memset(ackp, 0, sizeof (*ackp));
	ackp->Function = MPI_FUNCTION_EVENT_ACK;
	ackp->Event = htole32(msg->Event);
	ackp->EventContext = htole32(msg->EventContext);
	ackp->MsgContext = htole32(context);
	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, ack_req);
}

/***************************** Interrupt Handling *****************************/
void
mpt_intr(void *arg)
{
	struct mpt_softc *mpt;
	uint32_t reply_desc;
	int ntrips = 0;

	mpt = (struct mpt_softc *)arg;
	mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n");
	MPT_LOCK_ASSERT(mpt);

	while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
		request_t *req;
		MSG_DEFAULT_REPLY *reply_frame;
		uint32_t reply_baddr;
		uint32_t ctxt_idx;
		u_int cb_index;
		u_int req_index;
		u_int offset;
		int free_rf;

		req = NULL;
		reply_frame = NULL;
		reply_baddr = 0;
		offset = 0;
		if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
			/*
			 * Ensure that the reply frame is coherent.
			 */
			reply_baddr = MPT_REPLY_BADDR(reply_desc);
			offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
			bus_dmamap_sync_range(mpt->reply_dmat,
			    mpt->reply_dmap, offset, MPT_REPLY_SIZE,
			    BUS_DMASYNC_POSTREAD);
			reply_frame = MPT_REPLY_OTOV(mpt, offset);
			ctxt_idx = le32toh(reply_frame->MsgContext);
		} else {
			uint32_t type;

			type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
			ctxt_idx = reply_desc;
			mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
			    reply_desc);

			switch (type) {
			case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
				ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
				break;
			case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
				ctxt_idx = GET_IO_INDEX(reply_desc);
				if (mpt->tgt_cmd_ptrs == NULL) {
					mpt_prt(mpt,
					    "mpt_intr: no target cmd ptrs\n");
					reply_desc = MPT_REPLY_EMPTY;
					break;
				}
				if (ctxt_idx >= mpt->tgt_cmds_allocated) {
					mpt_prt(mpt,
					    "mpt_intr: bad tgt cmd ctxt %u\n",
					    ctxt_idx);
					reply_desc = MPT_REPLY_EMPTY;
					ntrips = 1000;
					break;
				}
				req = mpt->tgt_cmd_ptrs[ctxt_idx];
				if (req == NULL) {
					mpt_prt(mpt, "no request backpointer "
					    "at index %u", ctxt_idx);
					reply_desc = MPT_REPLY_EMPTY;
					ntrips = 1000;
					break;
				}
				/*
				 * Reformulate ctxt_idx to be just as if
				 * it were another type of context reply
				 * so the code below will find the request
				 * via indexing into the pool.
				 */
				ctxt_idx =
				    req->index | mpt->scsi_tgt_handler_id;
				req = NULL;
				break;
			case MPI_CONTEXT_REPLY_TYPE_LAN:
				mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
				    reply_desc);
				reply_desc = MPT_REPLY_EMPTY;
				break;
			default:
				mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
				reply_desc = MPT_REPLY_EMPTY;
				break;
			}
			if (reply_desc == MPT_REPLY_EMPTY) {
				if (ntrips++ > 1000) {
					break;
				}
				continue;
			}
		}

		cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
		req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
		if (req_index < MPT_MAX_REQUESTS(mpt)) {
			req = &mpt->request_pool[req_index];
		} else {
			mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc =="
			    " 0x%x)\n", req_index, reply_desc);
		}

		bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		free_rf = mpt_reply_handlers[cb_index](mpt, req,
		    reply_desc, reply_frame);

		if (reply_frame != NULL && free_rf) {
			bus_dmamap_sync_range(mpt->reply_dmat,
			    mpt->reply_dmap, offset, MPT_REPLY_SIZE,
			    BUS_DMASYNC_PREREAD);
			mpt_free_reply(mpt, reply_baddr);
		}

		/*
		 * If we got ourselves disabled, don't get stuck in a loop
		 */
		if (mpt->disabled) {
			mpt_disable_ints(mpt);
			break;
		}
		if (ntrips++ > 1000) {
			break;
		}
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n");
}

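/*
 * Dispatch sketch for the loop above: every MsgContext we hand the IOC
 * encodes two indices, recovered as
 *
 *	cb_index  = MPT_CONTEXT_TO_CBI(ctxt_idx);   -- reply handler slot
 *	req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);  -- request pool slot
 *
 * so a single reply descriptor identifies both the request_t it completes
 * and the handler that owns it.
 */
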
/******************************* Error Recovery *******************************/
void
mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
    u_int iocstatus)
{
	MSG_DEFAULT_REPLY ioc_status_frame;
	request_t *req;

	memset(&ioc_status_frame, 0, sizeof(ioc_status_frame));
	ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
	ioc_status_frame.IOCStatus = iocstatus;
	while ((req = TAILQ_FIRST(chain)) != NULL) {
		MSG_REQUEST_HEADER *msg_hdr;
		u_int cb_index;

		bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
		ioc_status_frame.Function = msg_hdr->Function;
		ioc_status_frame.MsgContext = msg_hdr->MsgContext;
		cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
		mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
		    &ioc_status_frame);
		if (mpt_req_on_pending_list(mpt, req) != 0)
			TAILQ_REMOVE(chain, req, links);
	}
}

/********************************* Diagnostics ********************************/
/*
 * Perform a diagnostic dump of a reply frame.
 */
void
mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
{

	mpt_prt(mpt, "Address Reply:\n");
	mpt_print_reply(reply_frame);
}

/******************************* Doorbell Access ******************************/
static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);

static __inline uint32_t
mpt_rd_db(struct mpt_softc *mpt)
{

	return mpt_read(mpt, MPT_OFFSET_DOORBELL);
}

static __inline uint32_t
mpt_rd_intr(struct mpt_softc *mpt)
{

	return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
}

/* Busy wait for a door bell to be read by IOC */
static int
mpt_wait_db_ack(struct mpt_softc *mpt)
{
	int i;

	for (i = 0; i < MPT_MAX_WAIT; i++) {
		if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
			maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
			return (MPT_OK);
		}
		DELAY(200);
	}
	return (MPT_FAIL);
}

/* Busy wait for a door bell interrupt */
static int
mpt_wait_db_int(struct mpt_softc *mpt)
{
	int i;

	for (i = 0; i < MPT_MAX_WAIT; i++) {
		if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
			maxwait_int = i > maxwait_int ? i : maxwait_int;
			return MPT_OK;
		}
		DELAY(100);
	}
	return (MPT_FAIL);
}

/* Check the doorbell and complain if the IOC is not in the RUNNING state */
void
mpt_check_doorbell(struct mpt_softc *mpt)
{
	uint32_t db = mpt_rd_db(mpt);

	if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
		mpt_prt(mpt, "Device not running\n");
		mpt_print_db(db);
	}
}

/* Wait for IOC to transition to a given state */
static int
mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
{
	int i;

	for (i = 0; i < MPT_MAX_WAIT; i++) {
		uint32_t db = mpt_rd_db(mpt);
		if (MPT_STATE(db) == state) {
			maxwait_state = i > maxwait_state ? i : maxwait_state;
			return (MPT_OK);
		}
		DELAY(100);
	}
	return (MPT_FAIL);
}


/************************* Initialization/Configuration ***********************/
static int mpt_download_fw(struct mpt_softc *mpt);

/* Issue the reset COMMAND to the IOC */
static int
mpt_soft_reset(struct mpt_softc *mpt)
{

	mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");

	/* Have to use hard reset if we are not in Running state */
	if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
		mpt_prt(mpt, "soft reset failed: device not running\n");
		return (MPT_FAIL);
	}

	/*
	 * If door bell is in use we don't have a chance of getting
	 * a word in since the IOC probably crashed in message
	 * processing. So don't waste our time.
	 */
	if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
		mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
		return (MPT_FAIL);
	}

	/* Send the reset request to the IOC */
	mpt_write(mpt, MPT_OFFSET_DOORBELL,
	    MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
	if (mpt_wait_db_ack(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: ack timeout\n");
		return (MPT_FAIL);
	}

	/* Wait for the IOC to reload and come out of reset state */
	if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: device did not restart\n");
		return (MPT_FAIL);
	}

	return MPT_OK;
}

static int
mpt_enable_diag_mode(struct mpt_softc *mpt)
{
	int try;

	try = 20;
	while (--try) {

		if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
			break;

		/* Enable diagnostic registers */
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);

		DELAY(100000);
	}
	if (try == 0)
		return (EIO);
	return (0);
}

static void
mpt_disable_diag_mode(struct mpt_softc *mpt)
{

	mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
}

/*
 * This is a magic diagnostic reset that resets all the ARM
 * processors in the chip.
 */
static void
mpt_hard_reset(struct mpt_softc *mpt)
{
	int error;
	int wait;
	uint32_t diagreg;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");

	if (mpt->is_1078) {
		mpt_write(mpt, MPT_OFFSET_RESET_1078, 0x07);
		DELAY(1000);
		return;
	}

	error = mpt_enable_diag_mode(mpt);
	if (error) {
		mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n");
		mpt_prt(mpt, "Trying to reset anyway.\n");
	}

	diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);

	/*
	 * This appears to be a workaround required for some
	 * firmware or hardware revs.
	 */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
	DELAY(1000);

	/* Diag. port is now active so we can now hit the reset bit */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);

	/*
	 * Ensure that the reset has finished.  We delay 1ms
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 5000;
	do {
		DELAY(1000);
		diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
	} while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);

	if (wait == 0) {
		mpt_prt(mpt, "WARNING - Failed hard reset! "
		    "Trying to initialize anyway.\n");
	}

	/*
	 * If we have firmware to download, it must be loaded before
	 * the controller will become operational. Do so now.
	 */
	if (mpt->fw_image != NULL) {

		error = mpt_download_fw(mpt);

		if (error) {
			mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
			mpt_prt(mpt, "Trying to initialize anyway.\n");
		}
	}

	/*
	 * Resetting the controller should have disabled write
	 * access to the diagnostic registers, but disable
	 * manually to be sure.
	 */
	mpt_disable_diag_mode(mpt);
}

static void
mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
{

	/*
	 * Complete all pending requests with a status
	 * appropriate for an IOC reset.
	 */
	mpt_complete_request_chain(mpt, &mpt->request_pending_list,
	    MPI_IOCSTATUS_INVALID_STATE);
}

/*
 * Reset the IOC when needed.  Try a software command first; if that fails,
 * poke at the magic diagnostic reset.  Note that a hard reset resets
 * *both* IOCs on dual function chips (FC929 && LSI1030) and also
 * fouls up the PCI configuration registers.
 */
int
mpt_reset(struct mpt_softc *mpt, int reinit)
{
	struct mpt_personality *pers;
	int ret;
	int retry_cnt = 0;

	/*
	 * Try a soft reset. If that fails, get out the big hammer.
	 */
 again:
	if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
		int cnt;
		for (cnt = 0; cnt < 5; cnt++) {
			/* Failed; do a hard reset */
			mpt_hard_reset(mpt);

			/*
			 * Wait for the IOC to reload
			 * and come out of reset state
			 */
			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
			if (ret == MPT_OK) {
				break;
			}
			/*
			 * Okay- try to check again...
			 */
			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
			if (ret == MPT_OK) {
				break;
			}
			mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
			    retry_cnt, cnt);
		}
	}

	if (retry_cnt == 0) {
		/*
		 * Invoke reset handlers.  We bump the reset count so
		 * that mpt_wait_req() understands that regardless of
		 * the specified wait condition, it should stop its wait.
		 */
		mpt->reset_cnt++;
		MPT_PERS_FOREACH(mpt, pers)
			pers->reset(mpt, ret);
	}

	if (reinit) {
		ret = mpt_enable_ioc(mpt, 1);
		if (ret == MPT_OK) {
			mpt_enable_ints(mpt);
		}
	}
	if (ret != MPT_OK && retry_cnt++ < 2) {
		goto again;
	}
	return ret;
}

/* Return a command buffer to the free queue */
void
mpt_free_request(struct mpt_softc *mpt, request_t *req)
{
	request_t *nxt;
	struct mpt_evtf_record *record;
	uint32_t offset, reply_baddr;

	if (req == NULL || req != &mpt->request_pool[req->index]) {
		panic("mpt_free_request: bad req ptr");
	}
	if ((nxt = req->chain) != NULL) {
		req->chain = NULL;
		mpt_free_request(mpt, nxt);	/* NB: recursion */
	}
	KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
	KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));
	MPT_LOCK_ASSERT(mpt);
	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
	    ("mpt_free_request: req %p:%u func %x already on freelist",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
	KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
	    ("mpt_free_request: req %p:%u func %x on pending list",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
#ifdef	INVARIANTS
	mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__);
#endif

	req->ccb = NULL;
	if (LIST_EMPTY(&mpt->ack_frames)) {
		/*
		 * Insert free ones at the tail
		 */
		req->serno = 0;
		req->state = REQ_STATE_FREE;
#ifdef	INVARIANTS
		memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER));
#endif
		TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
		if (mpt->getreqwaiter != 0) {
			mpt->getreqwaiter = 0;
			wakeup(&mpt->request_free_list);
		}
		return;
	}

	/*
	 * Process an ack frame deferred due to resource shortage.
	 */
	record = LIST_FIRST(&mpt->ack_frames);
	LIST_REMOVE(record, links);
	req->state = REQ_STATE_ALLOCATED;
	mpt_assign_serno(mpt, req);
	mpt_send_event_ack(mpt, req, &record->reply, record->context);
	offset = (uint32_t)((uint8_t *)record - mpt->reply);
	reply_baddr = offset + (mpt->reply_phys & 0xFFFFFFFF);
	bus_dmamap_sync_range(mpt->reply_dmat, mpt->reply_dmap, offset,
	    MPT_REPLY_SIZE, BUS_DMASYNC_PREREAD);
	mpt_free_reply(mpt, reply_baddr);
}

/* Get a command buffer from the free queue */
request_t *
mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
{
	request_t *req;

retry:
	MPT_LOCK_ASSERT(mpt);
	req = TAILQ_FIRST(&mpt->request_free_list);
	if (req != NULL) {
		KASSERT(req == &mpt->request_pool[req->index],
		    ("mpt_get_request: corrupted request free list"));
		KASSERT(req->state == REQ_STATE_FREE,
		    ("req %p:%u not free on free list %x index %d function %x",
		    req, req->serno, req->state, req->index,
		    ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
		TAILQ_REMOVE(&mpt->request_free_list, req, links);
		req->state = REQ_STATE_ALLOCATED;
		req->chain = NULL;
		mpt_assign_serno(mpt, req);
	} else if (sleep_ok != 0) {
		mpt->getreqwaiter = 1;
		mpt_sleep(mpt, &mpt->request_free_list, 0, "mptgreq", 0);
		goto retry;
	}
	return (req);
}

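/*
 * Request life-cycle sketch, mirroring the config-page routines later in
 * this file (mpt_read_cfg_header() and friends):
 *
 *	req = mpt_get_request(mpt, sleep_ok);
 *	...build the message in req->req_vbuf...
 *	mpt_send_cmd(mpt, req);
 *	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
 *	    sleep_ok, timeout_ms);
 *	mpt_free_request(mpt, req);
 */
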
/* Pass the command to the IOC */
void
mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
{

	if (mpt->verbose > MPT_PRT_DEBUG2) {
		mpt_dump_request(mpt, req);
	}
	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	req->state |= REQ_STATE_QUEUED;
	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
	    ("req %p:%u func %x on freelist list in mpt_send_cmd",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
	KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
	    ("req %p:%u func %x already on pending list in mpt_send_cmd",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
	TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
	mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
}

/*
 * Wait for a request to complete.
 *
 * Inputs:
 *	mpt		softc of controller executing request
 *	req		request to wait for
 *	sleep_ok	nonzero implies may sleep in this context
 *	time_ms		timeout in ms.  0 implies no timeout.
 *
 * Return Values:
 *	0		Request completed
 *	non-0		Timeout fired before request completion.
 */
int
mpt_wait_req(struct mpt_softc *mpt, request_t *req,
    mpt_req_state_t state, mpt_req_state_t mask,
    int sleep_ok, int time_ms)
{
	int error;
	int timeout;
	u_int saved_cnt;

	/*
	 * timeout is in ms.  0 indicates infinite wait.
	 * Convert to ticks or 500us units depending on
	 * our sleep mode.
	 */
	if (sleep_ok != 0) {
		timeout = (time_ms * hz) / 1000;
	} else {
		timeout = time_ms * 2;
	}
	req->state |= REQ_STATE_NEED_WAKEUP;
	mask &= ~REQ_STATE_NEED_WAKEUP;
	saved_cnt = mpt->reset_cnt;
	while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
		if (sleep_ok != 0) {
			error = mpt_sleep(mpt, req, 0, "mptreq", timeout);
			if (error == EWOULDBLOCK) {
				timeout = 0;
				break;
			}
		} else {
			if (time_ms != 0 && --timeout == 0) {
				break;
			}
			DELAY(500);
			mpt_intr(mpt);
		}
	}
	req->state &= ~REQ_STATE_NEED_WAKEUP;
	if (mpt->reset_cnt != saved_cnt) {
		return (EIO);
	}
	if (time_ms && timeout <= 0) {
		MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf;
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
		return (ETIMEDOUT);
	}
	return (0);
}

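/*
 * Worked example of the timeout conversion in mpt_wait_req() (numbers
 * illustrative): with time_ms = 1000 and hz = 100, a sleeping waiter gets
 * timeout = (1000 * 100) / 1000 = 100 ticks, while a polling waiter gets
 * timeout = 1000 * 2 = 2000 iterations of DELAY(500), i.e. the same one
 * second counted in 500us units.
 */
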
/*
 * Send a command to the IOC via the handshake register.
 *
 * Only done at initialization time and for certain unusual
 * commands such as device/bus reset as specified by LSI.
 */
int
mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
{
	int i;
	uint32_t data, *data32;

	/* Check condition of the IOC */
	data = mpt_rd_db(mpt);
	if ((MPT_STATE(data) != MPT_DB_STATE_READY
	  && MPT_STATE(data) != MPT_DB_STATE_RUNNING
	  && MPT_STATE(data) != MPT_DB_STATE_FAULT)
	 || MPT_DB_IS_IN_USE(data)) {
		mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
		mpt_print_db(data);
		return (EBUSY);
	}

	/* We move things in 32 bit chunks */
	len = (len + 3) >> 2;
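	/*
	 * For example (illustrative): a 32-byte message becomes
	 * (32 + 3) >> 2 = 8 dwords, each written to the doorbell below.
	 */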
	data32 = cmd;

	/* Clear any left over pending doorbell interrupts */
	if (MPT_DB_INTR(mpt_rd_intr(mpt)))
		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/*
	 * Tell the handshake reg. we are going to send a command
	 * and how long it is going to be.
	 */
	data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
	    (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
	mpt_write(mpt, MPT_OFFSET_DOORBELL, data);

	/* Wait for the chip to notice */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_handshake_cmd: db ignored\n");
		return (ETIMEDOUT);
	}

	/* Clear the interrupt */
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	if (mpt_wait_db_ack(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_handshake_cmd: db ack timed out\n");
		return (ETIMEDOUT);
	}

	/* Send the command */
	for (i = 0; i < len; i++) {
		mpt_write(mpt, MPT_OFFSET_DOORBELL, htole32(*data32++));
		if (mpt_wait_db_ack(mpt) != MPT_OK) {
			mpt_prt(mpt,
			    "mpt_send_handshake_cmd: timeout @ index %d\n", i);
			return (ETIMEDOUT);
		}
	}
	return MPT_OK;
}

/* Get the response from the handshake register */
int
mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
{
	int left, reply_left;
	u_int16_t *data16;
	uint32_t data;
	MSG_DEFAULT_REPLY *hdr;

	/* We move things out in 16 bit chunks */
	reply_len >>= 1;
	data16 = (u_int16_t *)reply;

	hdr = (MSG_DEFAULT_REPLY *)reply;

	/* Get first word */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
		return ETIMEDOUT;
	}
	data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
	*data16++ = le16toh(data & MPT_DB_DATA_MASK);
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/* Get Second Word */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
		return ETIMEDOUT;
	}
	data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
	*data16++ = le16toh(data & MPT_DB_DATA_MASK);
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/*
	 * With the second word, we can now look at the length.
	 * Warn about a reply that's too short (except for IOC FACTS REPLY)
	 */
	if ((reply_len >> 1) != hdr->MsgLength &&
	    (hdr->Function != MPI_FUNCTION_IOC_FACTS)){
		mpt_prt(mpt, "reply length does not match message length: "
			"got %x; expected %zx for function %x\n",
			hdr->MsgLength << 2, reply_len << 1, hdr->Function);
	}

	/* Get rest of the reply; but don't overflow the provided buffer */
	left = (hdr->MsgLength << 1) - 2;
	reply_left =  reply_len - 2;
	while (left--) {
		u_int16_t datum;

		if (mpt_wait_db_int(mpt) != MPT_OK) {
			mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
			return ETIMEDOUT;
		}
		data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
		datum = le16toh(data & MPT_DB_DATA_MASK);

		if (reply_left-- > 0)
			*data16++ = datum;

		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
	}

	/* One more wait & clear at the end */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
		return ETIMEDOUT;
	}
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		if (mpt->verbose >= MPT_PRT_TRACE)
			mpt_print_reply(hdr);
		return (MPT_FAIL | hdr->IOCStatus);
	}

	return (0);
}

static int
mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
{
	MSG_IOC_FACTS f_req;
	int error;

	memset(&f_req, 0, sizeof f_req);
	f_req.Function = MPI_FUNCTION_IOC_FACTS;
	f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
	if (error) {
		return(error);
	}
	error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
	return (error);
}

static int
mpt_get_portfacts(struct mpt_softc *mpt, U8 port, MSG_PORT_FACTS_REPLY *freplp)
{
	MSG_PORT_FACTS f_req;
	int error;

	memset(&f_req, 0, sizeof f_req);
	f_req.Function = MPI_FUNCTION_PORT_FACTS;
	f_req.PortNumber = port;
	f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
	if (error) {
		return(error);
	}
	error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
	return (error);
}

/*
 * Send the initialization request. This is where we specify how many
 * SCSI busses and how many devices per bus we wish to emulate.
 * This is also the command that specifies the max size of the reply
 * frames from the IOC that we will be allocating.
 */
static int
mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
{
	int error = 0;
	MSG_IOC_INIT init;
	MSG_IOC_INIT_REPLY reply;

	memset(&init, 0, sizeof init);
	init.WhoInit = who;
	init.Function = MPI_FUNCTION_IOC_INIT;
	init.MaxDevices = 0;	/* at least 256 devices per bus */
	init.MaxBuses = 16;	/* at least 16 busses */

	init.MsgVersion = htole16(MPI_VERSION);
	init.HeaderVersion = htole16(MPI_HEADER_VERSION);
	init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
	init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);

	if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
		return(error);
	}

	error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
	return (error);
}


/*
 * Utility routine to read configuration headers and pages
 */
int
mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, cfgparms_t *params,
    bus_addr_t addr, bus_size_t len, int sleep_ok, int timeout_ms)
{
	MSG_CONFIG *cfgp;
	SGE_SIMPLE32 *se;

	cfgp = req->req_vbuf;
	memset(cfgp, 0, sizeof *cfgp);
	cfgp->Action = params->Action;
	cfgp->Function = MPI_FUNCTION_CONFIG;
	cfgp->Header.PageVersion = params->PageVersion;
	cfgp->Header.PageNumber = params->PageNumber;
	cfgp->PageAddress = htole32(params->PageAddress);
	if ((params->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
	    MPI_CONFIG_PAGETYPE_EXTENDED) {
		cfgp->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
		cfgp->Header.PageLength = 0;
		cfgp->ExtPageLength = htole16(params->ExtPageLength);
		cfgp->ExtPageType = params->ExtPageType;
	} else {
		cfgp->Header.PageType = params->PageType;
		cfgp->Header.PageLength = params->PageLength;
	}
	se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
	se->Address = htole32(addr);
	MPI_pSGE_SET_LENGTH(se, len);
	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
	    MPI_SGE_FLAGS_END_OF_LIST |
	    ((params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
	      || params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
	     ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
	se->FlagsLength = htole32(se->FlagsLength);
	cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
	    sleep_ok, timeout_ms));
}

int
mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion, int PageNumber,
    uint32_t PageAddress, int ExtPageType,
    CONFIG_EXTENDED_PAGE_HEADER *rslt,
    int sleep_ok, int timeout_ms)
{
	request_t *req;
	cfgparms_t params;
	MSG_CONFIG_REPLY *cfgp;
	int error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_extcfg_header: Get request failed!\n");
1640 return (ENOMEM);
1641 }
1642
1643 params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
1644 params.PageVersion = PageVersion;
1645 params.PageLength = 0;
1646 params.PageNumber = PageNumber;
1647 params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
1648 params.PageAddress = PageAddress;
1649 params.ExtPageType = ExtPageType;
1650 params.ExtPageLength = 0;
1651 error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
1652 sleep_ok, timeout_ms);
1653 if (error != 0) {
1654 /*
1655 * Leave the request. Without resetting the chip, it's
1656 * still owned by it and we'll just get into trouble
1657 * freeing it now. Mark it as abandoned so that if it
1658 * shows up later it can be freed.
1659 */
1660 mpt_prt(mpt, "read_extcfg_header timed out\n");
1661 return (ETIMEDOUT);
1662 }
1663
1664 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
1665 case MPI_IOCSTATUS_SUCCESS:
1666 cfgp = req->req_vbuf;
1667 rslt->PageVersion = cfgp->Header.PageVersion;
1668 rslt->PageNumber = cfgp->Header.PageNumber;
1669 rslt->PageType = cfgp->Header.PageType;
1670 rslt->ExtPageLength = le16toh(cfgp->ExtPageLength);
1671 rslt->ExtPageType = cfgp->ExtPageType;
1672 error = 0;
1673 break;
1674 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
1675 mpt_lprt(mpt, MPT_PRT_DEBUG,
1676 "Invalid Page Type %d Number %d Addr 0x%0x\n",
1677 MPI_CONFIG_PAGETYPE_EXTENDED, PageNumber, PageAddress);
1678 error = EINVAL;
1679 break;
1680 default:
1681 mpt_prt(mpt, "mpt_read_extcfg_header: Config Info Status %x\n",
1682 req->IOCStatus);
1683 error = EIO;
1684 break;
1685 }
1686 mpt_free_request(mpt, req);
1687 return (error);
1688}
1689
1690int
1691mpt_read_extcfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1692 CONFIG_EXTENDED_PAGE_HEADER *hdr, void *buf, size_t len,
1693 int sleep_ok, int timeout_ms)
1694{
1695 request_t *req;
1696 cfgparms_t params;
1697 int error;
1698
1699 req = mpt_get_request(mpt, sleep_ok);
1700 if (req == NULL) {
1701 mpt_prt(mpt, "mpt_read_extcfg_page: Get request failed!\n");
1702 return (-1);
1703 }
1704
1705 params.Action = Action;
1706 params.PageVersion = hdr->PageVersion;
1707 params.PageLength = 0;
1708 params.PageNumber = hdr->PageNumber;
1709 params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
1710 params.PageAddress = PageAddress;
1711 params.ExtPageType = hdr->ExtPageType;
1712 params.ExtPageLength = hdr->ExtPageLength;
1713 error = mpt_issue_cfg_req(mpt, req, &params,
1714 req->req_pbuf + MPT_RQSL(mpt),
1715 len, sleep_ok, timeout_ms);
1716 if (error != 0) {
1717 mpt_prt(mpt, "read_extcfg_page(%d) timed out\n", Action);
1718 return (-1);
1719 }
1720
1721 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1722 mpt_prt(mpt, "mpt_read_extcfg_page: Config Info Status %x\n",
1723 req->IOCStatus);
1724 mpt_free_request(mpt, req);
1725 return (-1);
1726 }
d751f32e
MD
1727 memcpy(buf, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
1728 mpt_free_request(mpt, req);
1729 return (0);
1730}
1731
int
mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
    uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
    int sleep_ok, int timeout_ms)
{
	request_t *req;
	cfgparms_t params;
	MSG_CONFIG *cfgp;
	int error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = 0;
	params.PageLength = 0;
	params.PageNumber = PageNumber;
	params.PageType = PageType;
	params.PageAddress = PageAddress;
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
	    sleep_ok, timeout_ms);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, rslt, sizeof(*rslt));
		error = 0;
		break;
	case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "Invalid Page Type %d Number %d Addr 0x%x\n",
		    PageType, PageNumber, PageAddress);
		error = EINVAL;
		break;
	default:
		mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
		    req->IOCStatus);
		error = EIO;
		break;
	}
	mpt_free_request(mpt, req);
	return (error);
}

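/*
 * Read a full standard configuration page. 'hdr' must point to a buffer
 * of at least 'len' bytes whose leading CONFIG_PAGE_HEADER was filled in
 * by mpt_read_cfg_header(); on success the entire page, header included,
 * is copied back over that buffer.
 */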
int
mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
    CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
    int timeout_ms)
{
	request_t *req;
	cfgparms_t params;
	int error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
		return (-1);
	}

	params.Action = Action;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
	params.PageAddress = PageAddress;
	error = mpt_issue_cfg_req(mpt, req, &params,
	    req->req_pbuf + MPT_RQSL(mpt),
	    len, sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
		    req->IOCStatus);
		mpt_free_request(mpt, req);
		return (-1);
	}
	memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
	mpt_free_request(mpt, req);
	return (0);
}

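/*
 * Write a standard configuration page back to the IOC. Only pages whose
 * attribute bits mark them as changeable or persistent are accepted; the
 * caller's page image is copied into the request frame before the write
 * request is issued.
 */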
int
mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
    CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
    int timeout_ms)
{
	request_t *req;
	cfgparms_t params;
	u_int hdr_attr;
	int error;

	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
		    hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (-1);
	}

#if 0
	/*
	 * We shouldn't mask off other bits here.
	 */
	hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK;
#endif

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL)
		return (-1);

	memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt), hdr, len);

	/*
	 * There isn't any point in restoring stripped out attributes
	 * if you then mask them going down to issue the request.
	 */

	params.Action = Action;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageAddress = PageAddress;
#if 0
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
	params.PageType = hdr->PageType;
#endif
	error = mpt_issue_cfg_req(mpt, req, &params,
	    req->req_pbuf + MPT_RQSL(mpt),
	    len, sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
		    req->IOCStatus);
		mpt_free_request(mpt, req);
		return (-1);
	}
	mpt_free_request(mpt, req);
	return (0);
}

/*
 * Read IOC configuration information.
 */
static int
mpt_read_config_info_ioc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_HEADER hdr;
	struct mpt_raid_volume *mpt_raid;
	int rv;
	int i;
	size_t len;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
	    2, 0, &hdr, FALSE, 5000);
	/*
	 * If it's an invalid page, so what? Not a supported function....
	 */
	if (rv == EINVAL) {
		return (0);
	}
	if (rv) {
		return (rv);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "IOC Page 2 Header: Version %x len %x PageNumber %x PageType %x\n",
	    hdr.PageVersion, hdr.PageLength << 2,
	    hdr.PageNumber, hdr.PageType);

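	/* PageLength is counted in 32-bit words; convert to bytes. */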
	len = hdr.PageLength * sizeof(uint32_t);
	mpt->ioc_page2 = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->ioc_page2 == NULL) {
		mpt_prt(mpt, "unable to allocate memory for IOC page 2\n");
		mpt_raid_free_mem(mpt);
		return (ENOMEM);
	}
	memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
	rv = mpt_read_cur_cfg_page(mpt, 0,
	    &mpt->ioc_page2->Header, len, FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read IOC Page 2\n");
		mpt_raid_free_mem(mpt);
		return (EIO);
	}
	mpt2host_config_page_ioc2(mpt->ioc_page2);

	if (mpt->ioc_page2->CapabilitiesFlags != 0) {
		uint32_t mask;

		mpt_prt(mpt, "Capabilities: (");
		for (mask = 1; mask != 0; mask <<= 1) {
			if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) {
				continue;
			}
			switch (mask) {
			case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
				mpt_prtc(mpt, " RAID-0");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT:
				mpt_prtc(mpt, " RAID-1E");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT:
				mpt_prtc(mpt, " RAID-1");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT:
				mpt_prtc(mpt, " SES");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT:
				mpt_prtc(mpt, " SAFTE");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT:
				mpt_prtc(mpt, " Multi-Channel-Arrays");
				break;
			default:
				break;
			}
		}
		mpt_prtc(mpt, " )\n");
		if ((mpt->ioc_page2->CapabilitiesFlags
		   & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
		    | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
		    | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) {
			mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
			    mpt->ioc_page2->NumActiveVolumes,
			    mpt->ioc_page2->NumActiveVolumes != 1
			    ? "s " : " ",
			    mpt->ioc_page2->MaxVolumes);
			mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n",
			    mpt->ioc_page2->NumActivePhysDisks,
			    mpt->ioc_page2->NumActivePhysDisks != 1
			    ? "s " : " ",
			    mpt->ioc_page2->MaxPhysDisks);
		}
	}

	len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
	mpt->raid_volumes = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->raid_volumes == NULL) {
		mpt_prt(mpt, "Could not allocate RAID volume data\n");
		mpt_raid_free_mem(mpt);
		return (ENOMEM);
	}

	/*
	 * Copy critical data out of ioc_page2 so that we can
	 * safely refresh the page without windows of unreliable
	 * data.
	 */
	mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes;

	len = sizeof(*mpt->raid_volumes->config_page) +
	    (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1));
	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
		mpt_raid = &mpt->raid_volumes[i];
		mpt_raid->config_page =
		    kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (mpt_raid->config_page == NULL) {
			mpt_prt(mpt, "Could not allocate RAID page data\n");
			mpt_raid_free_mem(mpt);
			return (ENOMEM);
		}
	}
	mpt->raid_page0_len = len;

	len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
	mpt->raid_disks = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->raid_disks == NULL) {
		mpt_prt(mpt, "Could not allocate RAID disk data\n");
		mpt_raid_free_mem(mpt);
		return (ENOMEM);
	}
	mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks;

	/*
	 * Load page 3.
	 */
	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
	    3, 0, &hdr, FALSE, 5000);
	if (rv) {
		mpt_raid_free_mem(mpt);
		return (EIO);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
	    hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);

	len = hdr.PageLength * sizeof(uint32_t);
	mpt->ioc_page3 = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->ioc_page3 == NULL) {
		mpt_prt(mpt, "unable to allocate memory for IOC page 3\n");
		mpt_raid_free_mem(mpt);
		return (ENOMEM);
	}
	memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
	rv = mpt_read_cur_cfg_page(mpt, 0,
	    &mpt->ioc_page3->Header, len, FALSE, 5000);
	if (rv) {
		mpt_raid_free_mem(mpt);
		return (EIO);
	}
	mpt2host_config_page_ioc3(mpt->ioc_page3);
	mpt_raid_wakeup(mpt);
	return (0);
}

/*
 * Enable IOC port
 */
static int
mpt_send_port_enable(struct mpt_softc *mpt, int port)
{
	request_t *req;
	MSG_PORT_ENABLE *enable_req;
	int error;

	req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
	if (req == NULL)
		return (-1);

	enable_req = req->req_vbuf;
	memset(enable_req, 0, MPT_RQSL(mpt));

	enable_req->Function = MPI_FUNCTION_PORT_ENABLE;
	enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
	enable_req->PortNumber = port;

	mpt_check_doorbell(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);

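	/*
	 * SAS and FC ports can take considerably longer to come ready;
	 * wait up to five minutes for them and 30 seconds otherwise.
	 */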
	mpt_send_cmd(mpt, req);
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
	    FALSE, (mpt->is_sas || mpt->is_fc)? 300000 : 30000);
	if (error != 0) {
		mpt_prt(mpt, "port %d enable timed out\n", port);
		return (-1);
	}
	mpt_free_request(mpt, req);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port);
	return (0);
}

/*
 * Enable/Disable asynchronous event reporting.
 */
static int
mpt_send_event_request(struct mpt_softc *mpt, int onoff)
{
	request_t *req;
	MSG_EVENT_NOTIFY *enable_req;

	req = mpt_get_request(mpt, FALSE);
	if (req == NULL) {
		return (ENOMEM);
	}
	enable_req = req->req_vbuf;
	memset(enable_req, 0, sizeof *enable_req);

	enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION;
	enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
	enable_req->Switch = onoff;

	mpt_check_doorbell(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n",
	    onoff ? "en" : "dis");
	/*
	 * Send the command off, but don't wait for it.
	 */
	mpt_send_cmd(mpt, req);
	return (0);
}

/*
 * Un-mask the interrupts on the chip.
 */
void
mpt_enable_ints(struct mpt_softc *mpt)
{

	/* Unmask everything except the doorbell interrupt. */
	mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK);
}

/*
 * Mask the interrupts on the chip.
 */
void
mpt_disable_ints(struct mpt_softc *mpt)
{

	/* Mask all interrupts. */
	mpt_write(mpt, MPT_OFFSET_INTR_MASK,
	    MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
}

static void
mpt_sysctl_attach(struct mpt_softc *mpt)
{
	SYSCTL_ADD_UINT(&mpt->mpt_sysctl_ctx,
	    SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
	    "debug", CTLFLAG_RW, &mpt->verbose, 0,
	    "Debugging/Verbose level");
	SYSCTL_ADD_UINT(&mpt->mpt_sysctl_ctx,
	    SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
	    "role", CTLFLAG_RD, &mpt->role, 0,
	    "HBA role");
#ifdef MPT_TEST_MULTIPATH
	SYSCTL_ADD_INT(&mpt->mpt_sysctl_ctx,
	    SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
	    "failure_id", CTLFLAG_RW, &mpt->failure_id, -1,
	    "Next Target to Fail");
#endif
}

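/*
 * Attach the core to the controller, then let every registered
 * personality probe and attach. Once everything has attached, run
 * each attached personality's enable hook (see the comment at the
 * second loop below for why this is a separate pass).
 */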
int
mpt_attach(struct mpt_softc *mpt)
{
	struct mpt_personality *pers;
	int i;
	int error;

	mpt_core_attach(mpt);
	mpt_core_enable(mpt);

	TAILQ_INSERT_TAIL(&mpt_tailq, mpt, links);
	for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
		pers = mpt_personalities[i];
		if (pers == NULL) {
			continue;
		}
		if (pers->probe(mpt) == 0) {
			error = pers->attach(mpt);
			if (error != 0) {
				mpt_detach(mpt);
				return (error);
			}
			mpt->mpt_pers_mask |= (0x1 << pers->id);
			pers->use_count++;
		}
	}

	/*
	 * Now that we've attached everything, do the enable function
	 * for all of the personalities. This allows the personalities
	 * to do setups that are appropriate for them prior to enabling
	 * any ports.
	 */
	for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
		pers = mpt_personalities[i];
		if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) {
			error = pers->enable(mpt);
			if (error != 0) {
				mpt_prt(mpt, "personality %s attached but would"
				    " not enable (%d)\n", pers->name, error);
				mpt_detach(mpt);
				return (error);
			}
		}
	}
	return (0);
}

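/*
 * Run each attached personality's shutdown hook, most recently
 * attached first.
 */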
int
mpt_shutdown(struct mpt_softc *mpt)
{
	struct mpt_personality *pers;

	MPT_PERS_FOREACH_REVERSE(mpt, pers) {
		pers->shutdown(mpt);
	}
	return (0);
}

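/*
 * Detach the personalities in reverse attach order, then remove the
 * softc from the global list.
 */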
int
mpt_detach(struct mpt_softc *mpt)
{
	struct mpt_personality *pers;

	MPT_PERS_FOREACH_REVERSE(mpt, pers) {
		pers->detach(mpt);
		mpt->mpt_pers_mask &= ~(0x1 << pers->id);
		pers->use_count--;
	}
	TAILQ_REMOVE(&mpt_tailq, mpt, links);
	return (0);
}

static int
mpt_core_load(struct mpt_personality *pers)
{
	int i;

	/*
	 * Set up core handlers and insert the default handler
	 * into all "empty slots".
	 */
	for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) {
		mpt_reply_handlers[i] = mpt_default_reply_handler;
	}

	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
	    mpt_event_reply_handler;
	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
	    mpt_config_reply_handler;
	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
	    mpt_handshake_reply_handler;
	return (0);
}

/*
 * Initialize per-instance driver data and perform
 * initial controller configuration.
 */
static int
mpt_core_attach(struct mpt_softc *mpt)
{
	int val, error;

	LIST_INIT(&mpt->ack_frames);
	/* Put all request buffers on the free list */
	TAILQ_INIT(&mpt->request_pending_list);
	TAILQ_INIT(&mpt->request_free_list);
	TAILQ_INIT(&mpt->request_timeout_list);
	for (val = 0; val < MPT_MAX_LUNS; val++) {
		STAILQ_INIT(&mpt->trt[val].atios);
		STAILQ_INIT(&mpt->trt[val].inots);
	}
	STAILQ_INIT(&mpt->trt_wildcard.atios);
	STAILQ_INIT(&mpt->trt_wildcard.inots);
#ifdef MPT_TEST_MULTIPATH
	mpt->failure_id = -1;
#endif
	mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE;
	sysctl_ctx_init(&mpt->mpt_sysctl_ctx);
	mpt->mpt_sysctl_tree = SYSCTL_ADD_NODE(&mpt->mpt_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(mpt->dev), CTLFLAG_RD, 0, "");
	if (mpt->mpt_sysctl_tree == NULL) {
		device_printf(mpt->dev, "can't add sysctl node\n");
		return (EINVAL);
	}
	mpt_sysctl_attach(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
	    mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));

	MPT_LOCK(mpt);
	error = mpt_configure_ioc(mpt, 0, 0);
	MPT_UNLOCK(mpt);

	return (error);
}

static int
mpt_core_enable(struct mpt_softc *mpt)
{

	/*
	 * We enter with the IOC enabled, but async events
	 * not enabled, ports not enabled and interrupts
	 * not enabled.
	 */
	MPT_LOCK(mpt);

	/*
	 * Enable asynchronous event reporting; all personalities
	 * have attached, so they should now be able to field
	 * async events.
	 */
	mpt_send_event_request(mpt, 1);

	/*
	 * Catch any pending interrupts.
	 *
	 * This seems to be crucial; otherwise
	 * the port enable below times out.
	 */
	mpt_intr(mpt);

	/*
	 * Enable Interrupts
	 */
	mpt_enable_ints(mpt);

	/*
	 * Catch any interrupts that became pending while we
	 * were enabling them; again, without this the port
	 * enable below times out.
	 */
	mpt_intr(mpt);

	/*
	 * Enable the port.
	 */
	if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
		mpt_prt(mpt, "failed to enable port 0\n");
		MPT_UNLOCK(mpt);
		return (ENXIO);
	}
	MPT_UNLOCK(mpt);
	return (0);
}

static void
mpt_core_shutdown(struct mpt_softc *mpt)
{

	mpt_disable_ints(mpt);
}

static void
mpt_core_detach(struct mpt_softc *mpt)
{
	int val;

	/*
	 * XXX: FREE MEMORY
	 */
	mpt_disable_ints(mpt);

	/* Make sure no request has pending timeouts. */
	for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
		request_t *req = &mpt->request_pool[val];
		callout_stop(&req->callout);
	}

	mpt_dma_buf_free(mpt);

	if (mpt->mpt_sysctl_tree != NULL)
		sysctl_ctx_free(&mpt->mpt_sysctl_ctx);
}

static int
mpt_core_unload(struct mpt_personality *pers)
{

	/* Unload is always successful. */
	return (0);
}

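/*
 * Size of the firmware upload request built on the stack below: the
 * base FW_UPLOAD message with its SGE union replaced by a transaction
 * context element plus a single simple 32-bit SGE.
 */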
#define	FW_UPLOAD_REQ_SIZE				\
	(sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION)	\
	+ sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32))

static int
mpt_upload_fw(struct mpt_softc *mpt)
{
	uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE];
	MSG_FW_UPLOAD_REPLY fw_reply;
	MSG_FW_UPLOAD *fw_req;
	FW_UPLOAD_TCSGE *tsge;
	SGE_SIMPLE32 *sge;
	uint32_t flags;
	int error;

	memset(&fw_req_buf, 0, sizeof(fw_req_buf));
	fw_req = (MSG_FW_UPLOAD *)fw_req_buf;
	fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
	fw_req->Function = MPI_FUNCTION_FW_UPLOAD;
	fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL;
	tsge->DetailsLength = 12;
	tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
	tsge->ImageSize = htole32(mpt->fw_image_size);
	sge = (SGE_SIMPLE32 *)(tsge + 1);
	flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER
	    | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT
	    | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST);
	flags <<= MPI_SGE_FLAGS_SHIFT;
	sge->FlagsLength = htole32(flags | mpt->fw_image_size);
	sge->Address = htole32(mpt->fw_phys);
	bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_PREREAD);
	error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf);
	if (error)
		return (error);
	error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply);
	bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_POSTREAD);
	return (error);
}

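/*
 * Push a buffer out to IOC memory through the diagnostic window, one
 * 32-bit word at a time. On SAS controllers the PIO resource has to
 * be explicitly enabled around the access.
 */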
static void
mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
    uint32_t *data, bus_size_t len)
{
	uint32_t *data_end;

	data_end = data + (roundup2(len, sizeof(uint32_t)) / 4);
	if (mpt->is_sas) {
		pci_enable_io(mpt->dev, SYS_RES_IOPORT);
	}
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
	while (data != data_end) {
		mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
		data++;
	}
	if (mpt->is_sas) {
		pci_disable_io(mpt->dev, SYS_RES_IOPORT);
	}
}

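/*
 * Download the firmware image saved by mpt_upload_fw() back into the
 * IOC through the diagnostic interface; see mpt_configure_ioc() for
 * why the driver is responsible for this after a hard reset.
 */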
static int
mpt_download_fw(struct mpt_softc *mpt)
{
	MpiFwHeader_t *fw_hdr;
	int error;
	uint32_t ext_offset;
	uint32_t data;

	if (mpt->pci_pio_reg == NULL) {
		mpt_prt(mpt, "No PIO resource!\n");
		return (ENXIO);
	}

	mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
	    mpt->fw_image_size);

	error = mpt_enable_diag_mode(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Could not enter diagnostic mode!\n");
		return (EIO);
	}

	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
	    MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM);

	fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
	bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_PREWRITE);
	mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr,
	    fw_hdr->ImageSize);
	bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_POSTWRITE);

	ext_offset = fw_hdr->NextImageHeaderOffset;
	while (ext_offset != 0) {
		MpiExtImageHeader_t *ext;

		ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset);
		ext_offset = ext->NextImageHeaderOffset;
		bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap,
		    BUS_DMASYNC_PREWRITE);
		mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext,
		    ext->ImageSize);
		bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (mpt->is_sas) {
		pci_enable_io(mpt->dev, SYS_RES_IOPORT);
	}
	/* Set up the address to jump to on reset. */
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);

	/*
	 * The controller sets the "flash bad" status after attempting
	 * to auto-boot from flash. Clear the status so that the controller
	 * will continue the boot process with our newly installed firmware.
	 */
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
	data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);

	if (mpt->is_sas) {
		pci_disable_io(mpt->dev, SYS_RES_IOPORT);
	}

	/*
	 * Re-enable the processor and clear the boot halt flag.
	 */
	data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
	data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM);
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);

	mpt_disable_diag_mode(mpt);
	return (0);
}

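/*
 * Create the DMA tags for data and request buffers, allocate the
 * request memory, carve it into per-request areas (each with a sense
 * buffer at its tail), and create a per-request map for data
 * transfers.
 */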
static int
mpt_dma_buf_alloc(struct mpt_softc *mpt)
{
	struct mpt_map_info mi;
	uint8_t *vptr;
	uint32_t pptr, end;
	int i, error;

	/* Create a child tag for data buffers */
	if (mpt_dma_tag_create(mpt, mpt->parent_dmat, 1,
	    0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE,
	    mpt->max_cam_seg_cnt, BUS_SPACE_MAXSIZE_32BIT, 0,
	    &mpt->buffer_dmat) != 0) {
		mpt_prt(mpt, "cannot create a dma tag for data buffers\n");
		return (1);
	}

	/* Create a child tag for request buffers */
	if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, MPT_REQ_MEM_SIZE(mpt), 1, BUS_SPACE_MAXSIZE_32BIT, 0,
	    &mpt->request_dmat) != 0) {
		mpt_prt(mpt, "cannot create a dma tag for requests\n");
		return (1);
	}

	/* Allocate some DMA accessible memory for requests */
	if (bus_dmamem_alloc(mpt->request_dmat, (void **)&mpt->request,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &mpt->request_dmap) != 0) {
		mpt_prt(mpt, "cannot allocate %d bytes of request memory\n",
		    MPT_REQ_MEM_SIZE(mpt));
		return (1);
	}

	mi.mpt = mpt;
	mi.error = 0;

	/* Load and lock it into "bus space" */
	bus_dmamap_load(mpt->request_dmat, mpt->request_dmap, mpt->request,
	    MPT_REQ_MEM_SIZE(mpt), mpt_map_rquest, &mi, 0);

	if (mi.error) {
		mpt_prt(mpt, "error %d loading dma map for DMA request queue\n",
		    mi.error);
		return (1);
	}
	mpt->request_phys = mi.phys;

	/*
	 * Now create per-request dma maps
	 */
	i = 0;
	pptr = mpt->request_phys;
	vptr = mpt->request;
	end = pptr + MPT_REQ_MEM_SIZE(mpt);
	while (pptr < end) {
		request_t *req = &mpt->request_pool[i];
		req->index = i++;

		/* Store location of Request Data */
		req->req_pbuf = pptr;
		req->req_vbuf = vptr;

		pptr += MPT_REQUEST_AREA;
		vptr += MPT_REQUEST_AREA;

		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

		error = bus_dmamap_create(mpt->buffer_dmat, 0, &req->dmap);
		if (error) {
			mpt_prt(mpt, "error %d creating per-cmd DMA maps\n",
			    error);
			return (1);
		}
	}

	return (0);
}

static void
mpt_dma_buf_free(struct mpt_softc *mpt)
{
	int i;

	if (mpt->request_dmat == 0) {
		mpt_lprt(mpt, MPT_PRT_DEBUG, "already released dma memory\n");
		return;
	}
	for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
		bus_dmamap_destroy(mpt->buffer_dmat, mpt->request_pool[i].dmap);
	}
	bus_dmamap_unload(mpt->request_dmat, mpt->request_dmap);
	bus_dmamem_free(mpt->request_dmat, mpt->request, mpt->request_dmap);
	bus_dma_tag_destroy(mpt->request_dmat);
	mpt->request_dmat = 0;
	bus_dma_tag_destroy(mpt->buffer_dmat);
}

/*
 * Allocate/Initialize data structures for the controller. Called
 * once at instance startup.
 */
static int
mpt_configure_ioc(struct mpt_softc *mpt, int tn, int needreset)
{
	PTR_MSG_PORT_FACTS_REPLY pfp;
	int error, port, val;
	size_t len;

	if (tn == MPT_MAX_TRYS) {
		return (-1);
	}

	/*
	 * No need to reset if the IOC is already in the READY state.
	 *
	 * Force reset if initialization failed previously.
	 * Note that a hard_reset of the second channel of a '929
	 * will stop operation of the first channel. Hopefully, if the
	 * first channel is ok, the second will not require a hard
	 * reset.
	 */
	if (needreset || MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_READY) {
		if (mpt_reset(mpt, FALSE) != MPT_OK) {
			return (mpt_configure_ioc(mpt, tn + 1, 1));
		}
		needreset = 0;
	}

	if (mpt_get_iocfacts(mpt, &mpt->ioc_facts) != MPT_OK) {
		mpt_prt(mpt, "mpt_get_iocfacts failed\n");
		return (mpt_configure_ioc(mpt, tn + 1, 1));
	}
	mpt2host_iocfacts_reply(&mpt->ioc_facts);

	mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
	    mpt->ioc_facts.MsgVersion >> 8,
	    mpt->ioc_facts.MsgVersion & 0xFF,
	    mpt->ioc_facts.HeaderVersion >> 8,
	    mpt->ioc_facts.HeaderVersion & 0xFF);

	/*
	 * Now that we know request frame size, we can calculate
	 * the actual (reasonable) segment limit for read/write I/O.
	 *
	 * This limit is constrained by:
	 *
	 *  + The size of each area we allocate per command (and how
	 *    many chain segments we can fit into it).
	 *  + The total number of areas we've set up.
	 *  + The actual chain depth the card will allow.
	 *
	 * The first area's segment count is limited by the I/O request
	 * at the head of it. We cannot allocate realistically more
	 * than MPT_MAX_REQUESTS areas. Therefore, to account for both
	 * conditions, we'll just start out with MPT_MAX_REQUESTS-2.
	 */
	/* total number of request areas we (can) allocate */
	mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;

	/* converted to the number of chain areas possible */
	mpt->max_seg_cnt *= MPT_NRFM(mpt);

	/* limited by the number of chain areas the card will support */
	if (mpt->max_seg_cnt > mpt->ioc_facts.MaxChainDepth) {
		mpt_lprt(mpt, MPT_PRT_INFO,
		    "chain depth limited to %u (from %u)\n",
		    mpt->ioc_facts.MaxChainDepth, mpt->max_seg_cnt);
		mpt->max_seg_cnt = mpt->ioc_facts.MaxChainDepth;
	}

	/* converted to the number of simple sges in chain segments. */
	mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);

	/*
	 * Use this as the basis for reporting the maximum I/O size to CAM.
	 */
	mpt->max_cam_seg_cnt = min(mpt->max_seg_cnt, (MAXPHYS / PAGE_SIZE) + 1);

	error = mpt_dma_buf_alloc(mpt);
	if (error != 0) {
		mpt_prt(mpt, "mpt_dma_buf_alloc() failed!\n");
		return (EIO);
	}

	for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
		request_t *req = &mpt->request_pool[val];
		req->state = REQ_STATE_ALLOCATED;
		mpt_callout_init(mpt, &req->callout);
		mpt_free_request(mpt, req);
	}

	mpt_lprt(mpt, MPT_PRT_INFO, "Maximum Segment Count: %u, Maximum "
	    "CAM Segment Count: %u\n", mpt->max_seg_cnt,
	    mpt->max_cam_seg_cnt);

	mpt_lprt(mpt, MPT_PRT_INFO, "MsgLength=%u IOCNumber = %d\n",
	    mpt->ioc_facts.MsgLength, mpt->ioc_facts.IOCNumber);
	mpt_lprt(mpt, MPT_PRT_INFO,
	    "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
	    "Request Frame Size %u bytes Max Chain Depth %u\n",
	    mpt->ioc_facts.GlobalCredits, mpt->ioc_facts.BlockSize,
	    mpt->ioc_facts.RequestFrameSize << 2,
	    mpt->ioc_facts.MaxChainDepth);
	mpt_lprt(mpt, MPT_PRT_INFO, "IOCFACTS: Num Ports %d, FWImageSize %d, "
	    "Flags=%#x\n", mpt->ioc_facts.NumberOfPorts,
	    mpt->ioc_facts.FWImageSize, mpt->ioc_facts.Flags);

	len = mpt->ioc_facts.NumberOfPorts * sizeof (MSG_PORT_FACTS_REPLY);
	mpt->port_facts = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->port_facts == NULL) {
		mpt_prt(mpt, "unable to allocate memory for port facts\n");
		return (ENOMEM);
	}

	if ((mpt->ioc_facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) &&
	    (mpt->fw_uploaded == 0)) {
		struct mpt_map_info mi;

		/*
		 * In some configurations, the IOC's firmware is
		 * stored in a shared piece of system NVRAM that
		 * is only accessible via the BIOS. In this
		 * case, the IOC keeps a copy of the firmware in
		 * RAM until the OS driver retrieves it. Once
		 * retrieved, we are responsible for re-downloading
		 * the firmware after any hard-reset.
		 */
		mpt->fw_image_size = mpt->ioc_facts.FWImageSize;
		error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
		    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
		    mpt->fw_image_size, 1, mpt->fw_image_size, 0,
		    &mpt->fw_dmat);
		if (error != 0) {
			mpt_prt(mpt, "cannot create firmware dma tag\n");
			return (ENOMEM);
		}
		error = bus_dmamem_alloc(mpt->fw_dmat,
		    (void **)&mpt->fw_image, BUS_DMA_NOWAIT |
		    BUS_DMA_COHERENT, &mpt->fw_dmap);
		if (error != 0) {
			mpt_prt(mpt, "cannot allocate firmware memory\n");
			bus_dma_tag_destroy(mpt->fw_dmat);
			return (ENOMEM);
		}
		mi.mpt = mpt;
		mi.error = 0;
		bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
		    mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, &mi, 0);
		mpt->fw_phys = mi.phys;

		error = mpt_upload_fw(mpt);
		if (error != 0) {
			mpt_prt(mpt, "firmware upload failed.\n");
			bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
			bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
			    mpt->fw_dmap);
			bus_dma_tag_destroy(mpt->fw_dmat);
			mpt->fw_image = NULL;
			return (EIO);
		}
		mpt->fw_uploaded = 1;
	}

	for (port = 0; port < mpt->ioc_facts.NumberOfPorts; port++) {
		pfp = &mpt->port_facts[port];
		error = mpt_get_portfacts(mpt, 0, pfp);
		if (error != MPT_OK) {
			mpt_prt(mpt,
			    "mpt_get_portfacts on port %d failed\n", port);
			kfree(mpt->port_facts, M_DEVBUF);
			mpt->port_facts = NULL;
			return (mpt_configure_ioc(mpt, tn + 1, 1));
		}
		mpt2host_portfacts_reply(pfp);

		if (port > 0) {
			error = MPT_PRT_INFO;
		} else {
			error = MPT_PRT_DEBUG;
		}
		mpt_lprt(mpt, error,
		    "PORTFACTS[%d]: Type %x PFlags %x IID %d MaxDev %d\n",
		    port, pfp->PortType, pfp->ProtocolFlags, pfp->PortSCSIID,
		    pfp->MaxDevices);
	}

	/*
	 * XXX: Not yet supporting more than port 0
	 */
	pfp = &mpt->port_facts[0];
	if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_FC) {
		mpt->is_fc = 1;
		mpt->is_sas = 0;
		mpt->is_spi = 0;
	} else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
		mpt->is_fc = 0;
		mpt->is_sas = 1;
		mpt->is_spi = 0;
	} else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SCSI) {
		mpt->is_fc = 0;
		mpt->is_sas = 0;
		mpt->is_spi = 1;
		if (mpt->mpt_ini_id == MPT_INI_ID_NONE)
			mpt->mpt_ini_id = pfp->PortSCSIID;
	} else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_ISCSI) {
		mpt_prt(mpt, "iSCSI not supported yet\n");
		return (ENXIO);
	} else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_INACTIVE) {
		mpt_prt(mpt, "Inactive Port\n");
		return (ENXIO);
	} else {
		mpt_prt(mpt, "unknown Port Type %#x\n", pfp->PortType);
		return (ENXIO);
	}

	/*
	 * Set our role with what this port supports.
	 *
	 * Note this might be changed later in different modules
	 * if this is different from what is wanted.
	 */
	mpt->role = MPT_ROLE_NONE;
	if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
		mpt->role |= MPT_ROLE_INITIATOR;
	}
	if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
		mpt->role |= MPT_ROLE_TARGET;
	}

	/*
	 * Enable the IOC
	 */
	if (mpt_enable_ioc(mpt, 1) != MPT_OK) {
		mpt_prt(mpt, "unable to initialize IOC\n");
		return (ENXIO);
	}

	/*
	 * Read IOC configuration information.
	 *
	 * We need this to determine whether or not we have certain
	 * settings for Integrated Mirroring (e.g.).
	 */
	mpt_read_config_info_ioc(mpt);

	return (0);
}

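/*
 * Send the IOC_INIT handshake, wait for the IOC to reach its RUNNING
 * state, and hand it reply buffers (never more than its advertised
 * GlobalCredits). If 'portenable' is set, asynchronous events and
 * port 0 are re-enabled as well; that path is taken when the IOC is
 * reset after initial startup.
 */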
static int
mpt_enable_ioc(struct mpt_softc *mpt, int portenable)
{
	uint32_t pptr;
	int val;

	if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_ioc_init failed\n");
		return (EIO);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n");

	if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) {
		mpt_prt(mpt, "IOC failed to go to run state\n");
		return (ENXIO);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");

	/*
	 * Give it reply buffers
	 *
	 * Do *not* exceed global credits.
	 */
	for (val = 0, pptr = mpt->reply_phys;
	    (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
	    pptr += MPT_REPLY_SIZE) {
		mpt_free_reply(mpt, pptr);
		if (++val == mpt->ioc_facts.GlobalCredits - 1)
			break;
	}

	/*
	 * Enable the port if asked. This is only done if we're resetting
	 * the IOC after initial startup.
	 */
	if (portenable) {
		/*
		 * Enable asynchronous event reporting
		 */
		mpt_send_event_request(mpt, 1);

		if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
			mpt_prt(mpt, "%s: failed to enable port 0\n", __func__);
			return (ENXIO);
		}
	}
	return (MPT_OK);
}

/*
 * Endian conversion functions; only used on big-endian machines.
 */
#if _BYTE_ORDER == _BIG_ENDIAN
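/*
 * The MPT_2_HOST16/32 and HOST_2_MPT16/32 macros byte-swap the named
 * structure field in place, converting between the IOC's little-endian
 * wire format and host byte order.
 */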
void
mpt2host_sge_simple_union(SGE_SIMPLE_UNION *sge)
{

	MPT_2_HOST32(sge, FlagsLength);
	MPT_2_HOST32(sge, u.Address64.Low);
	MPT_2_HOST32(sge, u.Address64.High);
}

void
mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *rp)
{

	MPT_2_HOST16(rp, MsgVersion);
	MPT_2_HOST16(rp, HeaderVersion);
	MPT_2_HOST32(rp, MsgContext);
	MPT_2_HOST16(rp, IOCExceptions);
	MPT_2_HOST16(rp, IOCStatus);
	MPT_2_HOST32(rp, IOCLogInfo);
	MPT_2_HOST16(rp, ReplyQueueDepth);
	MPT_2_HOST16(rp, RequestFrameSize);
	MPT_2_HOST16(rp, Reserved_0101_FWVersion);
	MPT_2_HOST16(rp, ProductID);
	MPT_2_HOST32(rp, CurrentHostMfaHighAddr);
	MPT_2_HOST16(rp, GlobalCredits);
	MPT_2_HOST32(rp, CurrentSenseBufferHighAddr);
	MPT_2_HOST16(rp, CurReplyFrameSize);
	MPT_2_HOST32(rp, FWImageSize);
	MPT_2_HOST32(rp, IOCCapabilities);
	MPT_2_HOST32(rp, FWVersion.Word);
	MPT_2_HOST16(rp, HighPriorityQueueDepth);
	MPT_2_HOST16(rp, Reserved2);
	mpt2host_sge_simple_union(&rp->HostPageBufferSGE);
	MPT_2_HOST32(rp, ReplyFifoHostSignalingAddr);
}

void
mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *pfp)
{

	MPT_2_HOST16(pfp, Reserved);
	MPT_2_HOST16(pfp, Reserved1);
	MPT_2_HOST32(pfp, MsgContext);
	MPT_2_HOST16(pfp, Reserved2);
	MPT_2_HOST16(pfp, IOCStatus);
	MPT_2_HOST32(pfp, IOCLogInfo);
	MPT_2_HOST16(pfp, MaxDevices);
	MPT_2_HOST16(pfp, PortSCSIID);
	MPT_2_HOST16(pfp, ProtocolFlags);
	MPT_2_HOST16(pfp, MaxPostedCmdBuffers);
	MPT_2_HOST16(pfp, MaxPersistentIDs);
	MPT_2_HOST16(pfp, MaxLanBuckets);
	MPT_2_HOST16(pfp, Reserved4);
	MPT_2_HOST32(pfp, Reserved5);
}

void
mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *ioc2)
{
	int i;

	MPT_2_HOST32(ioc2, CapabilitiesFlags);
	for (i = 0; i < MPI_IOC_PAGE_2_RAID_VOLUME_MAX; i++) {
		MPT_2_HOST16(ioc2, RaidVolume[i].Reserved3);
	}
}

void
mpt2host_config_page_ioc3(CONFIG_PAGE_IOC_3 *ioc3)
{

	MPT_2_HOST16(ioc3, Reserved2);
}

void
mpt2host_config_page_scsi_port_0(CONFIG_PAGE_SCSI_PORT_0 *sp0)
{

	MPT_2_HOST32(sp0, Capabilities);
	MPT_2_HOST32(sp0, PhysicalInterface);
}

void
mpt2host_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
{

	MPT_2_HOST32(sp1, Configuration);
	MPT_2_HOST32(sp1, OnBusTimerValue);
	MPT_2_HOST16(sp1, IDConfig);
}

void
host2mpt_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1)
{

	HOST_2_MPT32(sp1, Configuration);
	HOST_2_MPT32(sp1, OnBusTimerValue);
	HOST_2_MPT16(sp1, IDConfig);
}

void
mpt2host_config_page_scsi_port_2(CONFIG_PAGE_SCSI_PORT_2 *sp2)
{
	int i;

	MPT_2_HOST32(sp2, PortFlags);
	MPT_2_HOST32(sp2, PortSettings);
	for (i = 0; i < sizeof(sp2->DeviceSettings) /
	    sizeof(*sp2->DeviceSettings); i++) {
		MPT_2_HOST16(sp2, DeviceSettings[i].DeviceFlags);
	}
}

void
mpt2host_config_page_scsi_device_0(CONFIG_PAGE_SCSI_DEVICE_0 *sd0)
{

	MPT_2_HOST32(sd0, NegotiatedParameters);
	MPT_2_HOST32(sd0, Information);
}

void
mpt2host_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
{

	MPT_2_HOST32(sd1, RequestedParameters);
	MPT_2_HOST32(sd1, Reserved);
	MPT_2_HOST32(sd1, Configuration);
}

void
host2mpt_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1)
{

	HOST_2_MPT32(sd1, RequestedParameters);
	HOST_2_MPT32(sd1, Reserved);
	HOST_2_MPT32(sd1, Configuration);
}

void
mpt2host_config_page_fc_port_0(CONFIG_PAGE_FC_PORT_0 *fp0)
{

	MPT_2_HOST32(fp0, Flags);
	MPT_2_HOST32(fp0, PortIdentifier);
	MPT_2_HOST32(fp0, WWNN.Low);
	MPT_2_HOST32(fp0, WWNN.High);
	MPT_2_HOST32(fp0, WWPN.Low);
	MPT_2_HOST32(fp0, WWPN.High);
	MPT_2_HOST32(fp0, SupportedServiceClass);
	MPT_2_HOST32(fp0, SupportedSpeeds);
	MPT_2_HOST32(fp0, CurrentSpeed);
	MPT_2_HOST32(fp0, MaxFrameSize);
	MPT_2_HOST32(fp0, FabricWWNN.Low);
	MPT_2_HOST32(fp0, FabricWWNN.High);
	MPT_2_HOST32(fp0, FabricWWPN.Low);
	MPT_2_HOST32(fp0, FabricWWPN.High);
	MPT_2_HOST32(fp0, DiscoveredPortsCount);
	MPT_2_HOST32(fp0, MaxInitiators);
}

void
mpt2host_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
{

	MPT_2_HOST32(fp1, Flags);
	MPT_2_HOST32(fp1, NoSEEPROMWWNN.Low);
	MPT_2_HOST32(fp1, NoSEEPROMWWNN.High);
	MPT_2_HOST32(fp1, NoSEEPROMWWPN.Low);
	MPT_2_HOST32(fp1, NoSEEPROMWWPN.High);
}

void
host2mpt_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1)
{

	HOST_2_MPT32(fp1, Flags);
	HOST_2_MPT32(fp1, NoSEEPROMWWNN.Low);
	HOST_2_MPT32(fp1, NoSEEPROMWWNN.High);
	HOST_2_MPT32(fp1, NoSEEPROMWWPN.Low);
	HOST_2_MPT32(fp1, NoSEEPROMWWPN.High);
}

void
mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *volp)
{
	int i;

	MPT_2_HOST16(volp, VolumeStatus.Reserved);
	MPT_2_HOST16(volp, VolumeSettings.Settings);
	MPT_2_HOST32(volp, MaxLBA);
	MPT_2_HOST32(volp, MaxLBAHigh);
	MPT_2_HOST32(volp, StripeSize);
	MPT_2_HOST32(volp, Reserved2);
	MPT_2_HOST32(volp, Reserved3);
	for (i = 0; i < MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX; i++) {
		MPT_2_HOST16(volp, PhysDisk[i].Reserved);
	}
}

void
mpt2host_config_page_raid_phys_disk_0(CONFIG_PAGE_RAID_PHYS_DISK_0 *rpd0)
{

	MPT_2_HOST32(rpd0, Reserved1);
	MPT_2_HOST16(rpd0, PhysDiskStatus.Reserved);
	MPT_2_HOST32(rpd0, MaxLBA);
	MPT_2_HOST16(rpd0, ErrorData.Reserved);
	MPT_2_HOST16(rpd0, ErrorData.ErrorCount);
	MPT_2_HOST16(rpd0, ErrorData.SmartCount);
}

void
mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *vi)
{

	MPT_2_HOST16(vi, TotalBlocks.High);
	MPT_2_HOST16(vi, TotalBlocks.Low);
	MPT_2_HOST16(vi, BlocksRemaining.High);
	MPT_2_HOST16(vi, BlocksRemaining.Low);
}
#endif