mpt(4): Mark two functions __printflike.
dragonfly.git: sys/dev/disk/mpt/mpt_raid.c
1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*-
35  * Some Breakage and Bug Fixing added later.
36  * Copyright (c) 2006, by Matthew Jacob
37  * All Rights Reserved
38  *
39  * Support from LSI-Logic has also gone a great deal toward making this a
40  * workable subsystem and is gratefully acknowledged.
41  *
42  * $FreeBSD: src/sys/dev/mpt/mpt_raid.c,v 1.30 2011/07/29 18:38:31 marius Exp $
43  */
44
45 #include <dev/disk/mpt/mpt.h>
46 #include <dev/disk/mpt/mpt_raid.h>
47
48 #include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
49 #include "dev/disk/mpt/mpilib/mpi_raid.h"
50
51 #include <bus/cam/cam.h>
52 #include <bus/cam/cam_ccb.h>
53 #include <bus/cam/cam_sim.h>
54 #include <bus/cam/cam_xpt_sim.h>
55 #include <bus/cam/cam_periph.h>
56
57 #include <sys/callout.h>
58 #include <sys/kthread.h>
59 #include <sys/sysctl.h>
60
61 #include <machine/stdarg.h>
62
63 struct mpt_raid_action_result
64 {
65         union {
66                 MPI_RAID_VOL_INDICATOR  indicator_struct;
67                 uint32_t                new_settings;
68                 uint8_t                 phys_disk_num;
69         } action_data;
70         uint16_t                        action_status;
71 };
72
73 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
74         (((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
75
76 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
77
78 static mpt_probe_handler_t      mpt_raid_probe;
79 static mpt_attach_handler_t     mpt_raid_attach;
80 static mpt_enable_handler_t     mpt_raid_enable;
81 static mpt_event_handler_t      mpt_raid_event;
82 static mpt_shutdown_handler_t   mpt_raid_shutdown;
83 static mpt_reset_handler_t      mpt_raid_ioc_reset;
84 static mpt_detach_handler_t     mpt_raid_detach;
85
86 static struct mpt_personality mpt_raid_personality =
87 {
88         .name           = "mpt_raid",
89         .probe          = mpt_raid_probe,
90         .attach         = mpt_raid_attach,
91         .enable         = mpt_raid_enable,
92         .event          = mpt_raid_event,
93         .reset          = mpt_raid_ioc_reset,
94         .shutdown       = mpt_raid_shutdown,
95         .detach         = mpt_raid_detach,
96 };
97
98 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
99 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
100
101 static mpt_reply_handler_t mpt_raid_reply_handler;
102 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
103                                         MSG_DEFAULT_REPLY *reply_frame);
104 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
105 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
106 static void mpt_raid_thread(void *arg);
107 static timeout_t mpt_raid_timer;
108 #if 0
109 static void mpt_enable_vol(struct mpt_softc *mpt,
110                            struct mpt_raid_volume *mpt_vol, int enable);
111 #endif
112 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
113 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
114     struct cam_path *);
115 static void mpt_raid_sysctl_attach(struct mpt_softc *);
116
117 static const char *mpt_vol_type(struct mpt_raid_volume *vol);
118 static const char *mpt_vol_state(struct mpt_raid_volume *vol);
119 static const char *mpt_disk_state(struct mpt_raid_disk *disk);
120 static void mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
121     const char *fmt, ...) __printflike(3, 4);
122 static void mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
123     const char *fmt, ...) __printflike(3, 4);
124
125 static int mpt_issue_raid_req(struct mpt_softc *mpt,
126     struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req,
127     u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
128     int write, int wait);
129
130 static int mpt_refresh_raid_data(struct mpt_softc *mpt);
131 static void mpt_schedule_raid_refresh(struct mpt_softc *mpt);
132
133 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
134
135 static const char *
136 mpt_vol_type(struct mpt_raid_volume *vol)
137 {
138         switch (vol->config_page->VolumeType) {
139         case MPI_RAID_VOL_TYPE_IS:
140                 return ("RAID-0");
141         case MPI_RAID_VOL_TYPE_IME:
142                 return ("RAID-1E");
143         case MPI_RAID_VOL_TYPE_IM:
144                 return ("RAID-1");
145         default:
146                 return ("Unknown");
147         }
148 }
149
150 static const char *
151 mpt_vol_state(struct mpt_raid_volume *vol)
152 {
153         switch (vol->config_page->VolumeStatus.State) {
154         case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
155                 return ("Optimal");
156         case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
157                 return ("Degraded");
158         case MPI_RAIDVOL0_STATUS_STATE_FAILED:
159                 return ("Failed");
160         default:
161                 return ("Unknown");
162         }
163 }
164
165 static const char *
166 mpt_disk_state(struct mpt_raid_disk *disk)
167 {
168         switch (disk->config_page.PhysDiskStatus.State) {
169         case MPI_PHYSDISK0_STATUS_ONLINE:
170                 return ("Online");
171         case MPI_PHYSDISK0_STATUS_MISSING:
172                 return ("Missing");
173         case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
174                 return ("Incompatible");
175         case MPI_PHYSDISK0_STATUS_FAILED:
176                 return ("Failed");
177         case MPI_PHYSDISK0_STATUS_INITIALIZING:
178                 return ("Initializing");
179         case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
180                 return ("Offline Requested");
181         case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
182                 return ("Failed per Host Request");
183         case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
184                 return ("Offline");
185         default:
186                 return ("Unknown");
187         }
188 }
189
190 static void
191 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
192             const char *fmt, ...)
193 {
194         __va_list ap;
195
196         kprintf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
197                (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
198                vol->config_page->VolumeBus, vol->config_page->VolumeID);
199         __va_start(ap, fmt);
200         kvprintf(fmt, ap);
201         __va_end(ap);
202 }
203
204 static void
205 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
206              const char *fmt, ...)
207 {
208         __va_list ap;
209
210         if (disk->volume != NULL) {
211                 kprintf("(%s:vol%d:%d): ",
212                        device_get_nameunit(mpt->dev),
213                        disk->volume->config_page->VolumeID,
214                        disk->member_number);
215         } else {
216                 kprintf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
217                        disk->config_page.PhysDiskBus,
218                        disk->config_page.PhysDiskID);
219         }
220         __va_start(ap, fmt);
221         kvprintf(fmt, ap);
222         __va_end(ap);
223 }
224
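/*
 * CAM async callback: when a new device is reported for a target that
 * matches an active RAID volume, adjust its queue depth to the
 * configured RAID queue depth.
 */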
225 static void
226 mpt_raid_async(void *callback_arg, u_int32_t code,
227                struct cam_path *path, void *arg)
228 {
229         struct mpt_softc *mpt;
230
231         mpt = (struct mpt_softc*)callback_arg;
232         switch (code) {
233         case AC_FOUND_DEVICE:
234         {
235                 struct ccb_getdev *cgd;
236                 struct mpt_raid_volume *mpt_vol;
237
238                 cgd = (struct ccb_getdev *)arg;
239                 if (cgd == NULL) {
240                         break;
241                 }
242
243                 mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
244                          cgd->ccb_h.target_id);
245
246                 RAID_VOL_FOREACH(mpt, mpt_vol) {
247                         if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
248                                 continue;
249
250                         if (mpt_vol->config_page->VolumeID
251                          == cgd->ccb_h.target_id) {
252                                 mpt_adjust_queue_depth(mpt, mpt_vol, path);
253                                 break;
254                         }
255                 }
256         }
257         default:
258                 break;
259         }
260 }
261
262 static int
263 mpt_raid_probe(struct mpt_softc *mpt)
264 {
265
266         if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
267                 return (ENODEV);
268         }
269         return (0);
270 }
271
272 static int
273 mpt_raid_attach(struct mpt_softc *mpt)
274 {
275         struct ccb_setasync csa;
276         mpt_handler_t    handler;
277         int              error;
278
279         mpt_callout_init(mpt, &mpt->raid_timer);
280
281         error = mpt_spawn_raid_thread(mpt);
282         if (error != 0) {
283                 mpt_prt(mpt, "Unable to spawn RAID thread!\n");
284                 goto cleanup;
285         }
286
287         MPT_LOCK(mpt);
288         handler.reply_handler = mpt_raid_reply_handler;
289         error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
290                                      &raid_handler_id);
291         if (error != 0) {
292                 mpt_prt(mpt, "Unable to register RAID handler!\n");
293                 goto cleanup;
294         }
295
296         xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
297         csa.ccb_h.func_code = XPT_SASYNC_CB;
298         csa.event_enable = AC_FOUND_DEVICE;
299         csa.callback = mpt_raid_async;
300         csa.callback_arg = mpt;
301         xpt_action((union ccb *)&csa);
302         if (csa.ccb_h.status != CAM_REQ_CMP) {
303                 mpt_prt(mpt, "mpt_raid_attach: Unable to register "
304                         "CAM async handler.\n");
305         }
306         MPT_UNLOCK(mpt);
307
308         mpt_raid_sysctl_attach(mpt);
309         return (0);
310 cleanup:
311         MPT_UNLOCK(mpt);
312         mpt_raid_detach(mpt);
313         return (error);
314 }
315
316 static int
317 mpt_raid_enable(struct mpt_softc *mpt)
318 {
319
320         return (0);
321 }
322
323 static void
324 mpt_raid_detach(struct mpt_softc *mpt)
325 {
326         struct ccb_setasync csa;
327         mpt_handler_t handler;
328
329         callout_stop(&mpt->raid_timer);
330
331         MPT_LOCK(mpt);
332         mpt_terminate_raid_thread(mpt);
333         handler.reply_handler = mpt_raid_reply_handler;
334         mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
335                                raid_handler_id);
336         xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
337         csa.ccb_h.func_code = XPT_SASYNC_CB;
338         csa.event_enable = 0;
339         csa.callback = mpt_raid_async;
340         csa.callback_arg = mpt;
341         xpt_action((union ccb *)&csa);
342         MPT_UNLOCK(mpt);
343 }
344
345 static void
346 mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
347 {
348
349         /* Nothing to do yet. */
350 }
351
352 static const char *raid_event_txt[] =
353 {
354         "Volume Created",
355         "Volume Deleted",
356         "Volume Settings Changed",
357         "Volume Status Changed",
358         "Volume Physical Disk Membership Changed",
359         "Physical Disk Created",
360         "Physical Disk Deleted",
361         "Physical Disk Settings Changed",
362         "Physical Disk Status Changed",
363         "Domain Validation Required",
364         "SMART Data Received",
365         "Replace Action Started",
366 };
367
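/*
 * Handle MPI_EVENT_INTEGRATED_RAID notifications: locate the affected
 * volume and/or physical disk, mark their cached configuration data
 * stale, request a bus rescan where appropriate, log the event, and
 * wake the RAID monitoring thread.
 */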
368 static int
369 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
370                MSG_EVENT_NOTIFY_REPLY *msg)
371 {
372         EVENT_DATA_RAID *raid_event;
373         struct mpt_raid_volume *mpt_vol;
374         struct mpt_raid_disk *mpt_disk;
375         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
376         int i;
377         int print_event;
378
379         if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
380                 return (0);
381         }
382
383         raid_event = (EVENT_DATA_RAID *)&msg->Data;
384
385         mpt_vol = NULL;
386         vol_pg = NULL;
387         if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
388                 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
389                         mpt_vol = &mpt->raid_volumes[i];
390                         vol_pg = mpt_vol->config_page;
391
392                         if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
393                                 continue;
394
395                         if (vol_pg->VolumeID == raid_event->VolumeID
396                          && vol_pg->VolumeBus == raid_event->VolumeBus)
397                                 break;
398                 }
399                 if (i >= mpt->ioc_page2->MaxVolumes) {
400                         mpt_vol = NULL;
401                         vol_pg = NULL;
402                 }
403         }
404
405         mpt_disk = NULL;
406         if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
407                 mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
408                 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
409                         mpt_disk = NULL;
410                 }
411         }
412
413         print_event = 1;
414         switch(raid_event->ReasonCode) {
415         case MPI_EVENT_RAID_RC_VOLUME_CREATED:
416         case MPI_EVENT_RAID_RC_VOLUME_DELETED:
417                 break;
418         case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
419                 if (mpt_vol != NULL) {
420                         if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
421                                 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
422                         } else {
423                                 /*
424                                  * Coalesce status messages into one
425                                  * per background run of our RAID thread.
426                                  * This removes "spurious" status messages
427                                  * from our output.
428                                  */
429                                 print_event = 0;
430                         }
431                 }
432                 break;
433         case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
434         case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
435                 mpt->raid_rescan++;
436                 if (mpt_vol != NULL) {
437                         mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
438                 }
439                 break;
440         case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
441         case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
442                 mpt->raid_rescan++;
443                 break;
444         case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
445         case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
446                 mpt->raid_rescan++;
447                 if (mpt_disk != NULL) {
448                         mpt_disk->flags &= ~MPT_RDF_UP2DATE;
449                 }
450                 break;
451         case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
452                 mpt->raid_rescan++;
453                 break;
454         case MPI_EVENT_RAID_RC_SMART_DATA:
455         case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
456                 break;
457         }
458
459         if (print_event) {
460                 if (mpt_disk != NULL) {
461                         mpt_disk_prt(mpt, mpt_disk, "%s", "");
462                 } else if (mpt_vol != NULL) {
463                         mpt_vol_prt(mpt, mpt_vol, "%s", "");
464                 } else {
465                         mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
466                                 raid_event->VolumeID);
467
468                         if (raid_event->PhysDiskNum != 0xFF)
469                                 mpt_prtc(mpt, ":%d): ",
470                                          raid_event->PhysDiskNum);
471                         else
472                                 mpt_prtc(mpt, "): ");
473                 }
474
475                 if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
476                         mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
477                                  raid_event->ReasonCode);
478                 else
479                         mpt_prtc(mpt, "%s\n",
480                                  raid_event_txt[raid_event->ReasonCode]);
481         }
482
483         if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
484                 /* XXX Use CAM's print sense for this... */
485                 if (mpt_disk != NULL)
486                         mpt_disk_prt(mpt, mpt_disk, "%s", "");
487                 else
488                         mpt_prt(mpt, "Volume(%d:%d:%d: ",
489                             raid_event->VolumeBus, raid_event->VolumeID,
490                             raid_event->PhysDiskNum);
491                 mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
492                          raid_event->ASC, raid_event->ASCQ);
493         }
494
495         mpt_raid_wakeup(mpt);
496         return (1);
497 }
498
499 static void
500 mpt_raid_shutdown(struct mpt_softc *mpt)
501 {
502         struct mpt_raid_volume *mpt_vol;
503
504         if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
505                 return;
506         }
507
508         mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
509         RAID_VOL_FOREACH(mpt, mpt_vol) {
510                 mpt_verify_mwce(mpt, mpt_vol);
511         }
512 }
513
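/*
 * Completion handler for RAID action requests: record the completion
 * state, wake any waiter, and free the request unless a waiter will
 * consume it.
 */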
514 static int
515 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
516     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
517 {
518         int free_req;
519
520         if (req == NULL)
521                 return (TRUE);
522
523         free_req = TRUE;
524         if (reply_frame != NULL)
525                 free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
526 #ifdef NOTYET
527         else if (req->ccb != NULL) {
528                 /* Complete Quiesce CCB with error... */
529         }
530 #endif
531
532         req->state &= ~REQ_STATE_QUEUED;
533         req->state |= REQ_STATE_DONE;
534         TAILQ_REMOVE(&mpt->request_pending_list, req, links);
535
536         if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
537                 wakeup(req);
538         } else if (free_req) {
539                 mpt_free_request(mpt, req);
540         }
541
542         return (TRUE);
543 }
544
545 /*
546  * Parse additional completion information in the reply
547  * frame for RAID I/O requests.
548  */
549 static int
550 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
551     MSG_DEFAULT_REPLY *reply_frame)
552 {
553         MSG_RAID_ACTION_REPLY *reply;
554         struct mpt_raid_action_result *action_result;
555         MSG_RAID_ACTION_REQUEST *rap;
556
557         reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
558         req->IOCStatus = le16toh(reply->IOCStatus);
559         rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
560
561         switch (rap->Action) {
562         case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
563                 mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
564                 break;
565         case MPI_RAID_ACTION_ENABLE_PHYS_IO:
566                 mpt_prt(mpt, "ENABLE PHYSIO DONE\n");
567                 break;
568         default:
569                 break;
570         }
571         action_result = REQ_TO_RAID_ACTION_RESULT(req);
572         memcpy(&action_result->action_data, &reply->ActionData,
573             sizeof(action_result->action_data));
574         action_result->action_status = le16toh(reply->ActionStatus);
575         return (TRUE);
576 }
577
578 /*
579  * Utility routine to perform a RAID action command.
580  */
581 static int
582 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
583                    struct mpt_raid_disk *disk, request_t *req, u_int Action,
584                    uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
585                    int write, int wait)
586 {
587         MSG_RAID_ACTION_REQUEST *rap;
588         SGE_SIMPLE32 *se;
589
590         rap = req->req_vbuf;
591         memset(rap, 0, sizeof *rap);
592         rap->Action = Action;
593         rap->ActionDataWord = htole32(ActionDataWord);
594         rap->Function = MPI_FUNCTION_RAID_ACTION;
595         rap->VolumeID = vol->config_page->VolumeID;
596         rap->VolumeBus = vol->config_page->VolumeBus;
597         if (disk != NULL)
598                 rap->PhysDiskNum = disk->config_page.PhysDiskNum;
599         else
600                 rap->PhysDiskNum = 0xFF;
601         se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
602         se->Address = htole32(addr);
603         MPI_pSGE_SET_LENGTH(se, len);
604         MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
605             MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
606             MPI_SGE_FLAGS_END_OF_LIST |
607             (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
608         se->FlagsLength = htole32(se->FlagsLength);
609         rap->MsgContext = htole32(req->index | raid_handler_id);
610
611         mpt_check_doorbell(mpt);
612         mpt_send_cmd(mpt, req);
613
614         if (wait) {
615                 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
616                                      /*sleep_ok*/FALSE, /*time_ms*/2000));
617         } else {
618                 return (0);
619         }
620 }
621
622 /*************************** RAID Status Monitoring ***************************/
623 static int
624 mpt_spawn_raid_thread(struct mpt_softc *mpt)
625 {
626         int error;
627
628         /*
629          * Freeze out any CAM transactions until our thread
630          * is able to run at least once.  We need to update
631          * our RAID pages before accepting I/O or we may
632          * reject I/O to an ID we later determine is for a
633          * hidden physdisk.
634          */
635         MPT_LOCK(mpt);
636         xpt_freeze_simq(mpt->phydisk_sim, 1);
637         MPT_UNLOCK(mpt);
638         error = mpt_kthread_create(mpt_raid_thread, mpt,
639             &mpt->raid_thread, /*flags*/0, /*altstack*/0,
640             "mpt_raid%d", mpt->unit);
641         if (error != 0) {
642                 MPT_LOCK(mpt);
643                 xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
644                 MPT_UNLOCK(mpt);
645         }
646         return (error);
647 }
648
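/*
 * Ask the RAID monitoring thread to exit and wait until it has done so.
 */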
649 static void
650 mpt_terminate_raid_thread(struct mpt_softc *mpt)
651 {
652
653         if (mpt->raid_thread == NULL) {
654                 return;
655         }
656         mpt->shutdwn_raid = 1;
657         wakeup(&mpt->raid_volumes);
658         /*
659          * Sleep on a slightly different location
660          * for this interlock just for added safety.
661          */
662         mpt_sleep(mpt, &mpt->raid_thread, 0, "thtrm", 0);
663 }
664
665 static void
666 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
667 {
668         xpt_free_path(ccb->ccb_h.path);
669         kfree(ccb, M_TEMP);
670 }
671
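/*
 * RAID monitoring thread: refreshes the cached RAID configuration data
 * whenever it is woken up and, when requested, rescans the physical
 * disk bus.  The SIM queue frozen in mpt_spawn_raid_thread() is
 * released after the first successful refresh.
 */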
672 static void
673 mpt_raid_thread(void *arg)
674 {
675         struct mpt_softc *mpt;
676         int firstrun;
677
678         mpt = (struct mpt_softc *)arg;
679         firstrun = 1;
680         MPT_LOCK(mpt);
681         while (mpt->shutdwn_raid == 0) {
682
683                 if (mpt->raid_wakeup == 0) {
684                         mpt_sleep(mpt, &mpt->raid_volumes, 0, "idle", 0);
685                         continue;
686                 }
687
688                 mpt->raid_wakeup = 0;
689
690                 if (mpt_refresh_raid_data(mpt)) {
691                         mpt_schedule_raid_refresh(mpt); /* XX NOT QUITE RIGHT */
692                         continue;
693                 }
694
695                 /*
696                  * Now that we have our first snapshot of RAID data,
697                  * allow CAM to access our physical disk bus.
698                  */
699                 if (firstrun) {
700                         firstrun = 0;
701                         MPTLOCK_2_CAMLOCK(mpt);
702                         xpt_release_simq(mpt->phydisk_sim, TRUE);
703                         CAMLOCK_2_MPTLOCK(mpt);
704                 }
705
706                 if (mpt->raid_rescan != 0) {
707                         union ccb *ccb;
708                         int error;
709
710                         mpt->raid_rescan = 0;
711                         MPT_UNLOCK(mpt);
712
713                         ccb = kmalloc(sizeof(union ccb), M_TEMP,
714                             M_WAITOK | M_ZERO);
715
716                         MPT_LOCK(mpt);
717                         error = xpt_create_path(&ccb->ccb_h.path, xpt_periph,
718                             cam_sim_path(mpt->phydisk_sim),
719                             CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
720                         if (error != CAM_REQ_CMP) {
721                                 kfree(ccb, M_TEMP);
722                                 mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
723                         } else {
724                                 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
725                                     5/*priority (low)*/);
726                                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
727                                 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
728                                 ccb->crcn.flags = CAM_FLAG_NONE;
729                                 xpt_action(ccb);
730
731                                 /* scan is now in progress */
732                         }
733                 }
734         }
735         mpt->raid_thread = NULL;
736         wakeup(&mpt->raid_thread);
737         MPT_UNLOCK(mpt);
738         mpt_kthread_exit(0);
739 }
740
741 #if 0
742 static void
743 mpt_raid_quiesce_timeout(void *arg)
744 {
745
746         /* Complete the CCB with error */
747         /* COWWWW */
748 }
749
750 static timeout_t mpt_raid_quiesce_timeout;
751 cam_status
752 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
753                       request_t *req)
754 {
755         union ccb *ccb;
756
757         ccb = req->ccb;
758         if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
759                 return (CAM_REQ_CMP);
760
761         if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
762                 int rv;
763
764                 mpt_disk->flags |= MPT_RDF_QUIESCING;
765                 xpt_freeze_devq(ccb->ccb_h.path, 1);
766
767                 rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
768                                         MPI_RAID_ACTION_QUIESCE_PHYS_IO,
769                                         /*ActionData*/0, /*addr*/0,
770                                         /*len*/0, /*write*/FALSE,
771                                         /*wait*/FALSE);
772                 if (rv != 0)
773                         return (CAM_REQ_CMP_ERR);
774
775                 mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
776 #if 0
777                 if (rv == ETIMEDOUT) {
778                         mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
779                                      "Quiesce Timed-out\n");
780                         xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
781                         return (CAM_REQ_CMP_ERR);
782                 }
783
784                 ar = REQ_TO_RAID_ACTION_RESULT(req);
785                 if (rv != 0
786                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
787                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
788                         mpt_disk_prt(mpt, mpt_disk, "Quiesce Failed: "
789                                     "%d:%x:%x\n", rv, req->IOCStatus,
790                                     ar->action_status);
791                         xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
792                         return (CAM_REQ_CMP_ERR);
793                 }
794 #endif
795                 return (CAM_REQ_INPROG);
796         }
797         return (CAM_REQUEUE_REQ);
798 }
799 #endif
800
801 /* XXX Ignores that there may be multiple busses/IOCs involved. */
802 cam_status
803 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt)
804 {
805         struct mpt_raid_disk *mpt_disk;
806
807         mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
808         if (ccb->ccb_h.target_id < mpt->raid_max_disks
809          && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
810                 *tgt = mpt_disk->config_page.PhysDiskID;
811                 return (0);
812         }
813         mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
814                  ccb->ccb_h.target_id);
815         return (-1);
816 }
817
818 /* XXX Ignores that there may be multiple busses/IOCs involved. */
819 int
820 mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt)
821 {
822         struct mpt_raid_disk *mpt_disk;
823         int i;
824
825         if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0)
826                 return (0);
827         for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
828                 mpt_disk = &mpt->raid_disks[i];
829                 if ((mpt_disk->flags & MPT_RDF_ACTIVE) != 0 &&
830                     mpt_disk->config_page.PhysDiskID == tgt)
831                         return (1);
832         }
833         return (0);
834
835 }
836
837 /* XXX Ignores that there may be multiple busses/IOCs involved. */
838 int
839 mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
840 {
841         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
842         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
843
844         if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
845                 return (0);
846         }
847         ioc_vol = mpt->ioc_page2->RaidVolume;
848         ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
849         for (;ioc_vol != ioc_last_vol; ioc_vol++) {
850                 if (ioc_vol->VolumeID == tgt) {
851                         return (1);
852                 }
853         }
854         return (0);
855 }
856
857 #if 0
858 static void
859 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
860                int enable)
861 {
862         request_t *req;
863         struct mpt_raid_action_result *ar;
864         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
865         int enabled;
866         int rv;
867
868         vol_pg = mpt_vol->config_page;
869         enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
870
871         /*
872          * If the setting matches the configuration,
873          * there is nothing to do.
874          */
875         if ((enabled && enable)
876          || (!enabled && !enable))
877                 return;
878
879         req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
880         if (req == NULL) {
881                 mpt_vol_prt(mpt, mpt_vol,
882                             "mpt_enable_vol: Get request failed!\n");
883                 return;
884         }
885
886         rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
887                                 enable ? MPI_RAID_ACTION_ENABLE_VOLUME
888                                        : MPI_RAID_ACTION_DISABLE_VOLUME,
889                                 /*data*/0, /*addr*/0, /*len*/0,
890                                 /*write*/FALSE, /*wait*/TRUE);
891         if (rv == ETIMEDOUT) {
892                 mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
893                             "%s Volume Timed-out\n",
894                             enable ? "Enable" : "Disable");
895                 return;
896         }
897         ar = REQ_TO_RAID_ACTION_RESULT(req);
898         if (rv != 0
899          || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
900          || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
901                 mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
902                             enable ? "Enable" : "Disable",
903                             rv, req->IOCStatus, ar->action_status);
904         }
905
906         mpt_free_request(mpt, req);
907 }
908 #endif
909
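/*
 * Ensure the volume's member write cache enable (MWCE) setting matches
 * the driver's configured policy, issuing a CHANGE_VOLUME_SETTINGS
 * RAID action if it does not.
 */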
910 static void
911 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
912 {
913         request_t *req;
914         struct mpt_raid_action_result *ar;
915         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
916         uint32_t data;
917         int rv;
918         int resyncing;
919         int mwce;
920
921         vol_pg = mpt_vol->config_page;
922         resyncing = vol_pg->VolumeStatus.Flags
923                   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
924         mwce = vol_pg->VolumeSettings.Settings
925              & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
926
927         /*
928          * If the setting matches the configuration,
929          * there is nothing to do.
930          */
931         switch (mpt->raid_mwce_setting) {
932         case MPT_RAID_MWCE_REBUILD_ONLY:
933                 if ((resyncing && mwce) || (!resyncing && !mwce)) {
934                         return;
935                 }
936                 mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
937                 if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
938                         /*
939                          * Wait one more status update to see if
940                          * resyncing gets enabled.  It gets disabled
941                          * temporarily when WCE is changed.
942                          */
943                         return;
944                 }
945                 break;
946         case MPT_RAID_MWCE_ON:
947                 if (mwce)
948                         return;
949                 break;
950         case MPT_RAID_MWCE_OFF:
951                 if (!mwce)
952                         return;
953                 break;
954         case MPT_RAID_MWCE_NC:
955                 return;
956         }
957
958         req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
959         if (req == NULL) {
960                 mpt_vol_prt(mpt, mpt_vol,
961                             "mpt_verify_mwce: Get request failed!\n");
962                 return;
963         }
964
965         vol_pg->VolumeSettings.Settings ^=
966             MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
967         memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
968         vol_pg->VolumeSettings.Settings ^=
969             MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
970         rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
971                                 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
972                                 data, /*addr*/0, /*len*/0,
973                                 /*write*/FALSE, /*wait*/TRUE);
974         if (rv == ETIMEDOUT) {
975                 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
976                             "Write Cache Enable Timed-out\n");
977                 return;
978         }
979         ar = REQ_TO_RAID_ACTION_RESULT(req);
980         if (rv != 0
981          || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
982          || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
983                 mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
984                             "%d:%x:%x\n", rv, req->IOCStatus,
985                             ar->action_status);
986         } else {
987                 vol_pg->VolumeSettings.Settings ^=
988                     MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
989         }
990         mpt_free_request(mpt, req);
991 }
992
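/*
 * Ensure the volume's resync rate and resync priority match the
 * driver's configured resync rate, issuing the appropriate RAID action
 * if they do not.
 */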
993 static void
994 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
995 {
996         request_t *req;
997         struct mpt_raid_action_result *ar;
998         CONFIG_PAGE_RAID_VOL_0  *vol_pg;
999         u_int prio;
1000         int rv;
1001
1002         vol_pg = mpt_vol->config_page;
1003
1004         if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
1005                 return;
1006
1007         /*
1008          * If the current RAID resync rate does not
1009          * match our configured rate, update it.
1010          */
1011         prio = vol_pg->VolumeSettings.Settings
1012              & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1013         if (vol_pg->ResyncRate != 0
1014          && vol_pg->ResyncRate != mpt->raid_resync_rate) {
1015
1016                 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1017                 if (req == NULL) {
1018                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1019                                     "Get request failed!\n");
1020                         return;
1021                 }
1022
1023                 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1024                                         MPI_RAID_ACTION_SET_RESYNC_RATE,
1025                                         mpt->raid_resync_rate, /*addr*/0,
1026                                         /*len*/0, /*write*/FALSE, /*wait*/TRUE);
1027                 if (rv == ETIMEDOUT) {
1028                         mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1029                                     "Resync Rate Setting Timed-out\n");
1030                         return;
1031                 }
1032
1033                 ar = REQ_TO_RAID_ACTION_RESULT(req);
1034                 if (rv != 0
1035                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1036                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1037                         mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1038                                     "%d:%x:%x\n", rv, req->IOCStatus,
1039                                     ar->action_status);
1040                 } else
1041                         vol_pg->ResyncRate = mpt->raid_resync_rate;
1042                 mpt_free_request(mpt, req);
1043         } else if ((prio && mpt->raid_resync_rate < 128)
1044                 || (!prio && mpt->raid_resync_rate >= 128)) {
1045                 uint32_t data;
1046
1047                 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1048                 if (req == NULL) {
1049                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1050                                     "Get request failed!\n");
1051                         return;
1052                 }
1053
1054                 vol_pg->VolumeSettings.Settings ^=
1055                     MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1056                 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
1057                 vol_pg->VolumeSettings.Settings ^=
1058                     MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1059                 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1060                                         MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
1061                                         data, /*addr*/0, /*len*/0,
1062                                         /*write*/FALSE, /*wait*/TRUE);
1063                 if (rv == ETIMEDOUT) {
1064                         mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1065                                     "Resync Rate Setting Timed-out\n");
1066                         return;
1067                 }
1068                 ar = REQ_TO_RAID_ACTION_RESULT(req);
1069                 if (rv != 0
1070                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1071                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1072                         mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1073                                     "%d:%x:%x\n", rv, req->IOCStatus,
1074                                     ar->action_status);
1075                 } else {
1076                         vol_pg->VolumeSettings.Settings ^=
1077                             MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1078                 }
1079
1080                 mpt_free_request(mpt, req);
1081         }
1082 }
1083
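/*
 * Limit the number of SIM queue openings for a RAID volume to the
 * configured RAID queue depth.
 */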
1084 static void
1085 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1086                        struct cam_path *path)
1087 {
1088         struct ccb_relsim crs;
1089
1090         xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1091         crs.ccb_h.func_code = XPT_REL_SIMQ;
1092         crs.ccb_h.flags = CAM_DEV_QFREEZE;
1093         crs.release_flags = RELSIM_ADJUST_OPENINGS;
1094         crs.openings = mpt->raid_queue_depth;
1095         xpt_action((union ccb *)&crs);
1096         if (crs.ccb_h.status != CAM_REQ_CMP)
1097                 mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1098                             "with CAM status %#x\n", crs.ccb_h.status);
1099 }
1100
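/*
 * Report a volume's settings, hot spare pool membership, and the state
 * of each member disk on the console.
 */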
1101 static void
1102 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1103 {
1104         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1105         u_int i;
1106
1107         vol_pg = mpt_vol->config_page;
1108         mpt_vol_prt(mpt, mpt_vol, "Settings (");
1109         for (i = 1; i <= 0x8000; i <<= 1) {
1110                 switch (vol_pg->VolumeSettings.Settings & i) {
1111                 case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1112                         mpt_prtc(mpt, " Member-WCE");
1113                         break;
1114                 case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1115                         mpt_prtc(mpt, " Offline-On-SMART-Err");
1116                         break;
1117                 case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1118                         mpt_prtc(mpt, " Hot-Plug-Spares");
1119                         break;
1120                 case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1121                         mpt_prtc(mpt, " High-Priority-ReSync");
1122                         break;
1123                 default:
1124                         break;
1125                 }
1126         }
1127         mpt_prtc(mpt, " )\n");
1128         if (vol_pg->VolumeSettings.HotSparePool != 0) {
1129                 mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1130                             powerof2(vol_pg->VolumeSettings.HotSparePool)
1131                           ? ":" : "s:");
1132                 for (i = 0; i < 8; i++) {
1133                         u_int mask;
1134
1135                         mask = 0x1 << i;
1136                         if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1137                                 continue;
1138                         mpt_prtc(mpt, " %d", i);
1139                 }
1140                 mpt_prtc(mpt, "\n");
1141         }
1142         mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1143         for (i = 0; i < vol_pg->NumPhysDisks; i++){
1144                 struct mpt_raid_disk *mpt_disk;
1145                 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1146                 int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1147                 U8 f, s;
1148
1149                 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1150                 disk_pg = &mpt_disk->config_page;
1151                 mpt_prtc(mpt, "      ");
1152                 mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
1153                          pt_bus, disk_pg->PhysDiskID);
1154                 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1155                         mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
1156                             "Primary" : "Secondary");
1157                 } else {
1158                         mpt_prtc(mpt, "Stripe Position %d",
1159                                  mpt_disk->member_number);
1160                 }
1161                 f = disk_pg->PhysDiskStatus.Flags;
1162                 s = disk_pg->PhysDiskStatus.State;
1163                 if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
1164                         mpt_prtc(mpt, " Out of Sync");
1165                 }
1166                 if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
1167                         mpt_prtc(mpt, " Quiesced");
1168                 }
1169                 if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
1170                         mpt_prtc(mpt, " Inactive");
1171                 }
1172                 if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
1173                         mpt_prtc(mpt, " Was Optimal");
1174                 }
1175                 if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
1176                         mpt_prtc(mpt, " Was Non-Optimal");
1177                 }
1178                 switch (s) {
1179                 case MPI_PHYSDISK0_STATUS_ONLINE:
1180                         mpt_prtc(mpt, " Online");
1181                         break;
1182                 case MPI_PHYSDISK0_STATUS_MISSING:
1183                         mpt_prtc(mpt, " Missing");
1184                         break;
1185                 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1186                         mpt_prtc(mpt, " Incompatible");
1187                         break;
1188                 case MPI_PHYSDISK0_STATUS_FAILED:
1189                         mpt_prtc(mpt, " Failed");
1190                         break;
1191                 case MPI_PHYSDISK0_STATUS_INITIALIZING:
1192                         mpt_prtc(mpt, " Initializing");
1193                         break;
1194                 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1195                         mpt_prtc(mpt, " Requested Offline");
1196                         break;
1197                 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1198                         mpt_prtc(mpt, " Requested Failed");
1199                         break;
1200                 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1201                 default:
1202                         mpt_prtc(mpt, " Offline Other (%x)", s);
1203                         break;
1204                 }
1205                 mpt_prtc(mpt, "\n");
1206         }
1207 }
1208
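/*
 * Report a physical disk's RAID and pass-through addresses and its hot
 * spare pool membership on the console.
 */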
1209 static void
1210 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1211 {
1212         CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1213         int rd_bus = cam_sim_bus(mpt->sim);
1214         int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1215         u_int i;
1216
1217         disk_pg = &mpt_disk->config_page;
1218         mpt_disk_prt(mpt, mpt_disk,
1219                      "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1220                      device_get_nameunit(mpt->dev), rd_bus,
1221                      disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1222                      pt_bus, (int)(mpt_disk - mpt->raid_disks));
1223         if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1224                 return;
1225         mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1226                      powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1227                    ? ":" : "s:");
1228         for (i = 0; i < 8; i++) {
1229                 u_int mask;
1230
1231                 mask = 0x1 << i;
1232                 if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1233                         continue;
1234                 mpt_prtc(mpt, " %d", i);
1235         }
1236         mpt_prtc(mpt, "\n");
1237 }
1238
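/*
 * Re-read the RAID physical disk configuration page for the given disk.
 */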
1239 static void
1240 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1241                       IOC_3_PHYS_DISK *ioc_disk)
1242 {
1243         int rv;
1244
1245         rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1246                                  /*PageNumber*/0, ioc_disk->PhysDiskNum,
1247                                  &mpt_disk->config_page.Header,
1248                                  /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1249         if (rv != 0) {
1250                 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1251                         "Failed to read RAID Disk Hdr(%d)\n",
1252                         ioc_disk->PhysDiskNum);
1253                 return;
1254         }
1255         rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1256                                    &mpt_disk->config_page.Header,
1257                                    sizeof(mpt_disk->config_page),
1258                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1259         if (rv != 0)
1260                 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1261                         "Failed to read RAID Disk Page(%d)\n",
1262                         ioc_disk->PhysDiskNum);
1263         mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
1264 }
1265
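/*
 * Re-read the RAID volume configuration page, update member disk
 * bookkeeping, and fetch the resync progress indicator if a resync is
 * in progress.
 */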
1266 static void
1267 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1268     CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1269 {
1270         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1271         struct mpt_raid_action_result *ar;
1272         request_t *req;
1273         int rv;
1274         int i;
1275
1276         vol_pg = mpt_vol->config_page;
1277         mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1278
1279         rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1280             ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
1281         if (rv != 0) {
1282                 mpt_vol_prt(mpt, mpt_vol,
1283                     "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
1284                     ioc_vol->VolumePageNumber);
1285                 return;
1286         }
1287
1288         rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1289             &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
1290         if (rv != 0) {
1291                 mpt_vol_prt(mpt, mpt_vol,
1292                     "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
1293                     ioc_vol->VolumePageNumber);
1294                 return;
1295         }
1296         mpt2host_config_page_raid_vol_0(vol_pg);
1297
1298         mpt_vol->flags |= MPT_RVF_ACTIVE;
1299
1300         /* Update disk entry array data. */
1301         for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1302                 struct mpt_raid_disk *mpt_disk;
1303                 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1304                 mpt_disk->volume = mpt_vol;
1305                 mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
1306                 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1307                         mpt_disk->member_number--;
1308                 }
1309         }
1310
1311         if ((vol_pg->VolumeStatus.Flags
1312            & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1313                 return;
1314
1315         req = mpt_get_request(mpt, TRUE);
1316         if (req == NULL) {
1317                 mpt_vol_prt(mpt, mpt_vol,
1318                     "mpt_refresh_raid_vol: Get request failed!\n");
1319                 return;
1320         }
1321         rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
1322             MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
1323         if (rv == ETIMEDOUT) {
1324                 mpt_vol_prt(mpt, mpt_vol,
1325                     "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
1326                 mpt_free_request(mpt, req);
1327                 return;
1328         }
1329
1330         ar = REQ_TO_RAID_ACTION_RESULT(req);
1331         if (rv == 0
1332          && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1333          && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1334                 memcpy(&mpt_vol->sync_progress,
1335                        &ar->action_data.indicator_struct,
1336                        sizeof(mpt_vol->sync_progress));
1337                 mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
1338         } else {
1339                 mpt_vol_prt(mpt, mpt_vol,
1340                     "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
1341         }
1342         mpt_free_request(mpt, req);
1343 }
1344
1345 /*
1346  * Update in-core information about RAID support.  We update any entries
1347  * that didn't previously exist or that have been marked as needing to
1348  * be updated by our event handler.  Interesting changes are displayed
1349  * to the console.
1350  */
1351 static int
1352 mpt_refresh_raid_data(struct mpt_softc *mpt)
1353 {
1354         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1355         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1356         IOC_3_PHYS_DISK *ioc_disk;
1357         IOC_3_PHYS_DISK *ioc_last_disk;
1358         CONFIG_PAGE_RAID_VOL_0  *vol_pg;
1359         size_t len;
1360         int rv;
1361         int i;
1362         u_int nonopt_volumes;
1363
1364         if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1365                 return (0);
1366         }
1367
1368         /*
1369          * Mark all items as unreferenced by the configuration.
1370          * This allows us to find, report, and discard stale
1371          * entries.
1372          */
1373         for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1374                 mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1375         }
1376         for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1377                 mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1378         }
1379
1380         /*
1381          * Get Physical Disk information.
1382          */
1383         len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1384         rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1385                                    &mpt->ioc_page3->Header, len,
1386                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1387         if (rv) {
1388                 mpt_prt(mpt,
1389                     "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1390                 return (-1);
1391         }
1392         mpt2host_config_page_ioc3(mpt->ioc_page3);
1393
1394         ioc_disk = mpt->ioc_page3->PhysDisk;
1395         ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1396         for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1397                 struct mpt_raid_disk *mpt_disk;
1398
1399                 mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1400                 mpt_disk->flags |= MPT_RDF_REFERENCED;
1401                 if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1402                  != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1403
1404                         mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1405
1406                 }
1407                 mpt_disk->flags |= MPT_RDF_ACTIVE;
1408                 mpt->raid_rescan++;
1409         }
1410
1411         /*
1412          * Refresh volume data.
1413          */
1414         len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1415         rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1416                                    &mpt->ioc_page2->Header, len,
1417                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1418         if (rv) {
1419                 mpt_prt(mpt, "mpt_refresh_raid_data: "
1420                         "Failed to read IOC Page 2\n");
1421                 return (-1);
1422         }
1423         mpt2host_config_page_ioc2(mpt->ioc_page2);
1424
1425         ioc_vol = mpt->ioc_page2->RaidVolume;
1426         ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1427         for (; ioc_vol != ioc_last_vol; ioc_vol++) {
1428                 struct mpt_raid_volume *mpt_vol;
1429
1430                 mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1431                 mpt_vol->flags |= MPT_RVF_REFERENCED;
1432                 vol_pg = mpt_vol->config_page;
1433                 if (vol_pg == NULL)
1434                         continue;
1435                 if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1436                   != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1437                  || (vol_pg->VolumeStatus.Flags
1438                    & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1439
1440                         mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1441                 }
1442                 mpt_vol->flags |= MPT_RVF_ACTIVE;
1443         }
1444
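             /*
              * Walk every volume slot: discard entries that are no longer
              * configured, announce newly seen volumes, and report state and
              * resync progress for the rest.
              */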
1445         nonopt_volumes = 0;
1446         for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1447                 struct mpt_raid_volume *mpt_vol;
1448                 uint64_t total;
1449                 uint64_t left;
1450                 int m;
1451                 u_int prio;
1452
1453                 mpt_vol = &mpt->raid_volumes[i];
1454
1455                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1456                         continue;
1457                 }
1458
1459                 vol_pg = mpt_vol->config_page;
1460                 if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1461                  == MPT_RVF_ANNOUNCED) {
1462                         mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1463                         mpt_vol->flags = 0;
1464                         continue;
1465                 }
1466
1467                 if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1468                         mpt_announce_vol(mpt, mpt_vol);
1469                         mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1470                 }
1471
1472                 if (vol_pg->VolumeStatus.State !=
1473                     MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1474                         nonopt_volumes++;
1475
1476                 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1477                         continue;
1478
1479                 mpt_vol->flags |= MPT_RVF_UP2DATE;
1480                 mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1481                     mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1482                 mpt_verify_mwce(mpt, mpt_vol);
1483
1484                 if (vol_pg->VolumeStatus.Flags == 0) {
1485                         continue;
1486                 }
1487
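                     /* Decode the volume status flag bits (low byte). */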
1488                 mpt_vol_prt(mpt, mpt_vol, "Status (");
1489                 for (m = 1; m <= 0x80; m <<= 1) {
1490                         switch (vol_pg->VolumeStatus.Flags & m) {
1491                         case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1492                                 mpt_prtc(mpt, " Enabled");
1493                                 break;
1494                         case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1495                                 mpt_prtc(mpt, " Quiesced");
1496                                 break;
1497                         case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1498                                 mpt_prtc(mpt, " Re-Syncing");
1499                                 break;
1500                         case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1501                                 mpt_prtc(mpt, " Inactive");
1502                                 break;
1503                         default:
1504                                 break;
1505                         }
1506                 }
1507                 mpt_prtc(mpt, " )\n");
1508
1509                 if ((vol_pg->VolumeStatus.Flags
1510                    & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1511                         continue;
1512
1513                 mpt_verify_resync_rate(mpt, mpt_vol);
1514
1515                 left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1516                 total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1517                 if (vol_pg->ResyncRate != 0) {
1518
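                             /*
                              * ResyncRate is on a 1-255 scale; convert it to a
                              * percentage with three fractional digits.
                              */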
1519                         prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1520                         mpt_vol_prt(mpt, mpt_vol, "Rate %u.%03u%%\n",
1521                             prio / 1000, prio % 1000);
1522                 } else {
1523                         prio = vol_pg->VolumeSettings.Settings
1524                              & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1525                         mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1526                             prio ? "High" : "Low");
1527                 }
1528                 mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1529                             "blocks remaining\n", (uintmax_t)left,
1530                             (uintmax_t)total);
1531
1532                 /* Periodically report on sync progress. */
1533                 mpt_schedule_raid_refresh(mpt);
1534         }
1535
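             /*
              * Now make the same pass over every physical disk slot.
              */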
1536         for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1537                 struct mpt_raid_disk *mpt_disk;
1538                 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1539                 int m;
1540
1541                 mpt_disk = &mpt->raid_disks[i];
1542                 disk_pg = &mpt_disk->config_page;
1543
1544                 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1545                         continue;
1546
1547                 if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1548                  == MPT_RDF_ANNOUNCED) {
1549                         mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1550                         mpt_disk->flags = 0;
1551                         mpt->raid_rescan++;
1552                         continue;
1553                 }
1554
1555                 if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1556
1557                         mpt_announce_disk(mpt, mpt_disk);
1558                         mpt_disk->flags |= MPT_RDF_ANNOUNCED;
1559                 }
1560
1561                 if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1562                         continue;
1563
1564                 mpt_disk->flags |= MPT_RDF_UP2DATE;
1565                 mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1566                 if (disk_pg->PhysDiskStatus.Flags == 0)
1567                         continue;
1568
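                     /* Decode the physical disk status flag bits (low byte). */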
1569                 mpt_disk_prt(mpt, mpt_disk, "Status (");
1570                 for (m = 1; m <= 0x80; m <<= 1) {
1571                         switch (disk_pg->PhysDiskStatus.Flags & m) {
1572                         case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1573                                 mpt_prtc(mpt, " Out-Of-Sync");
1574                                 break;
1575                         case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1576                                 mpt_prtc(mpt, " Quiesced");
1577                                 break;
1578                         default:
1579                                 break;
1580                         }
1581                 }
1582                 mpt_prtc(mpt, " )\n");
1583         }
1584
1585         mpt->raid_nonopt_volumes = nonopt_volumes;
1586         return (0);
1587 }
1588
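     /*
      * Callout handler armed by mpt_schedule_raid_refresh(); it simply wakes
      * the RAID support code via mpt_raid_wakeup() so that volume state and
      * resync progress get refreshed and reported again.
      */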
1589 static void
1590 mpt_raid_timer(void *arg)
1591 {
1592         struct mpt_softc *mpt;
1593
1594         mpt = (struct mpt_softc *)arg;
1595         MPT_LOCK_ASSERT(mpt);
1596         mpt_raid_wakeup(mpt);
1597 }
1598
1599 static void
1600 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1601 {
1602
1603         callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1604                       mpt_raid_timer, mpt);
1605 }
1606
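     /*
      * Release all RAID bookkeeping: per-volume config pages, the volume and
      * disk arrays, and the cached IOC pages 2 and 3.
      */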
1607 void
1608 mpt_raid_free_mem(struct mpt_softc *mpt)
1609 {
1610
1611         if (mpt->raid_volumes) {
1612                 struct mpt_raid_volume *mpt_raid;
1613                 int i;
1614                 for (i = 0; i < mpt->raid_max_volumes; i++) {
1615                         mpt_raid = &mpt->raid_volumes[i];
1616                         if (mpt_raid->config_page) {
1617                                 kfree(mpt_raid->config_page, M_DEVBUF);
1618                                 mpt_raid->config_page = NULL;
1619                         }
1620                 }
1621                 kfree(mpt->raid_volumes, M_DEVBUF);
1622                 mpt->raid_volumes = NULL;
1623         }
1624         if (mpt->raid_disks) {
1625                 kfree(mpt->raid_disks, M_DEVBUF);
1626                 mpt->raid_disks = NULL;
1627         }
1628         if (mpt->ioc_page2) {
1629                 kfree(mpt->ioc_page2, M_DEVBUF);
1630                 mpt->ioc_page2 = NULL;
1631         }
1632         if (mpt->ioc_page3) {
1633                 kfree(mpt->ioc_page3, M_DEVBUF);
1634                 mpt->ioc_page3 = NULL;
1635         }
1636         mpt->raid_max_volumes = 0;
1637         mpt->raid_max_disks = 0;
1638 }
1639
1640 static int
1641 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1642 {
1643         struct mpt_raid_volume *mpt_vol;
1644
1645         if ((rate > MPT_RAID_RESYNC_RATE_MAX
1646           || rate < MPT_RAID_RESYNC_RATE_MIN)
1647          && rate != MPT_RAID_RESYNC_RATE_NC)
1648                 return (EINVAL);
1649
1650         MPT_LOCK(mpt);
1651         mpt->raid_resync_rate = rate;
1652         RAID_VOL_FOREACH(mpt, mpt_vol) {
1653                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1654                         continue;
1655                 }
1656                 mpt_verify_resync_rate(mpt, mpt_vol);
1657         }
1658         MPT_UNLOCK(mpt);
1659         return (0);
1660 }
1661
1662 static int
1663 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1664 {
1665         struct mpt_raid_volume *mpt_vol;
1666
1667         if (vol_queue_depth > 255 || vol_queue_depth < 1)
1668                 return (EINVAL);
1669
1670         MPT_LOCK(mpt);
1671         mpt->raid_queue_depth = vol_queue_depth;
1672         RAID_VOL_FOREACH(mpt, mpt_vol) {
1673                 struct cam_path *path;
1674                 int error;
1675
1676                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1677                         continue;
1678
1679                 mpt->raid_rescan = 0;
1680
1681                 MPTLOCK_2_CAMLOCK(mpt);
1682                 error = xpt_create_path(&path, xpt_periph,
1683                                         cam_sim_path(mpt->sim),
1684                                         mpt_vol->config_page->VolumeID,
1685                                         /*lun*/0);
1686                 if (error != CAM_REQ_CMP) {
1687                         CAMLOCK_2_MPTLOCK(mpt);
1688                         mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1689                         continue;
1690                 }
1691                 mpt_adjust_queue_depth(mpt, mpt_vol, path);
1692                 xpt_free_path(path);
1693                 CAMLOCK_2_MPTLOCK(mpt);
1694         }
1695         MPT_UNLOCK(mpt);
1696         return (0);
1697 }
1698
1699 static int
1700 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1701 {
1702         struct mpt_raid_volume *mpt_vol;
1703         int force_full_resync;
1704
1705         MPT_LOCK(mpt);
1706         if (mwce == mpt->raid_mwce_setting) {
1707                 MPT_UNLOCK(mpt);
1708                 return (0);
1709         }
1710
1711         /*
1712          * Catch MWCE being left on due to a failed shutdown.  Since
1713          * sysctls cannot be set by the loader, we treat the first
1714          * setting of this variable specially and force a full volume
1715          * resync if MWCE is enabled and a resync is in progress.
1716          */
1717         force_full_resync = 0;
1718         if (mpt->raid_mwce_set == 0
1719          && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1720          && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1721                 force_full_resync = 1;
1722
1723         mpt->raid_mwce_setting = mwce;
1724         RAID_VOL_FOREACH(mpt, mpt_vol) {
1725                 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1726                 int resyncing;
1727                 int mwce;
1728
1729                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1730                         continue;
1731
1732                 vol_pg = mpt_vol->config_page;
1733                 resyncing = vol_pg->VolumeStatus.Flags
1734                           & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1735                 mwce = vol_pg->VolumeSettings.Settings
1736                      & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1737                 if (force_full_resync && resyncing && mwce) {
1738
1739                         /*
1740                          * XXX disable/enable volume should force a resync,
1741                          *     but we'll need to quiesce, drain, and restart
1742                          *     I/O to do that.
1743                          */
1744                         mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1745                                     "detected.  Suggest full resync.\n");
1746                 }
1747                 mpt_verify_mwce(mpt, mpt_vol);
1748         }
1749         mpt->raid_mwce_set = 1;
1750         MPT_UNLOCK(mpt);
1751         return (0);
1752 }
1753
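     /*
      * Accepted values for the vol_member_wce sysctl.  The index of the
      * matching string is handed straight to mpt_raid_set_vol_mwce(), so the
      * order here must track the mpt_raid_mwce_t values.
      */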
1754 static const char *mpt_vol_mwce_strs[] =
1755 {
1756         "On",
1757         "Off",
1758         "On-During-Rebuild",
1759         "NC"
1760 };
1761
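     /*
      * sysctl handler: report the current member write-cache-enable setting
      * as a string and, on write, map the supplied string back to its index
      * in mpt_vol_mwce_strs above.
      */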
1762 static int
1763 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1764 {
1765         char inbuf[20];
1766         struct mpt_softc *mpt;
1767         const char *str;
1768         int error;
1769         u_int size;
1770         u_int i;
1771
1772         mpt = (struct mpt_softc *)arg1;
1773         str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1774         error = SYSCTL_OUT(req, str, strlen(str) + 1);
1775         if (error || !req->newptr) {
1776                 return (error);
1777         }
1778
1779         size = req->newlen - req->newidx;
1780         if (size >= sizeof(inbuf)) {
1781                 return (EINVAL);
1782         }
1783
1784         error = SYSCTL_IN(req, inbuf, size);
1785         if (error) {
1786                 return (error);
1787         }
1788         inbuf[size] = '\0';
1789         for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1790                 if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1791                         return (mpt_raid_set_vol_mwce(mpt, i));
1792                 }
1793         }
1794         return (EINVAL);
1795 }
1796
1797 static int
1798 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1799 {
1800         struct mpt_softc *mpt;
1801         u_int raid_resync_rate;
1802         int error;
1803
1804         mpt = (struct mpt_softc *)arg1;
1805         raid_resync_rate = mpt->raid_resync_rate;
1806
1807         error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1808         if (error || !req->newptr) {
1809                 return (error);
1810         }
1811
1812         return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1813 }
1814
1815 static int
1816 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1817 {
1818         struct mpt_softc *mpt;
1819         u_int raid_queue_depth;
1820         int error;
1821
1822         mpt = (struct mpt_softc *)arg1;
1823         raid_queue_depth = mpt->raid_queue_depth;
1824
1825         error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1826         if (error || !req->newptr) {
1827                 return (error);
1828         }
1829
1830         return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1831 }
1832
1833 static void
1834 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1835 {
1836         SYSCTL_ADD_PROC(&mpt->mpt_sysctl_ctx,
1837                         SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
1838                         "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1839                         mpt_raid_sysctl_vol_member_wce, "A",
1840                         "volume member WCE (On, Off, On-During-Rebuild, NC)");
1841
1842         SYSCTL_ADD_PROC(&mpt->mpt_sysctl_ctx,
1843                         SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
1844                         "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1845                         mpt_raid_sysctl_vol_queue_depth, "I",
1846                         "default volume queue depth");
1847
1848         SYSCTL_ADD_PROC(&mpt->mpt_sysctl_ctx,
1849                         SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
1850                         "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1851                         mpt_raid_sysctl_vol_resync_rate, "I",
1852                         "volume resync priority (0 == NC, 1 - 255)");
1853         SYSCTL_ADD_UINT(&mpt->mpt_sysctl_ctx,
1854                         SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
1855                         "nonoptimal_volumes", CTLFLAG_RD,
1856                         &mpt->raid_nonopt_volumes, 0,
1857                         "number of nonoptimal volumes");
1858 }
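     /*
      * Example usage from userland, assuming (hypothetically) that the
      * per-unit tree behind mpt_sysctl_tree ends up rooted at dev.mpt.0;
      * the actual MIB prefix depends on how the attachment registers it:
      *
      *   sysctl dev.mpt.0.vol_member_wce=On-During-Rebuild
      *   sysctl dev.mpt.0.vol_queue_depth=64
      *   sysctl dev.mpt.0.vol_resync_rate=128
      *   sysctl dev.mpt.0.nonoptimal_volumes
      */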