1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*-
35  * Some Breakage and Bug Fixing added later.
36  * Copyright (c) 2006, by Matthew Jacob
37  * All Rights Reserved
38  *
39  * Support from LSI-Logic has also gone a great deal toward making this a
40  * workable subsystem and is gratefully acknowledged.
41  *
42  * $FreeBSD: src/sys/dev/mpt/mpt_raid.c,v 1.20 2009/05/21 12:36:40 jhb Exp $
43  */
44
45 #include <dev/disk/mpt/mpt.h>
46 #include <dev/disk/mpt/mpt_raid.h>
47
48 #include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
49 #include "dev/disk/mpt/mpilib/mpi_raid.h"
50
51 #include <bus/cam/cam.h>
52 #include <bus/cam/cam_ccb.h>
53 #include <bus/cam/cam_sim.h>
54 #include <bus/cam/cam_xpt_periph.h>
55 #include <bus/cam/cam_xpt_sim.h>
56
57 #if __FreeBSD_version < 500000
58 #include <sys/devicestat.h>
59 #define GIANT_REQUIRED
60 #endif
61 #include <bus/cam/cam_periph.h>
62
63 #include <sys/callout.h>
64 #include <sys/kthread.h>
65 #include <sys/sysctl.h>
66
67 #include <machine/stdarg.h>
68
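/*
 * RAID action results are stashed in the request buffer immediately
 * after the RAID action request message itself; REQ_TO_RAID_ACTION_RESULT()
 * below recovers a pointer to that area once the reply has been parsed.
 */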
69 struct mpt_raid_action_result
70 {
71         union {
72                 MPI_RAID_VOL_INDICATOR  indicator_struct;
73                 uint32_t                new_settings;
74                 uint8_t                 phys_disk_num;
75         } action_data;
76         uint16_t                        action_status;
77 };
78
79 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
80         (((MSG_RAID_ACTION_REQUEST *)((req)->req_vbuf)) + 1))
81
82 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
83
84
85 static mpt_probe_handler_t      mpt_raid_probe;
86 static mpt_attach_handler_t     mpt_raid_attach;
87 static mpt_enable_handler_t     mpt_raid_enable;
88 static mpt_event_handler_t      mpt_raid_event;
89 static mpt_shutdown_handler_t   mpt_raid_shutdown;
90 static mpt_reset_handler_t      mpt_raid_ioc_reset;
91 static mpt_detach_handler_t     mpt_raid_detach;
92
93 static struct mpt_personality mpt_raid_personality =
94 {
95         .name           = "mpt_raid",
96         .probe          = mpt_raid_probe,
97         .attach         = mpt_raid_attach,
98         .enable         = mpt_raid_enable,
99         .event          = mpt_raid_event,
100         .reset          = mpt_raid_ioc_reset,
101         .shutdown       = mpt_raid_shutdown,
102         .detach         = mpt_raid_detach,
103 };
104
105 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
106 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
107
108 static mpt_reply_handler_t mpt_raid_reply_handler;
109 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
110                                         MSG_DEFAULT_REPLY *reply_frame);
111 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
112 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
113 static void mpt_raid_thread(void *arg);
114 static timeout_t mpt_raid_timer;
115 #if 0
116 static void mpt_enable_vol(struct mpt_softc *mpt,
117                            struct mpt_raid_volume *mpt_vol, int enable);
118 #endif
119 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
120 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
121     struct cam_path *);
122 #if __FreeBSD_version < 500000
123 #define mpt_raid_sysctl_attach(x)       do { } while (0)
124 #else
125 static void mpt_raid_sysctl_attach(struct mpt_softc *);
126 #endif
127
128 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
129
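/*
 * Human readable names for volume types, volume states, and physical
 * disk states as reported in the firmware's RAID configuration pages.
 */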
130 const char *
131 mpt_vol_type(struct mpt_raid_volume *vol)
132 {
133         switch (vol->config_page->VolumeType) {
134         case MPI_RAID_VOL_TYPE_IS:
135                 return ("RAID-0");
136         case MPI_RAID_VOL_TYPE_IME:
137                 return ("RAID-1E");
138         case MPI_RAID_VOL_TYPE_IM:
139                 return ("RAID-1");
140         default:
141                 return ("Unknown");
142         }
143 }
144
145 const char *
146 mpt_vol_state(struct mpt_raid_volume *vol)
147 {
148         switch (vol->config_page->VolumeStatus.State) {
149         case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
150                 return ("Optimal");
151         case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
152                 return ("Degraded");
153         case MPI_RAIDVOL0_STATUS_STATE_FAILED:
154                 return ("Failed");
155         default:
156                 return ("Unknown");
157         }
158 }
159
160 const char *
161 mpt_disk_state(struct mpt_raid_disk *disk)
162 {
163         switch (disk->config_page.PhysDiskStatus.State) {
164         case MPI_PHYSDISK0_STATUS_ONLINE:
165                 return ("Online");
166         case MPI_PHYSDISK0_STATUS_MISSING:
167                 return ("Missing");
168         case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
169                 return ("Incompatible");
170         case MPI_PHYSDISK0_STATUS_FAILED:
171                 return ("Failed");
172         case MPI_PHYSDISK0_STATUS_INITIALIZING:
173                 return ("Initializing");
174         case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
175                 return ("Offline Requested");
176         case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
177                 return ("Failed per Host Request");
178         case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
179                 return ("Offline");
180         default:
181                 return ("Unknown");
182         }
183 }
184
185 void
186 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
187             const char *fmt, ...)
188 {
189         __va_list ap;
190
191         kprintf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
192                (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
193                vol->config_page->VolumeBus, vol->config_page->VolumeID);
194         __va_start(ap, fmt);
195         kvprintf(fmt, ap);
196         __va_end(ap);
197 }
198
199 void
200 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
201              const char *fmt, ...)
202 {
203         __va_list ap;
204
205         if (disk->volume != NULL) {
206                 kprintf("(%s:vol%d:%d): ",
207                        device_get_nameunit(mpt->dev),
208                        disk->volume->config_page->VolumeID,
209                        disk->member_number);
210         } else {
211                 kprintf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
212                        disk->config_page.PhysDiskBus,
213                        disk->config_page.PhysDiskID);
214         }
215         __va_start(ap, fmt);
216         kvprintf(fmt, ap);
217         __va_end(ap);
218 }
219
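/*
 * CAM async callback.  When a new device is announced, check whether it
 * corresponds to an active RAID volume and, if so, clamp its queue depth
 * to the configured RAID queue depth.
 */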
220 static void
221 mpt_raid_async(void *callback_arg, u_int32_t code,
222                struct cam_path *path, void *arg)
223 {
224         struct mpt_softc *mpt;
225
226         mpt = (struct mpt_softc*)callback_arg;
227         switch (code) {
228         case AC_FOUND_DEVICE:
229         {
230                 struct ccb_getdev *cgd;
231                 struct mpt_raid_volume *mpt_vol;
232
233                 cgd = (struct ccb_getdev *)arg;
234                 if (cgd == NULL) {
235                         break;
236                 }
237
238                 mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
239                          cgd->ccb_h.target_id);
240
241                 RAID_VOL_FOREACH(mpt, mpt_vol) {
242                         if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
243                                 continue;
244
245                         if (mpt_vol->config_page->VolumeID
246                          == cgd->ccb_h.target_id) {
247                                 mpt_adjust_queue_depth(mpt, mpt_vol, path);
248                                 break;
249                         }
250                 }
251         }
252         default:
253                 break;
254         }
255 }
256
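/*
 * The RAID personality only attaches to controllers whose IOC Page 2
 * reports integrated RAID support (a non-zero physical disk limit).
 */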
257 int
258 mpt_raid_probe(struct mpt_softc *mpt)
259 {
260         if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
261                 return (ENODEV);
262         }
263         return (0);
264 }
265
266 int
267 mpt_raid_attach(struct mpt_softc *mpt)
268 {
269         struct ccb_setasync csa;
270         mpt_handler_t    handler;
271         int              error;
272
273         mpt_callout_init(&mpt->raid_timer);
274
275         error = mpt_spawn_raid_thread(mpt);
276         if (error != 0) {
277                 mpt_prt(mpt, "Unable to spawn RAID thread!\n");
278                 goto cleanup;
279         }
280
281         MPT_LOCK(mpt);
282         handler.reply_handler = mpt_raid_reply_handler;
283         error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
284                                      &raid_handler_id);
285         if (error != 0) {
286                 mpt_prt(mpt, "Unable to register RAID handler!\n");
287                 goto cleanup;
288         }
289
290         xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
291         csa.ccb_h.func_code = XPT_SASYNC_CB;
292         csa.event_enable = AC_FOUND_DEVICE;
293         csa.callback = mpt_raid_async;
294         csa.callback_arg = mpt;
295         xpt_action((union ccb *)&csa);
296         if (csa.ccb_h.status != CAM_REQ_CMP) {
297                 mpt_prt(mpt, "mpt_raid_attach: Unable to register "
298                         "CAM async handler.\n");
299         }
300         MPT_UNLOCK(mpt);
301
302         mpt_raid_sysctl_attach(mpt);
303         return (0);
304 cleanup:
305         MPT_UNLOCK(mpt);
306         mpt_raid_detach(mpt);
307         return (error);
308 }
309
310 int
311 mpt_raid_enable(struct mpt_softc *mpt)
312 {
313         return (0);
314 }
315
316 void
317 mpt_raid_detach(struct mpt_softc *mpt)
318 {
319         struct ccb_setasync csa;
320         mpt_handler_t handler;
321
322         callout_stop(&mpt->raid_timer);
323         MPT_LOCK(mpt);
324         mpt_terminate_raid_thread(mpt);
325
326         handler.reply_handler = mpt_raid_reply_handler;
327         mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
328                                raid_handler_id);
329         xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
330         csa.ccb_h.func_code = XPT_SASYNC_CB;
331         csa.event_enable = 0;
332         csa.callback = mpt_raid_async;
333         csa.callback_arg = mpt;
334         xpt_action((union ccb *)&csa);
335         MPT_UNLOCK(mpt);
336 }
337
338 static void
339 mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
340 {
341         /* Nothing to do yet. */
342 }
343
344 static const char *raid_event_txt[] =
345 {
346         "Volume Created",
347         "Volume Deleted",
348         "Volume Settings Changed",
349         "Volume Status Changed",
350         "Volume Physical Disk Membership Changed",
351         "Physical Disk Created",
352         "Physical Disk Deleted",
353         "Physical Disk Settings Changed",
354         "Physical Disk Status Changed",
355         "Domain Validation Required",
356         "SMART Data Received",
357         "Replace Action Started",
358 };
359
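/*
 * Handle MPI_EVENT_INTEGRATED_RAID notifications: mark the affected
 * volume or disk as stale, request a bus rescan where membership may
 * have changed, log the event, and wake the RAID monitoring thread.
 */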
360 static int
361 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
362                MSG_EVENT_NOTIFY_REPLY *msg)
363 {
364         EVENT_DATA_RAID *raid_event;
365         struct mpt_raid_volume *mpt_vol;
366         struct mpt_raid_disk *mpt_disk;
367         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
368         int i;
369         int print_event;
370
371         if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
372                 return (0);
373         }
374
375         raid_event = (EVENT_DATA_RAID *)&msg->Data;
376
377         mpt_vol = NULL;
378         vol_pg = NULL;
379         if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
380                 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
381                         mpt_vol = &mpt->raid_volumes[i];
382                         vol_pg = mpt_vol->config_page;
383
384                         if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
385                                 continue;
386
387                         if (vol_pg->VolumeID == raid_event->VolumeID
388                          && vol_pg->VolumeBus == raid_event->VolumeBus)
389                                 break;
390                 }
391                 if (i >= mpt->ioc_page2->MaxVolumes) {
392                         mpt_vol = NULL;
393                         vol_pg = NULL;
394                 }
395         }
396
397         mpt_disk = NULL;
398         if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
399                 mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
400                 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
401                         mpt_disk = NULL;
402                 }
403         }
404
405         print_event = 1;
406         switch(raid_event->ReasonCode) {
407         case MPI_EVENT_RAID_RC_VOLUME_CREATED:
408         case MPI_EVENT_RAID_RC_VOLUME_DELETED:
409                 break;
410         case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
411                 if (mpt_vol != NULL) {
412                         if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
413                                 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
414                         } else {
415                                 /*
416                                  * Coalesce status messages into one
417                                  * per background run of our RAID thread.
418                                  * This removes "spurious" status messages
419                                  * from our output.
420                                  */
421                                 print_event = 0;
422                         }
423                 }
424                 break;
425         case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
426         case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
427                 mpt->raid_rescan++;
428                 if (mpt_vol != NULL) {
429                         mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
430                 }
431                 break;
432         case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
433         case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
434                 mpt->raid_rescan++;
435                 break;
436         case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
437         case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
438                 mpt->raid_rescan++;
439                 if (mpt_disk != NULL) {
440                         mpt_disk->flags &= ~MPT_RDF_UP2DATE;
441                 }
442                 break;
443         case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
444                 mpt->raid_rescan++;
445                 break;
446         case MPI_EVENT_RAID_RC_SMART_DATA:
447         case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
448                 break;
449         }
450
451         if (print_event) {
452                 if (mpt_disk != NULL) {
453                         mpt_disk_prt(mpt, mpt_disk, "");
454                 } else if (mpt_vol != NULL) {
455                         mpt_vol_prt(mpt, mpt_vol, "");
456                 } else {
457                         mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
458                                 raid_event->VolumeID);
459
460                         if (raid_event->PhysDiskNum != 0xFF)
461                                 mpt_prtc(mpt, ":%d): ",
462                                          raid_event->PhysDiskNum);
463                         else
464                                 mpt_prtc(mpt, "): ");
465                 }
466
467                 if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
468                         mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
469                                  raid_event->ReasonCode);
470                 else
471                         mpt_prtc(mpt, "%s\n",
472                                  raid_event_txt[raid_event->ReasonCode]);
473         }
474
475         if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
476                 /* XXX Use CAM's print sense for this... */
477                 if (mpt_disk != NULL)
478                         mpt_disk_prt(mpt, mpt_disk, "");
479                 else
480                         mpt_prt(mpt, "Volume(%d:%d:%d: ",
481                             raid_event->VolumeBus, raid_event->VolumeID,
482                             raid_event->PhysDiskNum);
483                 mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
484                          raid_event->ASC, raid_event->ASCQ);
485         }
486
487         mpt_raid_wakeup(mpt);
488         return (1);
489 }
490
491 static void
492 mpt_raid_shutdown(struct mpt_softc *mpt)
493 {
494         struct mpt_raid_volume *mpt_vol;
495
496         if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
497                 return;
498         }
499
500         mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
501         RAID_VOL_FOREACH(mpt, mpt_vol) {
502                 mpt_verify_mwce(mpt, mpt_vol);
503         }
504 }
505
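/*
 * Reply handler for RAID action requests.  Marks the request done,
 * wakes any sleeping submitter, and otherwise frees the request once
 * the reply frame (if any) has been parsed.
 */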
506 static int
507 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
508     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
509 {
510         int free_req;
511
512         if (req == NULL)
513                 return (TRUE);
514
515         free_req = TRUE;
516         if (reply_frame != NULL)
517                 free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
518 #ifdef NOTYET
519         else if (req->ccb != NULL) {
520                 /* Complete Quiesce CCB with error... */
521         }
522 #endif
523
524         req->state &= ~REQ_STATE_QUEUED;
525         req->state |= REQ_STATE_DONE;
526         TAILQ_REMOVE(&mpt->request_pending_list, req, links);
527
528         if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
529                 wakeup(req);
530         } else if (free_req) {
531                 mpt_free_request(mpt, req);
532         }
533
534         return (TRUE);
535 }
536
537 /*
538  * Parse additional completion information in the reply
539  * frame for RAID I/O requests.
540  */
541 static int
542 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
543     MSG_DEFAULT_REPLY *reply_frame)
544 {
545         MSG_RAID_ACTION_REPLY *reply;
546         struct mpt_raid_action_result *action_result;
547         MSG_RAID_ACTION_REQUEST *rap;
548
549         reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
550         req->IOCStatus = le16toh(reply->IOCStatus);
551         rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
552
553         switch (rap->Action) {
554         case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
555                 mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
556                 break;
557         case MPI_RAID_ACTION_ENABLE_PHYS_IO:
558                 mpt_prt(mpt, "ENABLE PHYSIO DONE\n");
559                 break;
560         default:
561                 break;
562         }
563         action_result = REQ_TO_RAID_ACTION_RESULT(req);
564         memcpy(&action_result->action_data, &reply->ActionData,
565             sizeof(action_result->action_data));
566         action_result->action_status = le16toh(reply->ActionStatus);
567         return (TRUE);
568 }
569
570 /*
571  * Utility routine to perform a RAID action command.
572  */
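/*
 * Callers normally obtain a request with mpt_get_request(), issue the
 * action with wait set, and then inspect REQ_IOCSTATUS() and
 * REQ_TO_RAID_ACTION_RESULT() before freeing the request (see
 * mpt_verify_mwce() and mpt_refresh_raid_vol() below).
 */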
573 int
574 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
575                    struct mpt_raid_disk *disk, request_t *req, u_int Action,
576                    uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
577                    int write, int wait)
578 {
579         MSG_RAID_ACTION_REQUEST *rap;
580         SGE_SIMPLE32 *se;
581
582         rap = req->req_vbuf;
583         memset(rap, 0, sizeof *rap);
584         rap->Action = Action;
585         rap->ActionDataWord = htole32(ActionDataWord);
586         rap->Function = MPI_FUNCTION_RAID_ACTION;
587         rap->VolumeID = vol->config_page->VolumeID;
588         rap->VolumeBus = vol->config_page->VolumeBus;
589         if (disk != NULL)
590                 rap->PhysDiskNum = disk->config_page.PhysDiskNum;
591         else
592                 rap->PhysDiskNum = 0xFF;
593         se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
594         se->Address = htole32(addr);
595         MPI_pSGE_SET_LENGTH(se, len);
596         MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
597             MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
598             MPI_SGE_FLAGS_END_OF_LIST |
599             (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
600         se->FlagsLength = htole32(se->FlagsLength);
601         rap->MsgContext = htole32(req->index | raid_handler_id);
602
603         mpt_check_doorbell(mpt);
604         mpt_send_cmd(mpt, req);
605
606         if (wait) {
607                 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
608                                      /*sleep_ok*/FALSE, /*time_ms*/2000));
609         } else {
610                 return (0);
611         }
612 }
613
614 /*************************** RAID Status Monitoring ***************************/
615 static int
616 mpt_spawn_raid_thread(struct mpt_softc *mpt)
617 {
618         int error;
619
620         /*
621          * Freeze out any CAM transactions until our thread
622          * is able to run at least once.  We need to update
623          * our RAID pages before accepting I/O or we may
624          * reject I/O to an ID we later determine is for a
625          * hidden physdisk.
626          */
627         MPT_LOCK(mpt);
628         xpt_freeze_simq(mpt->phydisk_sim, 1);
629         MPT_UNLOCK(mpt);
630         error = mpt_kthread_create(mpt_raid_thread, mpt,
631             &mpt->raid_thread, /*flags*/0, /*altstack*/0,
632             "mpt_raid%d", mpt->unit);
633         if (error != 0) {
634                 MPT_LOCK(mpt);
635                 xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
636                 MPT_UNLOCK(mpt);
637         }
638         return (error);
639 }
640
641 static void
642 mpt_terminate_raid_thread(struct mpt_softc *mpt)
643 {
644
645         if (mpt->raid_thread == NULL) {
646                 return;
647         }
648         mpt->shutdwn_raid = 1;
649         wakeup(mpt->raid_volumes);
650         /*
651          * Sleep on a slightly different location
652          * for this interlock just for added safety.
653          */
654         mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
655 }
656
657 static void
658 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
659 {
660
661         xpt_free_path(ccb->ccb_h.path);
662         xpt_free_ccb(ccb);
663 }
664
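/*
 * RAID monitoring thread.  Refreshes the RAID configuration pages
 * whenever the event handler asks for it and performs CAM bus rescans
 * when the set of visible volumes or physical disks may have changed.
 */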
665 static void
666 mpt_raid_thread(void *arg)
667 {
668         struct mpt_softc *mpt;
669         int firstrun;
670
671         mpt = (struct mpt_softc *)arg;
672         firstrun = 1;
673
674         get_mplock();
675         MPT_LOCK(mpt);
676
677         while (mpt->shutdwn_raid == 0) {
678
679                 if (mpt->raid_wakeup == 0) {
680                         mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
681                         continue;
682                 }
683
684                 mpt->raid_wakeup = 0;
685
686                 if (mpt_refresh_raid_data(mpt)) {
687                         mpt_schedule_raid_refresh(mpt); /* XX NOT QUITE RIGHT */
688                         continue;
689                 }
690
691                 /*
692                  * Now that we have our first snapshot of RAID data,
693                  * allow CAM to access our physical disk bus.
694                  */
695                 if (firstrun) {
696                         firstrun = 0;
697                         MPTLOCK_2_CAMLOCK(mpt);
698                         xpt_release_simq(mpt->phydisk_sim, TRUE);
699                         CAMLOCK_2_MPTLOCK(mpt);
700                 }
701
702                 if (mpt->raid_rescan != 0) {
703                         union ccb *ccb;
704                         struct cam_path *path;
705                         int error;
706
707                         mpt->raid_rescan = 0;
708                         MPT_UNLOCK(mpt);
709
710                         ccb = xpt_alloc_ccb();
711
712                         MPT_LOCK(mpt);
713                         error = xpt_create_path(&path, xpt_periph,
714                             cam_sim_path(mpt->phydisk_sim),
715                             CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
716                         if (error != CAM_REQ_CMP) {
717                                 xpt_free_ccb(ccb);
718                                 mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
719                         } else {
720                                 xpt_setup_ccb(&ccb->ccb_h, path, 5);
721                                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
722                                 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
723                                 ccb->crcn.flags = CAM_FLAG_NONE;
724                                 MPTLOCK_2_CAMLOCK(mpt);
725                                 xpt_action(ccb);
726                                 CAMLOCK_2_MPTLOCK(mpt);
727                         }
728                 }
729         }
730         mpt->raid_thread = NULL;
731         wakeup(&mpt->raid_thread);
732         MPT_UNLOCK(mpt);
733         rel_mplock();
734 }
735
736 #if 0
737 static void
738 mpt_raid_quiesce_timeout(void *arg)
739 {
740         /* Complete the CCB with error */
741         /* COWWWW */
742 }
743
744 static timeout_t mpt_raid_quiesce_timeout;
745 cam_status
746 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
747                       request_t *req)
748 {
749         union ccb *ccb;
750
751         ccb = req->ccb;
752         if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
753                 return (CAM_REQ_CMP);
754
755         if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
756                 int rv;
757
758                 mpt_disk->flags |= MPT_RDF_QUIESCING;
759                 xpt_freeze_devq(ccb->ccb_h.path, 1);
760
761                 rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
762                                         MPI_RAID_ACTION_QUIESCE_PHYS_IO,
763                                         /*ActionData*/0, /*addr*/0,
764                                         /*len*/0, /*write*/FALSE,
765                                         /*wait*/FALSE);
766                 if (rv != 0)
767                         return (CAM_REQ_CMP_ERR);
768
769                 mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
770 #if 0
771                 if (rv == ETIMEDOUT) {
772                         mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
773                                      "Quiesce Timed-out\n");
774                         xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
775                         return (CAM_REQ_CMP_ERR);
776                 }
777
778                 ar = REQ_TO_RAID_ACTION_RESULT(req);
779                 if (rv != 0
780                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
781                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
782                         mpt_disk_prt(mpt, mpt_disk, "Quiesce Failed: "
783                                     "%d:%x:%x\n", rv, req->IOCStatus,
784                                     ar->action_status);
785                         xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
786                         return (CAM_REQ_CMP_ERR);
787                 }
788 #endif
789                 return (CAM_REQ_INPROG);
790         }
791         return (CAM_REQUEUE_REQ);
792 }
793 #endif
794
795 /* XXX Ignores that there may be multiple busses/IOCs involved. */
796 cam_status
797 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
798 {
799         struct mpt_raid_disk *mpt_disk;
800
801         mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
802         if (ccb->ccb_h.target_id < mpt->raid_max_disks
803          && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
804                 *tgt = mpt_disk->config_page.PhysDiskID;
805                 return (0);
806         }
807         mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
808                  ccb->ccb_h.target_id);
809         return (-1);
810 }
811
812 /* XXX Ignores that there may be multiple busses/IOCs involved. */
813 int
814 mpt_is_raid_volume(struct mpt_softc *mpt, int tgt)
815 {
816         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
817         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
818
819         if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
820                 return (0);
821         }
822         ioc_vol = mpt->ioc_page2->RaidVolume;
823         ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
824         for (;ioc_vol != ioc_last_vol; ioc_vol++) {
825                 if (ioc_vol->VolumeID == tgt) {
826                         return (1);
827                 }
828         }
829         return (0);
830 }
831
832 #if 0
833 static void
834 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
835                int enable)
836 {
837         request_t *req;
838         struct mpt_raid_action_result *ar;
839         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
840         int enabled;
841         int rv;
842
843         vol_pg = mpt_vol->config_page;
844         enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
845
846         /*
847          * If the setting matches the configuration,
848          * there is nothing to do.
849          */
850         if ((enabled && enable)
851          || (!enabled && !enable))
852                 return;
853
854         req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
855         if (req == NULL) {
856                 mpt_vol_prt(mpt, mpt_vol,
857                             "mpt_enable_vol: Get request failed!\n");
858                 return;
859         }
860
861         rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
862                                 enable ? MPI_RAID_ACTION_ENABLE_VOLUME
863                                        : MPI_RAID_ACTION_DISABLE_VOLUME,
864                                 /*data*/0, /*addr*/0, /*len*/0,
865                                 /*write*/FALSE, /*wait*/TRUE);
866         if (rv == ETIMEDOUT) {
867                 mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
868                             "%s Volume Timed-out\n",
869                             enable ? "Enable" : "Disable");
870                 return;
871         }
872         ar = REQ_TO_RAID_ACTION_RESULT(req);
873         if (rv != 0
874          || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
875          || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
876                 mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
877                             enable ? "Enable" : "Disable",
878                             rv, req->IOCStatus, ar->action_status);
879         }
880
881         mpt_free_request(mpt, req);
882 }
883 #endif
884
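/*
 * Bring a volume's member write cache enable (MWCE) setting into line
 * with the driver policy in mpt->raid_mwce_setting, issuing a
 * CHANGE_VOLUME_SETTINGS RAID action when the firmware disagrees.
 */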
885 static void
886 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
887 {
888         request_t *req;
889         struct mpt_raid_action_result *ar;
890         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
891         uint32_t data;
892         int rv;
893         int resyncing;
894         int mwce;
895
896         vol_pg = mpt_vol->config_page;
897         resyncing = vol_pg->VolumeStatus.Flags
898                   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
899         mwce = vol_pg->VolumeSettings.Settings
900              & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
901
902         /*
903          * If the setting matches the configuration,
904          * there is nothing to do.
905          */
906         switch (mpt->raid_mwce_setting) {
907         case MPT_RAID_MWCE_REBUILD_ONLY:
908                 if ((resyncing && mwce) || (!resyncing && !mwce)) {
909                         return;
910                 }
911                 mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
912                 if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
913                         /*
914                          * Wait one more status update to see if
915                          * resyncing gets enabled.  It gets disabled
916                          * temporarily when WCE is changed.
917                          */
918                         return;
919                 }
920                 break;
921         case MPT_RAID_MWCE_ON:
922                 if (mwce)
923                         return;
924                 break;
925         case MPT_RAID_MWCE_OFF:
926                 if (!mwce)
927                         return;
928                 break;
929         case MPT_RAID_MWCE_NC:
930                 return;
931         }
932
933         req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
934         if (req == NULL) {
935                 mpt_vol_prt(mpt, mpt_vol,
936                             "mpt_verify_mwce: Get request failed!\n");
937                 return;
938         }
939
940         vol_pg->VolumeSettings.Settings ^=
941             MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
942         memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
943         vol_pg->VolumeSettings.Settings ^=
944             MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
945         rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
946                                 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
947                                 data, /*addr*/0, /*len*/0,
948                                 /*write*/FALSE, /*wait*/TRUE);
949         if (rv == ETIMEDOUT) {
950                 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
951                             "Write Cache Enable Timed-out\n");
952                 return;
953         }
954         ar = REQ_TO_RAID_ACTION_RESULT(req);
955         if (rv != 0
956          || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
957          || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
958                 mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
959                             "%d:%x:%x\n", rv, req->IOCStatus,
960                             ar->action_status);
961         } else {
962                 vol_pg->VolumeSettings.Settings ^=
963                     MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
964         }
965         mpt_free_request(mpt, req);
966 }
967
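/*
 * Bring a volume's resync rate and resync priority into line with the
 * driver's configured mpt->raid_resync_rate, using the SET_RESYNC_RATE
 * and CHANGE_VOLUME_SETTINGS RAID actions as appropriate.
 */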
968 static void
969 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
970 {
971         request_t *req;
972         struct mpt_raid_action_result *ar;
973         CONFIG_PAGE_RAID_VOL_0  *vol_pg;
974         u_int prio;
975         int rv;
976
977         vol_pg = mpt_vol->config_page;
978
979         if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
980                 return;
981
982         /*
983          * If the current RAID resync rate does not
984          * match our configured rate, update it.
985          */
986         prio = vol_pg->VolumeSettings.Settings
987              & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
988         if (vol_pg->ResyncRate != 0
989          && vol_pg->ResyncRate != mpt->raid_resync_rate) {
990
991                 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
992                 if (req == NULL) {
993                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
994                                     "Get request failed!\n");
995                         return;
996                 }
997
998                 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
999                                         MPI_RAID_ACTION_SET_RESYNC_RATE,
1000                                         mpt->raid_resync_rate, /*addr*/0,
1001                                         /*len*/0, /*write*/FALSE, /*wait*/TRUE);
1002                 if (rv == ETIMEDOUT) {
1003                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1004                                     "Resync Rate Setting Timed-out\n");
1005                         return;
1006                 }
1007
1008                 ar = REQ_TO_RAID_ACTION_RESULT(req);
1009                 if (rv != 0
1010                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1011                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1012                         mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1013                                     "%d:%x:%x\n", rv, req->IOCStatus,
1014                                     ar->action_status);
1015                 } else
1016                         vol_pg->ResyncRate = mpt->raid_resync_rate;
1017                 mpt_free_request(mpt, req);
1018         } else if ((prio && mpt->raid_resync_rate < 128)
1019                 || (!prio && mpt->raid_resync_rate >= 128)) {
1020                 uint32_t data;
1021
1022                 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1023                 if (req == NULL) {
1024                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1025                                     "Get request failed!\n");
1026                         return;
1027                 }
1028
1029                 vol_pg->VolumeSettings.Settings ^=
1030                     MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1031                 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
1032                 vol_pg->VolumeSettings.Settings ^=
1033                     MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1034                 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1035                                         MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
1036                                         data, /*addr*/0, /*len*/0,
1037                                         /*write*/FALSE, /*wait*/TRUE);
1038                 if (rv == ETIMEDOUT) {
1039                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1040                                     "Resync Rate Setting Timed-out\n");
1041                         return;
1042                 }
1043                 ar = REQ_TO_RAID_ACTION_RESULT(req);
1044                 if (rv != 0
1045                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1046                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1047                         mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1048                                     "%d:%x:%x\n", rv, req->IOCStatus,
1049                                     ar->action_status);
1050                 } else {
1051                         vol_pg->VolumeSettings.Settings ^=
1052                             MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1053                 }
1054
1055                 mpt_free_request(mpt, req);
1056         }
1057 }
1058
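/*
 * Clamp the number of CAM command openings for a RAID volume to the
 * driver's configured RAID queue depth.
 */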
1059 static void
1060 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1061                        struct cam_path *path)
1062 {
1063         struct ccb_relsim crs;
1064
1065         xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1066         crs.ccb_h.func_code = XPT_REL_SIMQ;
1067         crs.release_flags = RELSIM_ADJUST_OPENINGS;
1068         crs.openings = mpt->raid_queue_depth;
1069         xpt_action((union ccb *)&crs);
1070         if (crs.ccb_h.status != CAM_REQ_CMP)
1071                 mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1072                             "with CAM status %#x\n", crs.ccb_h.status);
1073 }
1074
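/*
 * Announce a volume on first sight: its settings, hot spare pools, and
 * the state of each member disk.
 */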
1075 static void
1076 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1077 {
1078         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1079         u_int i;
1080
1081         vol_pg = mpt_vol->config_page;
1082         mpt_vol_prt(mpt, mpt_vol, "Settings (");
1083         for (i = 1; i <= 0x8000; i <<= 1) {
1084                 switch (vol_pg->VolumeSettings.Settings & i) {
1085                 case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1086                         mpt_prtc(mpt, " Member-WCE");
1087                         break;
1088                 case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1089                         mpt_prtc(mpt, " Offline-On-SMART-Err");
1090                         break;
1091                 case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1092                         mpt_prtc(mpt, " Hot-Plug-Spares");
1093                         break;
1094                 case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1095                         mpt_prtc(mpt, " High-Priority-ReSync");
1096                         break;
1097                 default:
1098                         break;
1099                 }
1100         }
1101         mpt_prtc(mpt, " )\n");
1102         if (vol_pg->VolumeSettings.HotSparePool != 0) {
1103                 mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1104                             powerof2(vol_pg->VolumeSettings.HotSparePool)
1105                           ? ":" : "s:");
1106                 for (i = 0; i < 8; i++) {
1107                         u_int mask;
1108
1109                         mask = 0x1 << i;
1110                         if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1111                                 continue;
1112                         mpt_prtc(mpt, " %d", i);
1113                 }
1114                 mpt_prtc(mpt, "\n");
1115         }
1116         mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1117         for (i = 0; i < vol_pg->NumPhysDisks; i++){
1118                 struct mpt_raid_disk *mpt_disk;
1119                 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1120                 int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1121                 U8 f, s;
1122
1123                 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1124                 disk_pg = &mpt_disk->config_page;
1125                 mpt_prtc(mpt, "      ");
1126                 mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
1127                          pt_bus, disk_pg->PhysDiskID);
1128                 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1129                         mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
1130                             "Primary" : "Secondary");
1131                 } else {
1132                         mpt_prtc(mpt, "Stripe Position %d",
1133                                  mpt_disk->member_number);
1134                 }
1135                 f = disk_pg->PhysDiskStatus.Flags;
1136                 s = disk_pg->PhysDiskStatus.State;
1137                 if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
1138                         mpt_prtc(mpt, " Out of Sync");
1139                 }
1140                 if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
1141                         mpt_prtc(mpt, " Quiesced");
1142                 }
1143                 if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
1144                         mpt_prtc(mpt, " Inactive");
1145                 }
1146                 if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
1147                         mpt_prtc(mpt, " Was Optimal");
1148                 }
1149                 if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
1150                         mpt_prtc(mpt, " Was Non-Optimal");
1151                 }
1152                 switch (s) {
1153                 case MPI_PHYSDISK0_STATUS_ONLINE:
1154                         mpt_prtc(mpt, " Online");
1155                         break;
1156                 case MPI_PHYSDISK0_STATUS_MISSING:
1157                         mpt_prtc(mpt, " Missing");
1158                         break;
1159                 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1160                         mpt_prtc(mpt, " Incompatible");
1161                         break;
1162                 case MPI_PHYSDISK0_STATUS_FAILED:
1163                         mpt_prtc(mpt, " Failed");
1164                         break;
1165                 case MPI_PHYSDISK0_STATUS_INITIALIZING:
1166                         mpt_prtc(mpt, " Initializing");
1167                         break;
1168                 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1169                         mpt_prtc(mpt, " Requested Offline");
1170                         break;
1171                 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1172                         mpt_prtc(mpt, " Requested Failed");
1173                         break;
1174                 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1175                 default:
1176                         mpt_prtc(mpt, " Offline Other (%x)", s);
1177                         break;
1178                 }
1179                 mpt_prtc(mpt, "\n");
1180         }
1181 }
1182
1183 static void
1184 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1185 {
1186         CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1187         int rd_bus = cam_sim_bus(mpt->sim);
1188         int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1189         u_int i;
1190
1191         disk_pg = &mpt_disk->config_page;
1192         mpt_disk_prt(mpt, mpt_disk,
1193                      "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%jd:0)\n",
1194                      device_get_nameunit(mpt->dev), rd_bus,
1195                      disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1196                      pt_bus, (intmax_t)(mpt_disk - mpt->raid_disks));
1197         if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1198                 return;
1199         mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1200                      powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1201                    ? ":" : "s:");
1202         for (i = 0; i < 8; i++) {
1203                 u_int mask;
1204
1205                 mask = 0x1 << i;
1206                 if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1207                         continue;
1208                 mpt_prtc(mpt, " %d", i);
1209         }
1210         mpt_prtc(mpt, "\n");
1211 }
1212
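/*
 * Re-read RAID Physical Disk Page 0 for a member disk and convert it to
 * host byte order.
 */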
1213 static void
1214 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1215                       IOC_3_PHYS_DISK *ioc_disk)
1216 {
1217         int rv;
1218
1219         rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1220                                  /*PageNumber*/0, ioc_disk->PhysDiskNum,
1221                                  &mpt_disk->config_page.Header,
1222                                  /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1223         if (rv != 0) {
1224                 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1225                         "Failed to read RAID Disk Hdr(%d)\n",
1226                         ioc_disk->PhysDiskNum);
1227                 return;
1228         }
1229         rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1230                                    &mpt_disk->config_page.Header,
1231                                    sizeof(mpt_disk->config_page),
1232                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1233         if (rv != 0)
1234                 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1235                         "Failed to read RAID Disk Page(%d)\n",
1236                         ioc_disk->PhysDiskNum);
1237         mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
1238 }
1239
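/*
 * Re-read RAID Volume Page 0 for a volume, refresh the member disk
 * bookkeeping, and, if a resync is in progress, fetch the progress
 * indicator via the INDICATOR_STRUCT RAID action.
 */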
1240 static void
1241 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1242     CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1243 {
1244         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1245         struct mpt_raid_action_result *ar;
1246         request_t *req;
1247         int rv;
1248         int i;
1249
1250         vol_pg = mpt_vol->config_page;
1251         mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1252
1253         rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1254             ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
1255         if (rv != 0) {
1256                 mpt_vol_prt(mpt, mpt_vol,
1257                     "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
1258                     ioc_vol->VolumePageNumber);
1259                 return;
1260         }
1261
1262         rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1263             &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
1264         if (rv != 0) {
1265                 mpt_vol_prt(mpt, mpt_vol,
1266                     "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
1267                     ioc_vol->VolumePageNumber);
1268                 return;
1269         }
1270         mpt2host_config_page_raid_vol_0(vol_pg);
1271
1272         mpt_vol->flags |= MPT_RVF_ACTIVE;
1273
1274         /* Update disk entry array data. */
1275         for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1276                 struct mpt_raid_disk *mpt_disk;
1277                 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1278                 mpt_disk->volume = mpt_vol;
1279                 mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
1280                 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1281                         mpt_disk->member_number--;
1282                 }
1283         }
1284
1285         if ((vol_pg->VolumeStatus.Flags
1286            & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1287                 return;
1288
1289         req = mpt_get_request(mpt, TRUE);
1290         if (req == NULL) {
1291                 mpt_vol_prt(mpt, mpt_vol,
1292                     "mpt_refresh_raid_vol: Get request failed!\n");
1293                 return;
1294         }
1295         rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
1296             MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
1297         if (rv == ETIMEDOUT) {
1298                 mpt_vol_prt(mpt, mpt_vol,
1299                     "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
1300                 mpt_free_request(mpt, req);
1301                 return;
1302         }
1303
1304         ar = REQ_TO_RAID_ACTION_RESULT(req);
1305         if (rv == 0
1306          && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1307          && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1308                 memcpy(&mpt_vol->sync_progress,
1309                        &ar->action_data.indicator_struct,
1310                        sizeof(mpt_vol->sync_progress));
1311                 mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
1312         } else {
1313                 mpt_vol_prt(mpt, mpt_vol,
1314                     "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
1315         }
1316         mpt_free_request(mpt, req);
1317 }
1318
1319 /*
1320  * Update in-core information about RAID support.  We update any entries
1321  * that didn't previously exist or that have been marked as needing to
1322  * be updated by our event handler.  Interesting changes are displayed
1323  * to the console.
1324  */
1325 int
1326 mpt_refresh_raid_data(struct mpt_softc *mpt)
1327 {
1328         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1329         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1330         IOC_3_PHYS_DISK *ioc_disk;
1331         IOC_3_PHYS_DISK *ioc_last_disk;
1332         CONFIG_PAGE_RAID_VOL_0  *vol_pg;
1333         size_t len;
1334         int rv;
1335         int i;
1336         u_int nonopt_volumes;
1337
1338         if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1339                 return (0);
1340         }
1341
1342         /*
1343          * Mark all items as unreferenced by the configuration.
1344          * This allows us to find, report, and discard stale
1345          * entries.
1346          */
1347         for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1348                 mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1349         }
1350         for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1351                 mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1352         }
1353
1354         /*
1355          * Get Physical Disk information.
1356          */
1357         len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1358         rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1359                                    &mpt->ioc_page3->Header, len,
1360                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1361         if (rv) {
1362                 mpt_prt(mpt,
1363                     "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1364                 return (-1);
1365         }
1366         mpt2host_config_page_ioc3(mpt->ioc_page3);
1367
1368         ioc_disk = mpt->ioc_page3->PhysDisk;
1369         ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1370         for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1371                 struct mpt_raid_disk *mpt_disk;
1372
1373                 mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1374                 mpt_disk->flags |= MPT_RDF_REFERENCED;
1375                 if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1376                  != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1377
1378                         mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1379
1380                 }
1381                 mpt_disk->flags |= MPT_RDF_ACTIVE;
1382                 mpt->raid_rescan++;
1383         }
1384
1385         /*
1386          * Refresh volume data.
1387          */
1388         len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1389         rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1390                                    &mpt->ioc_page2->Header, len,
1391                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1392         if (rv) {
1393                 mpt_prt(mpt, "mpt_refresh_raid_data: "
1394                         "Failed to read IOC Page 2\n");
1395                 return (-1);
1396         }
1397         mpt2host_config_page_ioc2(mpt->ioc_page2);
1398
1399         ioc_vol = mpt->ioc_page2->RaidVolume;
1400         ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1401         for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1402                 struct mpt_raid_volume *mpt_vol;
1403
1404                 mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1405                 mpt_vol->flags |= MPT_RVF_REFERENCED;
1406                 vol_pg = mpt_vol->config_page;
1407                 if (vol_pg == NULL)
1408                         continue;
1409                 if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1410                   != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1411                  || (vol_pg->VolumeStatus.Flags
1412                    & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1413
1414                         mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1415                 }
1416                 mpt_vol->flags |= MPT_RVF_ACTIVE;
1417         }
1418
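        /*
         * Second pass over every volume slot: discard volumes that are no
         * longer configured, announce newly seen ones, count volumes that
         * are not in the optimal state, and report status and resync
         * progress for anything that has changed.
         */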
1419         nonopt_volumes = 0;
1420         for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1421                 struct mpt_raid_volume *mpt_vol;
1422                 uint64_t total;
1423                 uint64_t left;
1424                 int m;
1425                 u_int prio;
1426
1427                 mpt_vol = &mpt->raid_volumes[i];
1428
1429                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1430                         continue;
1431                 }
1432
1433                 vol_pg = mpt_vol->config_page;
1434                 if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1435                  == MPT_RVF_ANNOUNCED) {
1436                         mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1437                         mpt_vol->flags = 0;
1438                         continue;
1439                 }
1440
1441                 if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1442                         mpt_announce_vol(mpt, mpt_vol);
1443                         mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1444                 }
1445
1446                 if (vol_pg->VolumeStatus.State !=
1447                     MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1448                         nonopt_volumes++;
1449
1450                 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1451                         continue;
1452
1453                 mpt_vol->flags |= MPT_RVF_UP2DATE;
1454                 mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1455                     mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1456                 mpt_verify_mwce(mpt, mpt_vol);
1457
1458                 if (vol_pg->VolumeStatus.Flags == 0) {
1459                         continue;
1460                 }
1461
1462                 mpt_vol_prt(mpt, mpt_vol, "Status (");
1463                 for (m = 1; m <= 0x80; m <<= 1) {
1464                         switch (vol_pg->VolumeStatus.Flags & m) {
1465                         case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1466                                 mpt_prtc(mpt, " Enabled");
1467                                 break;
1468                         case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1469                                 mpt_prtc(mpt, " Quiesced");
1470                                 break;
1471                         case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1472                                 mpt_prtc(mpt, " Re-Syncing");
1473                                 break;
1474                         case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1475                                 mpt_prtc(mpt, " Inactive");
1476                                 break;
1477                         default:
1478                                 break;
1479                         }
1480                 }
1481                 mpt_prtc(mpt, " )\n");
1482
1483                 if ((vol_pg->VolumeStatus.Flags
1484                    & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1485                         continue;
1486
1487                 mpt_verify_resync_rate(mpt, mpt_vol);
1488
1489                 left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1490                 total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1491                 if (vol_pg->ResyncRate != 0) {
1492
1493                         prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1494                         mpt_vol_prt(mpt, mpt_vol, "Rate %d.%03d%%\n",
1495                             prio / 1000, prio % 1000);
1496                 } else {
1497                         prio = vol_pg->VolumeSettings.Settings
1498                              & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1499                         mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1500                             prio ? "High" : "Low");
1501                 }
1502                 mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1503                             "blocks remaining\n", (uintmax_t)left,
1504                             (uintmax_t)total);
1505
1506                 /* Periodically report on sync progress. */
1507                 mpt_schedule_raid_refresh(mpt);
1508         }
1509
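        /*
         * Do the same bookkeeping for the physical disks: drop entries that
         * are no longer configured, announce new ones, and report status
         * flags for disks whose state has changed since the last refresh.
         */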
1510         for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1511                 struct mpt_raid_disk *mpt_disk;
1512                 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1513                 int m;
1514
1515                 mpt_disk = &mpt->raid_disks[i];
1516                 disk_pg = &mpt_disk->config_page;
1517
1518                 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1519                         continue;
1520
1521                 if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1522                  == MPT_RDF_ANNOUNCED) {
1523                         mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1524                         mpt_disk->flags = 0;
1525                         mpt->raid_rescan++;
1526                         continue;
1527                 }
1528
1529                 if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1530
1531                         mpt_announce_disk(mpt, mpt_disk);
1532                         mpt_disk->flags |= MPT_RDF_ANNOUNCED;
1533                 }
1534
1535                 if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1536                         continue;
1537
1538                 mpt_disk->flags |= MPT_RDF_UP2DATE;
1539                 mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1540                 if (disk_pg->PhysDiskStatus.Flags == 0)
1541                         continue;
1542
1543                 mpt_disk_prt(mpt, mpt_disk, "Status (");
1544                 for (m = 1; m <= 0x80; m <<= 1) {
1545                         switch (disk_pg->PhysDiskStatus.Flags & m) {
1546                         case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1547                                 mpt_prtc(mpt, " Out-Of-Sync");
1548                                 break;
1549                         case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1550                                 mpt_prtc(mpt, " Quiesced");
1551                                 break;
1552                         default:
1553                                 break;
1554                         }
1555                 }
1556                 mpt_prtc(mpt, " )\n");
1557         }
1558
1559         mpt->raid_nonopt_volumes = nonopt_volumes;
1560         return (0);
1561 }
1562
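/*
 * Callout handler used to drive periodic RAID status refreshes.  It only
 * wakes the RAID worker via mpt_raid_wakeup(); the actual refresh work is
 * done in that thread's context.
 */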
1563 static void
1564 mpt_raid_timer(void *arg)
1565 {
1566         struct mpt_softc *mpt;
1567
1568         mpt = (struct mpt_softc *)arg;
1569         MPT_LOCK(mpt);
1570         mpt_raid_wakeup(mpt);
1571         MPT_UNLOCK(mpt);
1572 }
1573
1574 void
1575 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1576 {
1577         callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1578                       mpt_raid_timer, mpt);
1579 }
1580
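/*
 * Release all RAID bookkeeping storage: the per-volume configuration
 * pages, the volume and disk arrays, and the cached copies of IOC pages
 * 2 and 3.
 */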
1581 void
1582 mpt_raid_free_mem(struct mpt_softc *mpt)
1583 {
1584
1585         if (mpt->raid_volumes) {
1586                 struct mpt_raid_volume *mpt_raid;
1587                 int i;
1588                 for (i = 0; i < mpt->raid_max_volumes; i++) {
1589                         mpt_raid = &mpt->raid_volumes[i];
1590                         if (mpt_raid->config_page) {
1591                                 kfree(mpt_raid->config_page, M_DEVBUF);
1592                                 mpt_raid->config_page = NULL;
1593                         }
1594                 }
1595                 kfree(mpt->raid_volumes, M_DEVBUF);
1596                 mpt->raid_volumes = NULL;
1597         }
1598         if (mpt->raid_disks) {
1599                 kfree(mpt->raid_disks, M_DEVBUF);
1600                 mpt->raid_disks = NULL;
1601         }
1602         if (mpt->ioc_page2) {
1603                 kfree(mpt->ioc_page2, M_DEVBUF);
1604                 mpt->ioc_page2 = NULL;
1605         }
1606         if (mpt->ioc_page3) {
1607                 kfree(mpt->ioc_page3, M_DEVBUF);
1608                 mpt->ioc_page3 = NULL;
1609         }
1610         mpt->raid_max_volumes = 0;
1611         mpt->raid_max_disks = 0;
1612 }
1613
1614 #if __FreeBSD_version >= 500000
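/*
 * Setters used by the sysctl handlers below.  Each one validates its
 * argument, records the new default in the softc, and then pushes the
 * setting out to every currently active volume.
 */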
1615 static int
1616 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1617 {
1618         struct mpt_raid_volume *mpt_vol;
1619
1620         if ((rate > MPT_RAID_RESYNC_RATE_MAX
1621           || rate < MPT_RAID_RESYNC_RATE_MIN)
1622          && rate != MPT_RAID_RESYNC_RATE_NC)
1623                 return (EINVAL);
1624
1625         MPT_LOCK(mpt);
1626         mpt->raid_resync_rate = rate;
1627         RAID_VOL_FOREACH(mpt, mpt_vol) {
1628                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1629                         continue;
1630                 }
1631                 mpt_verify_resync_rate(mpt, mpt_vol);
1632         }
1633         MPT_UNLOCK(mpt);
1634         return (0);
1635 }
1636
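/*
 * Apply a new default queue depth (1-255) to all active volumes.  A
 * temporary CAM path is built for each volume so the depth change can be
 * delivered through the XPT layer, and is freed again once applied.
 */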
1637 static int
1638 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1639 {
1640         struct mpt_raid_volume *mpt_vol;
1641
1642         if (vol_queue_depth > 255 || vol_queue_depth < 1)
1643                 return (EINVAL);
1644
1645         MPT_LOCK(mpt);
1646         mpt->raid_queue_depth = vol_queue_depth;
1647         RAID_VOL_FOREACH(mpt, mpt_vol) {
1648                 struct cam_path *path;
1649                 int error;
1650
1651                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1652                         continue;
1653
1654                 mpt->raid_rescan = 0;
1655
1656                 MPTLOCK_2_CAMLOCK(mpt);
1657                 error = xpt_create_path(&path, xpt_periph,
1658                                         cam_sim_path(mpt->sim),
1659                                         mpt_vol->config_page->VolumeID,
1660                                         /*lun*/0);
1661                 if (error != CAM_REQ_CMP) {
1662                         CAMLOCK_2_MPTLOCK(mpt);
1663                         mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1664                         continue;
1665                 }
1666                 mpt_adjust_queue_depth(mpt, mpt_vol, path);
1667                 xpt_free_path(path);
1668                 CAMLOCK_2_MPTLOCK(mpt);
1669         }
1670         MPT_UNLOCK(mpt);
1671         return (0);
1672 }
1673
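/*
 * Change the member-disk write-cache-enable (MWCE) policy.  On the first
 * change away from the default "NC" setting to rebuild-only caching, any
 * active volume that is resyncing with write caching enabled is flagged
 * as a likely unsafe shutdown and a full resync is suggested.
 */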
1674 static int
1675 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1676 {
1677         struct mpt_raid_volume *mpt_vol;
1678         int force_full_resync;
1679
1680         MPT_LOCK(mpt);
1681         if (mwce == mpt->raid_mwce_setting) {
1682                 MPT_UNLOCK(mpt);
1683                 return (0);
1684         }
1685
1686         /*
1687          * Catch MWCE being left on due to a failed shutdown.  Since
1688          * sysctls cannot be set by the loader, we treat the first
1689          * setting of this variable specially and force a full volume
1690          * resync if MWCE is enabled and a resync is in progress.
1691          */
1692         force_full_resync = 0;
1693         if (mpt->raid_mwce_set == 0
1694          && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1695          && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1696                 force_full_resync = 1;
1697
1698         mpt->raid_mwce_setting = mwce;
1699         RAID_VOL_FOREACH(mpt, mpt_vol) {
1700                 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1701                 int resyncing;
1702                 int mwce;
1703
1704                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1705                         continue;
1706
1707                 vol_pg = mpt_vol->config_page;
1708                 resyncing = vol_pg->VolumeStatus.Flags
1709                           & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1710                 mwce = vol_pg->VolumeSettings.Settings
1711                      & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1712                 if (force_full_resync && resyncing && mwce) {
1713
1714                         /*
1715                          * XXX disable/enable volume should force a resync,
1716          *     but we'll need to quiesce, drain, and restart
1717                          *     I/O to do that.
1718                          */
1719                         mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1720                                     "detected.  Suggest full resync.\n");
1721                 }
1722                 mpt_verify_mwce(mpt, mpt_vol);
1723         }
1724         mpt->raid_mwce_set = 1;
1725         MPT_UNLOCK(mpt);
1726         return (0);
1727 }
1728 const char *mpt_vol_mwce_strs[] =
1729 {
1730         "On",
1731         "Off",
1732         "On-During-Rebuild",
1733         "NC"
1734 };
1735
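/*
 * Sysctl handler for the member WCE policy.  The current setting is
 * reported as a string, and new values are matched against
 * mpt_vol_mwce_strs before being handed to mpt_raid_set_vol_mwce().
 */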
1736 static int
1737 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1738 {
1739         char inbuf[20];
1740         struct mpt_softc *mpt;
1741         const char *str;
1742         int error;
1743         u_int size;
1744         u_int i;
1745
1746         GIANT_REQUIRED;
1747
1748         mpt = (struct mpt_softc *)arg1;
1749         str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1750         error = SYSCTL_OUT(req, str, strlen(str) + 1);
1751         if (error || !req->newptr) {
1752                 return (error);
1753         }
1754
1755         size = req->newlen - req->newidx;
1756         if (size >= sizeof(inbuf)) {
1757                 return (EINVAL);
1758         }
1759
1760         error = SYSCTL_IN(req, inbuf, size);
1761         if (error) {
1762                 return (error);
1763         }
1764         inbuf[size] = '\0';
1765         for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1766                 if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1767                         return (mpt_raid_set_vol_mwce(mpt, i));
1768                 }
1769         }
1770         return (EINVAL);
1771 }
1772
1773 static int
1774 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1775 {
1776         struct mpt_softc *mpt;
1777         u_int raid_resync_rate;
1778         int error;
1779
1780         GIANT_REQUIRED;
1781
1782         mpt = (struct mpt_softc *)arg1;
1783         raid_resync_rate = mpt->raid_resync_rate;
1784
1785         error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1786         if (error || !req->newptr) {
1787                 return (error);
1788         }
1789
1790         return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1791 }
1792
1793 static int
1794 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1795 {
1796         struct mpt_softc *mpt;
1797         u_int raid_queue_depth;
1798         int error;
1799
1800         GIANT_REQUIRED;
1801
1802         mpt = (struct mpt_softc *)arg1;
1803         raid_queue_depth = mpt->raid_queue_depth;
1804
1805         error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1806         if (error || !req->newptr) {
1807                 return (error);
1808         }
1809
1810         return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1811 }
1812
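/*
 * Hang the RAID tuning knobs off the device's sysctl tree.  The nodes end
 * up under dev.mpt.<unit>, so (for example, assuming the adapter attaches
 * as mpt0) the resync priority could be tuned with
 * "sysctl dev.mpt.0.vol_resync_rate=128".
 */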
1813 static void
1814 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1815 {
1816         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1817         struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1818
1819         SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1820                         "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1821                         mpt_raid_sysctl_vol_member_wce, "A",
1822                         "volume member WCE (On, Off, On-During-Rebuild, NC)");
1823
1824         SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1825                         "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1826                         mpt_raid_sysctl_vol_queue_depth, "I",
1827                         "default volume queue depth");
1828
1829         SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1830                         "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1831                         mpt_raid_sysctl_vol_resync_rate, "I",
1832                         "volume resync priority (0 == NC, 1 - 255)");
1833         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1834                         "nonoptimal_volumes", CTLFLAG_RD,
1835                         &mpt->raid_nonopt_volumes, 0,
1836                         "number of nonoptimal volumes");
1837 }
1838 #endif