mpt(4): Sync with FreeBSD.
[dragonfly.git] sys/dev/disk/mpt/mpt_raid.c
1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*-
35  * Some Breakage and Bug Fixing added later.
36  * Copyright (c) 2006, by Matthew Jacob
37  * All Rights Reserved
38  *
39  * Support from LSI-Logic has also gone a great deal toward making this a
40  * workable subsystem and is gratefully acknowledged.
41  *
42  * $FreeBSD: src/sys/dev/mpt/mpt_raid.c,v 1.30 2011/07/29 18:38:31 marius Exp $
43  */
44
45 #include <dev/disk/mpt/mpt.h>
46 #include <dev/disk/mpt/mpt_raid.h>
47
48 #include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
49 #include "dev/disk/mpt/mpilib/mpi_raid.h"
50
51 #include <bus/cam/cam.h>
52 #include <bus/cam/cam_ccb.h>
53 #include <bus/cam/cam_sim.h>
54 #include <bus/cam/cam_xpt_sim.h>
55
56 #include <sys/devicestat.h>
57 #include <bus/cam/cam_periph.h>
58
59 #include <sys/callout.h>
60 #include <sys/kthread.h>
61 #include <sys/sysctl.h>
62
63 #include <machine/stdarg.h>
64
65 struct mpt_raid_action_result
66 {
67         union {
68                 MPI_RAID_VOL_INDICATOR  indicator_struct;
69                 uint32_t                new_settings;
70                 uint8_t                 phys_disk_num;
71         } action_data;
72         uint16_t                        action_status;
73 };
74
75 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
76         (((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
77
78 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
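
/*
 * Annotation: the RAID action result area lives directly behind the
 * MSG_RAID_ACTION_REQUEST in the request's virtual buffer; the reply frame
 * handler copies ActionStatus/ActionData there, and callers in this file
 * consume it with a pattern like:
 *
 *	ar = REQ_TO_RAID_ACTION_RESULT(req);
 *	if (REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS &&
 *	    ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS)
 *		... use ar->action_data ...
 */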
79
80 static mpt_probe_handler_t      mpt_raid_probe;
81 static mpt_attach_handler_t     mpt_raid_attach;
82 static mpt_enable_handler_t     mpt_raid_enable;
83 static mpt_event_handler_t      mpt_raid_event;
84 static mpt_shutdown_handler_t   mpt_raid_shutdown;
85 static mpt_reset_handler_t      mpt_raid_ioc_reset;
86 static mpt_detach_handler_t     mpt_raid_detach;
87
88 static struct mpt_personality mpt_raid_personality =
89 {
90         .name           = "mpt_raid",
91         .probe          = mpt_raid_probe,
92         .attach         = mpt_raid_attach,
93         .enable         = mpt_raid_enable,
94         .event          = mpt_raid_event,
95         .reset          = mpt_raid_ioc_reset,
96         .shutdown       = mpt_raid_shutdown,
97         .detach         = mpt_raid_detach,
98 };
99
100 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
101 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
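
/*
 * Annotation: the core mpt(4) code iterates over registered personalities and
 * calls these hooks (probe/attach/enable/event/reset/shutdown/detach) at the
 * matching points in a controller's life cycle.  The DEPEND line records that
 * mpt_raid needs mpt_cam, since the volumes and pass-through physical disks
 * handled here are reached through the CAM SIMs (mpt->sim, mpt->phydisk_sim)
 * that mpt_cam provides.
 */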
102
103 static mpt_reply_handler_t mpt_raid_reply_handler;
104 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
105                                         MSG_DEFAULT_REPLY *reply_frame);
106 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
107 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
108 static void mpt_raid_thread(void *arg);
109 static timeout_t mpt_raid_timer;
110 #if 0
111 static void mpt_enable_vol(struct mpt_softc *mpt,
112                            struct mpt_raid_volume *mpt_vol, int enable);
113 #endif
114 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
115 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
116     struct cam_path *);
117 static void mpt_raid_sysctl_attach(struct mpt_softc *);
118
119 static const char *mpt_vol_type(struct mpt_raid_volume *vol);
120 static const char *mpt_vol_state(struct mpt_raid_volume *vol);
121 static const char *mpt_disk_state(struct mpt_raid_disk *disk);
122 static void mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
123     const char *fmt, ...);
124 static void mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
125     const char *fmt, ...);
126
127 static int mpt_issue_raid_req(struct mpt_softc *mpt,
128     struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req,
129     u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
130     int write, int wait);
131
132 static int mpt_refresh_raid_data(struct mpt_softc *mpt);
133 static void mpt_schedule_raid_refresh(struct mpt_softc *mpt);
134
135 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
136
137 static const char *
138 mpt_vol_type(struct mpt_raid_volume *vol)
139 {
140         switch (vol->config_page->VolumeType) {
141         case MPI_RAID_VOL_TYPE_IS:
142                 return ("RAID-0");
143         case MPI_RAID_VOL_TYPE_IME:
144                 return ("RAID-1E");
145         case MPI_RAID_VOL_TYPE_IM:
146                 return ("RAID-1");
147         default:
148                 return ("Unknown");
149         }
150 }
151
152 static const char *
153 mpt_vol_state(struct mpt_raid_volume *vol)
154 {
155         switch (vol->config_page->VolumeStatus.State) {
156         case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
157                 return ("Optimal");
158         case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
159                 return ("Degraded");
160         case MPI_RAIDVOL0_STATUS_STATE_FAILED:
161                 return ("Failed");
162         default:
163                 return ("Unknown");
164         }
165 }
166
167 static const char *
168 mpt_disk_state(struct mpt_raid_disk *disk)
169 {
170         switch (disk->config_page.PhysDiskStatus.State) {
171         case MPI_PHYSDISK0_STATUS_ONLINE:
172                 return ("Online");
173         case MPI_PHYSDISK0_STATUS_MISSING:
174                 return ("Missing");
175         case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
176                 return ("Incompatible");
177         case MPI_PHYSDISK0_STATUS_FAILED:
178                 return ("Failed");
179         case MPI_PHYSDISK0_STATUS_INITIALIZING:
180                 return ("Initializing");
181         case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
182                 return ("Offline Requested");
183         case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
184                 return ("Failed per Host Request");
185         case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
186                 return ("Offline");
187         default:
188                 return ("Unknown");
189         }
190 }
191
192 static void
193 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
194             const char *fmt, ...)
195 {
196         __va_list ap;
197
198         kprintf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
199                (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
200                vol->config_page->VolumeBus, vol->config_page->VolumeID);
201         __va_start(ap, fmt);
202         kvprintf(fmt, ap);
203         __va_end(ap);
204 }
205
206 static void
207 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
208              const char *fmt, ...)
209 {
210         __va_list ap;
211
212         if (disk->volume != NULL) {
213                 kprintf("(%s:vol%d:%d): ",
214                        device_get_nameunit(mpt->dev),
215                        disk->volume->config_page->VolumeID,
216                        disk->member_number);
217         } else {
218                 kprintf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
219                        disk->config_page.PhysDiskBus,
220                        disk->config_page.PhysDiskID);
221         }
222         __va_start(ap, fmt);
223         kvprintf(fmt, ap);
224         __va_end(ap);
225 }
226
227 static void
228 mpt_raid_async(void *callback_arg, u_int32_t code,
229                struct cam_path *path, void *arg)
230 {
231         struct mpt_softc *mpt;
232
233         mpt = (struct mpt_softc*)callback_arg;
234         switch (code) {
235         case AC_FOUND_DEVICE:
236         {
237                 struct ccb_getdev *cgd;
238                 struct mpt_raid_volume *mpt_vol;
239
240                 cgd = (struct ccb_getdev *)arg;
241                 if (cgd == NULL) {
242                         break;
243                 }
244
245                 mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
246                          cgd->ccb_h.target_id);
247
248                 RAID_VOL_FOREACH(mpt, mpt_vol) {
249                         if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
250                                 continue;
251
252                         if (mpt_vol->config_page->VolumeID
253                          == cgd->ccb_h.target_id) {
254                                 mpt_adjust_queue_depth(mpt, mpt_vol, path);
255                                 break;
256                         }
257                 }
258         }
259         default:
260                 break;
261         }
262 }
263
264 static int
265 mpt_raid_probe(struct mpt_softc *mpt)
266 {
267
268         if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
269                 return (ENODEV);
270         }
271         return (0);
272 }
273
274 static int
275 mpt_raid_attach(struct mpt_softc *mpt)
276 {
277         struct ccb_setasync csa;
278         mpt_handler_t    handler;
279         int              error;
280
281         mpt_callout_init(mpt, &mpt->raid_timer);
282
283         error = mpt_spawn_raid_thread(mpt);
284         if (error != 0) {
285                 mpt_prt(mpt, "Unable to spawn RAID thread!\n");
286                 goto cleanup;
287         }
288
289         MPT_LOCK(mpt);
290         handler.reply_handler = mpt_raid_reply_handler;
291         error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
292                                      &raid_handler_id);
293         if (error != 0) {
294                 mpt_prt(mpt, "Unable to register RAID handler!\n");
295                 goto cleanup;
296         }
297
298         xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
299         csa.ccb_h.func_code = XPT_SASYNC_CB;
300         csa.event_enable = AC_FOUND_DEVICE;
301         csa.callback = mpt_raid_async;
302         csa.callback_arg = mpt;
303         xpt_action((union ccb *)&csa);
304         if (csa.ccb_h.status != CAM_REQ_CMP) {
305                 mpt_prt(mpt, "mpt_raid_attach: Unable to register "
306                         "CAM async handler.\n");
307         }
308         MPT_UNLOCK(mpt);
309
310         mpt_raid_sysctl_attach(mpt);
311         return (0);
312 cleanup:
313         MPT_UNLOCK(mpt);
314         mpt_raid_detach(mpt);
315         return (error);
316 }
317
318 static int
319 mpt_raid_enable(struct mpt_softc *mpt)
320 {
321
322         return (0);
323 }
324
325 static void
326 mpt_raid_detach(struct mpt_softc *mpt)
327 {
328         struct ccb_setasync csa;
329         mpt_handler_t handler;
330
331         callout_stop(&mpt->raid_timer);
332
333         MPT_LOCK(mpt);
334         mpt_terminate_raid_thread(mpt);
335         handler.reply_handler = mpt_raid_reply_handler;
336         mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
337                                raid_handler_id);
338         xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
339         csa.ccb_h.func_code = XPT_SASYNC_CB;
340         csa.event_enable = 0;
341         csa.callback = mpt_raid_async;
342         csa.callback_arg = mpt;
343         xpt_action((union ccb *)&csa);
344         MPT_UNLOCK(mpt);
345 }
346
347 static void
348 mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
349 {
350
351         /* Nothing to do yet. */
352 }
353
354 static const char *raid_event_txt[] =
355 {
356         "Volume Created",
357         "Volume Deleted",
358         "Volume Settings Changed",
359         "Volume Status Changed",
360         "Volume Physical Disk Membership Changed",
361         "Physical Disk Created",
362         "Physical Disk Deleted",
363         "Physical Disk Settings Changed",
364         "Physical Disk Status Changed",
365         "Domain Validation Required",
366         "SMART Data Received",
367         "Replace Action Started",
368 };
369
370 static int
371 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
372                MSG_EVENT_NOTIFY_REPLY *msg)
373 {
374         EVENT_DATA_RAID *raid_event;
375         struct mpt_raid_volume *mpt_vol;
376         struct mpt_raid_disk *mpt_disk;
377         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
378         int i;
379         int print_event;
380
381         if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
382                 return (0);
383         }
384
385         raid_event = (EVENT_DATA_RAID *)&msg->Data;
386
387         mpt_vol = NULL;
388         vol_pg = NULL;
389         if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
390                 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
391                         mpt_vol = &mpt->raid_volumes[i];
392                         vol_pg = mpt_vol->config_page;
393
394                         if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
395                                 continue;
396
397                         if (vol_pg->VolumeID == raid_event->VolumeID
398                          && vol_pg->VolumeBus == raid_event->VolumeBus)
399                                 break;
400                 }
401                 if (i >= mpt->ioc_page2->MaxVolumes) {
402                         mpt_vol = NULL;
403                         vol_pg = NULL;
404                 }
405         }
406
407         mpt_disk = NULL;
408         if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
409                 mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
410                 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
411                         mpt_disk = NULL;
412                 }
413         }
414
415         print_event = 1;
416         switch(raid_event->ReasonCode) {
417         case MPI_EVENT_RAID_RC_VOLUME_CREATED:
418         case MPI_EVENT_RAID_RC_VOLUME_DELETED:
419                 break;
420         case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
421                 if (mpt_vol != NULL) {
422                         if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
423                                 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
424                         } else {
425                                 /*
426                                  * Coalesce status messages into one
427                                  * per background run of our RAID thread.
428                                  * This removes "spurious" status messages
429                                  * from our output.
430                                  */
431                                 print_event = 0;
432                         }
433                 }
434                 break;
435         case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
436         case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
437                 mpt->raid_rescan++;
438                 if (mpt_vol != NULL) {
439                         mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
440                 }
441                 break;
442         case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
443         case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
444                 mpt->raid_rescan++;
445                 break;
446         case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
447         case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
448                 mpt->raid_rescan++;
449                 if (mpt_disk != NULL) {
450                         mpt_disk->flags &= ~MPT_RDF_UP2DATE;
451                 }
452                 break;
453         case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
454                 mpt->raid_rescan++;
455                 break;
456         case MPI_EVENT_RAID_RC_SMART_DATA:
457         case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
458                 break;
459         }
460
461         if (print_event) {
462                 if (mpt_disk != NULL) {
463                         mpt_disk_prt(mpt, mpt_disk, "%s", "");
464                 } else if (mpt_vol != NULL) {
465                         mpt_vol_prt(mpt, mpt_vol, "%s", "");
466                 } else {
467                         mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
468                                 raid_event->VolumeID);
469
470                         if (raid_event->PhysDiskNum != 0xFF)
471                                 mpt_prtc(mpt, ":%d): ",
472                                          raid_event->PhysDiskNum);
473                         else
474                                 mpt_prtc(mpt, "): ");
475                 }
476
477                 if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
478                         mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
479                                  raid_event->ReasonCode);
480                 else
481                         mpt_prtc(mpt, "%s\n",
482                                  raid_event_txt[raid_event->ReasonCode]);
483         }
484
485         if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
486                 /* XXX Use CAM's print sense for this... */
487                 if (mpt_disk != NULL)
488                         mpt_disk_prt(mpt, mpt_disk, "%s", "");
489                 else
490                         mpt_prt(mpt, "Volume(%d:%d:%d: ",
491                             raid_event->VolumeBus, raid_event->VolumeID,
492                             raid_event->PhysDiskNum);
493                 mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
494                          raid_event->ASC, raid_event->ASCQ);
495         }
496
497         mpt_raid_wakeup(mpt);
498         return (1);
499 }
500
501 static void
502 mpt_raid_shutdown(struct mpt_softc *mpt)
503 {
504         struct mpt_raid_volume *mpt_vol;
505
506         if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
507                 return;
508         }
509
510         mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
511         RAID_VOL_FOREACH(mpt, mpt_vol) {
512                 mpt_verify_mwce(mpt, mpt_vol);
513         }
514 }
515
516 static int
517 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
518     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
519 {
520         int free_req;
521
522         if (req == NULL)
523                 return (TRUE);
524
525         free_req = TRUE;
526         if (reply_frame != NULL)
527                 free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
528 #ifdef NOTYET
529         else if (req->ccb != NULL) {
530                 /* Complete Quiesce CCB with error... */
531         }
532 #endif
533
534         req->state &= ~REQ_STATE_QUEUED;
535         req->state |= REQ_STATE_DONE;
536         TAILQ_REMOVE(&mpt->request_pending_list, req, links);
537
538         if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
539                 wakeup(req);
540         } else if (free_req) {
541                 mpt_free_request(mpt, req);
542         }
543
544         return (TRUE);
545 }
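
/*
 * Annotation: request ownership hand-off.  For fire-and-forget RAID actions
 * the handler above frees the request itself.  For synchronous actions
 * (mpt_issue_raid_req() with wait != 0) the sleeping caller is expected to
 * have set REQ_STATE_NEED_WAKEUP via mpt_wait_req(), so the handler only
 * marks the request done and wakes the caller, which then inspects
 * REQ_IOCSTATUS()/action_status and frees the request when it is finished
 * with the result.
 */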
546
547 /*
548  * Parse additional completion information in the reply
549  * frame for RAID I/O requests.
550  */
551 static int
552 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
553     MSG_DEFAULT_REPLY *reply_frame)
554 {
555         MSG_RAID_ACTION_REPLY *reply;
556         struct mpt_raid_action_result *action_result;
557         MSG_RAID_ACTION_REQUEST *rap;
558
559         reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
560         req->IOCStatus = le16toh(reply->IOCStatus);
561         rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
562
563         switch (rap->Action) {
564         case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
565                 mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
566                 break;
567         case MPI_RAID_ACTION_ENABLE_PHYS_IO:
568                 mpt_prt(mpt, "ENABLE PHYSIO DONE\n");
569                 break;
570         default:
571                 break;
572         }
573         action_result = REQ_TO_RAID_ACTION_RESULT(req);
574         memcpy(&action_result->action_data, &reply->ActionData,
575             sizeof(action_result->action_data));
576         action_result->action_status = le16toh(reply->ActionStatus);
577         return (TRUE);
578 }
579
580 /*
581  * Utility routine to perform a RAID action command.
582  */
583 static int
584 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
585                    struct mpt_raid_disk *disk, request_t *req, u_int Action,
586                    uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
587                    int write, int wait)
588 {
589         MSG_RAID_ACTION_REQUEST *rap;
590         SGE_SIMPLE32 *se;
591
592         rap = req->req_vbuf;
593         memset(rap, 0, sizeof *rap);
594         rap->Action = Action;
595         rap->ActionDataWord = htole32(ActionDataWord);
596         rap->Function = MPI_FUNCTION_RAID_ACTION;
597         rap->VolumeID = vol->config_page->VolumeID;
598         rap->VolumeBus = vol->config_page->VolumeBus;
599         if (disk != NULL)
600                 rap->PhysDiskNum = disk->config_page.PhysDiskNum;
601         else
602                 rap->PhysDiskNum = 0xFF;
603         se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
604         se->Address = htole32(addr);
605         MPI_pSGE_SET_LENGTH(se, len);
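        /*
         * Annotation: the action data buffer is described by a single simple
         * 32-bit SGE.  The direction ternary below must be parenthesized:
         * '|' binds tighter than '?:', so without the extra parentheses the
         * OR'd flags would become the ternary's condition and be dropped
         * from the computed flags value.
         */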
606         MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
607             MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
608             MPI_SGE_FLAGS_END_OF_LIST |
609             (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
610         se->FlagsLength = htole32(se->FlagsLength);
611         rap->MsgContext = htole32(req->index | raid_handler_id);
612
613         mpt_check_doorbell(mpt);
614         mpt_send_cmd(mpt, req);
615
616         if (wait) {
617                 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
618                                      /*sleep_ok*/FALSE, /*time_ms*/2000));
619         } else {
620                 return (0);
621         }
622 }
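
/*
 * Annotation: a minimal sketch (kept compiled out) of the synchronous calling
 * pattern used by mpt_verify_mwce(), mpt_verify_resync_rate() and
 * mpt_refresh_raid_vol() below.
 */
#if 0
        req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
        rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req, Action,
                                ActionDataWord, /*addr*/0, /*len*/0,
                                /*write*/FALSE, /*wait*/TRUE);
        ar = REQ_TO_RAID_ACTION_RESULT(req);
        if (rv != 0
         || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
         || ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)
                mpt_vol_prt(mpt, mpt_vol, "RAID action failed: %d:%x:%x\n",
                            rv, req->IOCStatus, ar->action_status);
        mpt_free_request(mpt, req);
#endif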
623
624 /*************************** RAID Status Monitoring ***************************/
625 static int
626 mpt_spawn_raid_thread(struct mpt_softc *mpt)
627 {
628         int error;
629
630         /*
631          * Freeze out any CAM transactions until our thread
632          * is able to run at least once.  We need to update
633          * our RAID pages before accepting I/O or we may
634          * reject I/O to an ID we later determine is for a
635          * hidden physdisk.
636          */
637         MPT_LOCK(mpt);
638         xpt_freeze_simq(mpt->phydisk_sim, 1);
639         MPT_UNLOCK(mpt);
640         error = mpt_kthread_create(mpt_raid_thread, mpt,
641             &mpt->raid_thread, /*flags*/0, /*altstack*/0,
642             "mpt_raid%d", mpt->unit);
643         if (error != 0) {
644                 MPT_LOCK(mpt);
645                 xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
646                 MPT_UNLOCK(mpt);
647         }
648         return (error);
649 }
650
651 static void
652 mpt_terminate_raid_thread(struct mpt_softc *mpt)
653 {
654
655         if (mpt->raid_thread == NULL) {
656                 return;
657         }
658         mpt->shutdwn_raid = 1;
659         wakeup(&mpt->raid_volumes);
660         /*
661          * Sleep on a slightly different location
662          * for this interlock just for added safety.
663          */
664         mpt_sleep(mpt, &mpt->raid_thread, 0, "thtrm", 0);
665 }
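
/*
 * Annotation: thread shutdown handshake.  The event path uses
 * mpt_raid_wakeup() (mpt_raid.h), which is expected to bump raid_wakeup and
 * wake &mpt->raid_volumes.  Termination sets shutdwn_raid, issues the same
 * wakeup, and then sleeps on &mpt->raid_thread until mpt_raid_thread()
 * clears mpt->raid_thread and wakes that channel on its way out.
 */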
666
667 static void
668 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
669 {
670         xpt_free_path(ccb->ccb_h.path);
671         kfree(ccb, M_TEMP);
672 }
673
674 static void
675 mpt_raid_thread(void *arg)
676 {
677         struct mpt_softc *mpt;
678         int firstrun;
679
680         mpt = (struct mpt_softc *)arg;
681         firstrun = 1;
682         MPT_LOCK(mpt);
683         while (mpt->shutdwn_raid == 0) {
684
685                 if (mpt->raid_wakeup == 0) {
686                         mpt_sleep(mpt, &mpt->raid_volumes, 0, "idle", 0);
687                         continue;
688                 }
689
690                 mpt->raid_wakeup = 0;
691
692                 if (mpt_refresh_raid_data(mpt)) {
693                         mpt_schedule_raid_refresh(mpt); /* XX NOT QUITE RIGHT */
694                         continue;
695                 }
696
697                 /*
698                  * Now that we have our first snapshot of RAID data,
699                  * allow CAM to access our physical disk bus.
700                  */
701                 if (firstrun) {
702                         firstrun = 0;
703                         MPTLOCK_2_CAMLOCK(mpt);
704                         xpt_release_simq(mpt->phydisk_sim, TRUE);
705                         CAMLOCK_2_MPTLOCK(mpt);
706                 }
707
708                 if (mpt->raid_rescan != 0) {
709                         union ccb *ccb;
710                         int error;
711
712                         mpt->raid_rescan = 0;
713                         MPT_UNLOCK(mpt);
714
715                         ccb = kmalloc(sizeof(union ccb), M_TEMP,
716                             M_WAITOK | M_ZERO);
717
718                         MPT_LOCK(mpt);
719                         error = xpt_create_path(&ccb->ccb_h.path, xpt_periph,
720                             cam_sim_path(mpt->phydisk_sim),
721                             CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
722                         if (error != CAM_REQ_CMP) {
723                                 kfree(ccb, M_TEMP);
724                                 mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
725                         } else {
726                                 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
727                                     5/*priority (low)*/);
728                                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
729                                 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
730                                 ccb->crcn.flags = CAM_FLAG_NONE;
731                                 xpt_action(ccb);
732
733                                 /* scan is now in progress */
734                         }
735                 }
736         }
737         mpt->raid_thread = NULL;
738         wakeup(&mpt->raid_thread);
739         MPT_UNLOCK(mpt);
740         mpt_kthread_exit(0);
741 }
742
743 #if 0
744 static void
745 mpt_raid_quiesce_timeout(void *arg)
746 {
747
748         /* Complete the CCB with error */
749         /* COWWWW */
750 }
751
752 static timeout_t mpt_raid_quiesce_timeout;
753 cam_status
754 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
755                       request_t *req)
756 {
757         union ccb *ccb;
758
759         ccb = req->ccb;
760         if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
761                 return (CAM_REQ_CMP);
762
763         if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
764                 int rv;
765
766                 mpt_disk->flags |= MPT_RDF_QUIESCING;
767                 xpt_freeze_devq(ccb->ccb_h.path, 1);
768
769                 rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
770                                         MPI_RAID_ACTION_QUIESCE_PHYS_IO,
771                                         /*ActionData*/0, /*addr*/0,
772                                         /*len*/0, /*write*/FALSE,
773                                         /*wait*/FALSE);
774                 if (rv != 0)
775                         return (CAM_REQ_CMP_ERR);
776
777                 mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
778 #if 0
779                 if (rv == ETIMEDOUT) {
780                         mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
781                                      "Quiesce Timed-out\n");
782                         xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
783                         return (CAM_REQ_CMP_ERR);
784                 }
785
786                 ar = REQ_TO_RAID_ACTION_RESULT(req);
787                 if (rv != 0
788                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
789                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
790                         mpt_disk_prt(mpt, mpt_disk, "Quiesce Failed: "
791                                     "%d:%x:%x\n", rv, req->IOCStatus,
792                                     ar->action_status);
793                         xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
794                         return (CAM_REQ_CMP_ERR);
795                 }
796 #endif
797                 return (CAM_REQ_INPROG);
798         }
799         return (CAM_REQUEUE_REQ);
800 }
801 #endif
802
803 /* XXX Ignores that there may be multiple busses/IOCs involved. */
804 cam_status
805 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt)
806 {
807         struct mpt_raid_disk *mpt_disk;
808
809         mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
810         if (ccb->ccb_h.target_id < mpt->raid_max_disks
811          && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
812                 *tgt = mpt_disk->config_page.PhysDiskID;
813                 return (0);
814         }
815         mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
816                  ccb->ccb_h.target_id);
817         return (-1);
818 }
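
/*
 * Annotation: on the pass-through SIM the target id is the index into
 * mpt->raid_disks (mpt_announce_disk() prints mpt_disk - mpt->raid_disks as
 * the pass-thru target), so this routine translates that index back into the
 * disk's real PhysDiskID before a request is sent to the IOC.
 */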
819
820 /* XXX Ignores that there may be multiple busses/IOCs involved. */
821 int
822 mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt)
823 {
824         struct mpt_raid_disk *mpt_disk;
825         int i;
826
827         if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0)
828                 return (0);
829         for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
830                 mpt_disk = &mpt->raid_disks[i];
831                 if ((mpt_disk->flags & MPT_RDF_ACTIVE) != 0 &&
832                     mpt_disk->config_page.PhysDiskID == tgt)
833                         return (1);
834         }
835         return (0);
836
837 }
838
839 /* XXX Ignores that there may be multiple busses/IOCs involved. */
840 int
841 mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
842 {
843         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
844         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
845
846         if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
847                 return (0);
848         }
849         ioc_vol = mpt->ioc_page2->RaidVolume;
850         ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
851         for (;ioc_vol != ioc_last_vol; ioc_vol++) {
852                 if (ioc_vol->VolumeID == tgt) {
853                         return (1);
854                 }
855         }
856         return (0);
857 }
858
859 #if 0
860 static void
861 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
862                int enable)
863 {
864         request_t *req;
865         struct mpt_raid_action_result *ar;
866         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
867         int enabled;
868         int rv;
869
870         vol_pg = mpt_vol->config_page;
871         enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
872
873         /*
874          * If the setting matches the configuration,
875          * there is nothing to do.
876          */
877         if ((enabled && enable)
878          || (!enabled && !enable))
879                 return;
880
881         req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
882         if (req == NULL) {
883                 mpt_vol_prt(mpt, mpt_vol,
884                             "mpt_enable_vol: Get request failed!\n");
885                 return;
886         }
887
888         rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
889                                 enable ? MPI_RAID_ACTION_ENABLE_VOLUME
890                                        : MPI_RAID_ACTION_DISABLE_VOLUME,
891                                 /*data*/0, /*addr*/0, /*len*/0,
892                                 /*write*/FALSE, /*wait*/TRUE);
893         if (rv == ETIMEDOUT) {
894                 mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
895                             "%s Volume Timed-out\n",
896                             enable ? "Enable" : "Disable");
897                 return;
898         }
899         ar = REQ_TO_RAID_ACTION_RESULT(req);
900         if (rv != 0
901          || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
902          || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
903                 mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
904                             enable ? "Enable" : "Disable",
905                             rv, req->IOCStatus, ar->action_status);
906         }
907
908         mpt_free_request(mpt, req);
909 }
910 #endif
911
912 static void
913 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
914 {
915         request_t *req;
916         struct mpt_raid_action_result *ar;
917         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
918         uint32_t data;
919         int rv;
920         int resyncing;
921         int mwce;
922
923         vol_pg = mpt_vol->config_page;
924         resyncing = vol_pg->VolumeStatus.Flags
925                   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
926         mwce = vol_pg->VolumeSettings.Settings
927              & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
928
929         /*
930          * If the setting matches the configuration,
931          * there is nothing to do.
932          */
933         switch (mpt->raid_mwce_setting) {
934         case MPT_RAID_MWCE_REBUILD_ONLY:
935                 if ((resyncing && mwce) || (!resyncing && !mwce)) {
936                         return;
937                 }
938                 mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
939                 if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
940                         /*
941                          * Wait one more status update to see if
942                          * resyncing gets enabled.  It gets disabled
943                          * temporarily when WCE is changed.
944                          */
945                         return;
946                 }
947                 break;
948         case MPT_RAID_MWCE_ON:
949                 if (mwce)
950                         return;
951                 break;
952         case MPT_RAID_MWCE_OFF:
953                 if (!mwce)
954                         return;
955                 break;
956         case MPT_RAID_MWCE_NC:
957                 return;
958         }
959
960         req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
961         if (req == NULL) {
962                 mpt_vol_prt(mpt, mpt_vol,
963                             "mpt_verify_mwce: Get request failed!\n");
964                 return;
965         }
966
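        /*
         * Annotation: flip the cached WCE bit just long enough to capture
         * the desired settings word in 'data', then flip it back.  The
         * cached page is only committed (toggled once more) after the IOC
         * reports that the CHANGE_VOLUME_SETTINGS action succeeded.
         */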
967         vol_pg->VolumeSettings.Settings ^=
968             MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
969         memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
970         vol_pg->VolumeSettings.Settings ^=
971             MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
972         rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
973                                 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
974                                 data, /*addr*/0, /*len*/0,
975                                 /*write*/FALSE, /*wait*/TRUE);
976         if (rv == ETIMEDOUT) {
977                 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
978                             "Write Cache Enable Timed-out\n");
979                 return;
980         }
981         ar = REQ_TO_RAID_ACTION_RESULT(req);
982         if (rv != 0
983          || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
984          || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
985                 mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
986                             "%d:%x:%x\n", rv, req->IOCStatus,
987                             ar->action_status);
988         } else {
989                 vol_pg->VolumeSettings.Settings ^=
990                     MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
991         }
992         mpt_free_request(mpt, req);
993 }
994
995 static void
996 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
997 {
998         request_t *req;
999         struct mpt_raid_action_result *ar;
1000         CONFIG_PAGE_RAID_VOL_0  *vol_pg;
1001         u_int prio;
1002         int rv;
1003
1004         vol_pg = mpt_vol->config_page;
1005
1006         if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
1007                 return;
1008
1009         /*
1010          * If the current RAID resync rate does not
1011          * match our configured rate, update it.
1012          */
1013         prio = vol_pg->VolumeSettings.Settings
1014              & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1015         if (vol_pg->ResyncRate != 0
1016          && vol_pg->ResyncRate != mpt->raid_resync_rate) {
1017
1018                 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1019                 if (req == NULL) {
1020                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1021                                     "Get request failed!\n");
1022                         return;
1023                 }
1024
1025                 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1026                                         MPI_RAID_ACTION_SET_RESYNC_RATE,
1027                                         mpt->raid_resync_rate, /*addr*/0,
1028                                         /*len*/0, /*write*/FALSE, /*wait*/TRUE);
1029                 if (rv == ETIMEDOUT) {
1030                         mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1031                                     "Resync Rate Setting Timed-out\n");
1032                         return;
1033                 }
1034
1035                 ar = REQ_TO_RAID_ACTION_RESULT(req);
1036                 if (rv != 0
1037                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1038                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1039                         mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1040                                     "%d:%x:%x\n", rv, req->IOCStatus,
1041                                     ar->action_status);
1042                 } else
1043                         vol_pg->ResyncRate = mpt->raid_resync_rate;
1044                 mpt_free_request(mpt, req);
1045         } else if ((prio && mpt->raid_resync_rate < 128)
1046                 || (!prio && mpt->raid_resync_rate >= 128)) {
1047                 uint32_t data;
1048
1049                 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1050                 if (req == NULL) {
1051                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1052                                     "Get request failed!\n");
1053                         return;
1054                 }
1055
1056                 vol_pg->VolumeSettings.Settings ^=
1057                     MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1058                 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
1059                 vol_pg->VolumeSettings.Settings ^=
1060                     MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1061                 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1062                                         MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
1063                                         data, /*addr*/0, /*len*/0,
1064                                         /*write*/FALSE, /*wait*/TRUE);
1065                 if (rv == ETIMEDOUT) {
1066                         mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1067                                     "Resync Rate Setting Timed-out\n");
1068                         return;
1069                 }
1070                 ar = REQ_TO_RAID_ACTION_RESULT(req);
1071                 if (rv != 0
1072                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1073                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1074                         mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1075                                     "%d:%x:%x\n", rv, req->IOCStatus,
1076                                     ar->action_status);
1077                 } else {
1078                         vol_pg->VolumeSettings.Settings ^=
1079                             MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1080                 }
1081
1082                 mpt_free_request(mpt, req);
1083         }
1084 }
1085
1086 static void
1087 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1088                        struct cam_path *path)
1089 {
1090         struct ccb_relsim crs;
1091
1092         xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1093         crs.ccb_h.func_code = XPT_REL_SIMQ;
1094         crs.ccb_h.flags = CAM_DEV_QFREEZE;
1095         crs.release_flags = RELSIM_ADJUST_OPENINGS;
1096         crs.openings = mpt->raid_queue_depth;
1097         xpt_action((union ccb *)&crs);
1098         if (crs.ccb_h.status != CAM_REQ_CMP)
1099                 mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1100                             "with CAM status %#x\n", crs.ccb_h.status);
1101 }
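
/*
 * Annotation: an XPT_REL_SIMQ CCB with RELSIM_ADJUST_OPENINGS is the CAM
 * mechanism for changing a device's number of concurrent openings.  Here it
 * clamps each RAID volume to mpt->raid_queue_depth (tunable through the
 * mpt_raid sysctl tree set up by mpt_raid_sysctl_attach()) when the volume's
 * device shows up via the AC_FOUND_DEVICE async callback above.
 */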
1102
1103 static void
1104 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1105 {
1106         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1107         u_int i;
1108
1109         vol_pg = mpt_vol->config_page;
1110         mpt_vol_prt(mpt, mpt_vol, "Settings (");
1111         for (i = 1; i <= 0x8000; i <<= 1) {
1112                 switch (vol_pg->VolumeSettings.Settings & i) {
1113                 case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1114                         mpt_prtc(mpt, " Member-WCE");
1115                         break;
1116                 case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1117                         mpt_prtc(mpt, " Offline-On-SMART-Err");
1118                         break;
1119                 case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1120                         mpt_prtc(mpt, " Hot-Plug-Spares");
1121                         break;
1122                 case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1123                         mpt_prtc(mpt, " High-Priority-ReSync");
1124                         break;
1125                 default:
1126                         break;
1127                 }
1128         }
1129         mpt_prtc(mpt, " )\n");
1130         if (vol_pg->VolumeSettings.HotSparePool != 0) {
1131                 mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1132                             powerof2(vol_pg->VolumeSettings.HotSparePool)
1133                           ? ":" : "s:");
1134                 for (i = 0; i < 8; i++) {
1135                         u_int mask;
1136
1137                         mask = 0x1 << i;
1138                         if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1139                                 continue;
1140                         mpt_prtc(mpt, " %d", i);
1141                 }
1142                 mpt_prtc(mpt, "\n");
1143         }
1144         mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1145         for (i = 0; i < vol_pg->NumPhysDisks; i++){
1146                 struct mpt_raid_disk *mpt_disk;
1147                 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1148                 int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1149                 U8 f, s;
1150
1151                 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1152                 disk_pg = &mpt_disk->config_page;
1153                 mpt_prtc(mpt, "      ");
1154                 mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
1155                          pt_bus, disk_pg->PhysDiskID);
1156                 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1157                         mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
1158                             "Primary" : "Secondary");
1159                 } else {
1160                         mpt_prtc(mpt, "Stripe Position %d",
1161                                  mpt_disk->member_number);
1162                 }
1163                 f = disk_pg->PhysDiskStatus.Flags;
1164                 s = disk_pg->PhysDiskStatus.State;
1165                 if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
1166                         mpt_prtc(mpt, " Out of Sync");
1167                 }
1168                 if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
1169                         mpt_prtc(mpt, " Quiesced");
1170                 }
1171                 if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
1172                         mpt_prtc(mpt, " Inactive");
1173                 }
1174                 if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
1175                         mpt_prtc(mpt, " Was Optimal");
1176                 }
1177                 if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
1178                         mpt_prtc(mpt, " Was Non-Optimal");
1179                 }
1180                 switch (s) {
1181                 case MPI_PHYSDISK0_STATUS_ONLINE:
1182                         mpt_prtc(mpt, " Online");
1183                         break;
1184                 case MPI_PHYSDISK0_STATUS_MISSING:
1185                         mpt_prtc(mpt, " Missing");
1186                         break;
1187                 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1188                         mpt_prtc(mpt, " Incompatible");
1189                         break;
1190                 case MPI_PHYSDISK0_STATUS_FAILED:
1191                         mpt_prtc(mpt, " Failed");
1192                         break;
1193                 case MPI_PHYSDISK0_STATUS_INITIALIZING:
1194                         mpt_prtc(mpt, " Initializing");
1195                         break;
1196                 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1197                         mpt_prtc(mpt, " Requested Offline");
1198                         break;
1199                 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1200                         mpt_prtc(mpt, " Requested Failed");
1201                         break;
1202                 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1203                 default:
1204                         mpt_prtc(mpt, " Offline Other (%x)", s);
1205                         break;
1206                 }
1207                 mpt_prtc(mpt, "\n");
1208         }
1209 }
1210
1211 static void
1212 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1213 {
1214         CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1215         int rd_bus = cam_sim_bus(mpt->sim);
1216         int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1217         u_int i;
1218
1219         disk_pg = &mpt_disk->config_page;
1220         mpt_disk_prt(mpt, mpt_disk,
1221                      "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1222                      device_get_nameunit(mpt->dev), rd_bus,
1223                      disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1224                      pt_bus, (int)(mpt_disk - mpt->raid_disks));
1225         if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1226                 return;
1227         mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1228                      powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1229                    ? ":" : "s:");
1230         for (i = 0; i < 8; i++) {
1231                 u_int mask;
1232
1233                 mask = 0x1 << i;
1234                 if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1235                         continue;
1236                 mpt_prtc(mpt, " %d", i);
1237         }
1238         mpt_prtc(mpt, "\n");
1239 }
1240
1241 static void
1242 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1243                       IOC_3_PHYS_DISK *ioc_disk)
1244 {
1245         int rv;
1246
1247         rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1248                                  /*PageNumber*/0, ioc_disk->PhysDiskNum,
1249                                  &mpt_disk->config_page.Header,
1250                                  /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1251         if (rv != 0) {
1252                 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1253                         "Failed to read RAID Disk Hdr(%d)\n",
1254                         ioc_disk->PhysDiskNum);
1255                 return;
1256         }
1257         rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1258                                    &mpt_disk->config_page.Header,
1259                                    sizeof(mpt_disk->config_page),
1260                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1261         if (rv != 0)
1262                 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1263                         "Failed to read RAID Disk Page(%d)\n",
1264                         ioc_disk->PhysDiskNum);
1265         mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
1266 }
1267
1268 static void
1269 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1270     CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1271 {
1272         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1273         struct mpt_raid_action_result *ar;
1274         request_t *req;
1275         int rv;
1276         int i;
1277
1278         vol_pg = mpt_vol->config_page;
1279         mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1280
1281         rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1282             ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
1283         if (rv != 0) {
1284                 mpt_vol_prt(mpt, mpt_vol,
1285                     "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
1286                     ioc_vol->VolumePageNumber);
1287                 return;
1288         }
1289
1290         rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1291             &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
1292         if (rv != 0) {
1293                 mpt_vol_prt(mpt, mpt_vol,
1294                     "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
1295                     ioc_vol->VolumePageNumber);
1296                 return;
1297         }
1298         mpt2host_config_page_raid_vol_0(vol_pg);
1299
1300         mpt_vol->flags |= MPT_RVF_ACTIVE;
1301
1302         /* Update disk entry array data. */
1303         for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1304                 struct mpt_raid_disk *mpt_disk;
1305                 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1306                 mpt_disk->volume = mpt_vol;
1307                 mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
1308                 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1309                         mpt_disk->member_number--;
1310                 }
1311         }
1312
1313         if ((vol_pg->VolumeStatus.Flags
1314            & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1315                 return;
1316
1317         req = mpt_get_request(mpt, TRUE);
1318         if (req == NULL) {
1319                 mpt_vol_prt(mpt, mpt_vol,
1320                     "mpt_refresh_raid_vol: Get request failed!\n");
1321                 return;
1322         }
1323         rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
1324             MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
1325         if (rv == ETIMEDOUT) {
1326                 mpt_vol_prt(mpt, mpt_vol,
1327                     "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
1328                 mpt_free_request(mpt, req);
1329                 return;
1330         }
1331
1332         ar = REQ_TO_RAID_ACTION_RESULT(req);
1333         if (rv == 0
1334          && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1335          && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1336                 memcpy(&mpt_vol->sync_progress,
1337                        &ar->action_data.indicator_struct,
1338                        sizeof(mpt_vol->sync_progress));
1339                 mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
1340         } else {
1341                 mpt_vol_prt(mpt, mpt_vol,
1342                     "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
1343         }
1344         mpt_free_request(mpt, req);
1345 }
1346
1347 /*
1348  * Update in-core information about RAID support.  We update any entries
1349  * that didn't previously exist or have been marked as needing to
1350  * be updated by our event handler.  Interesting changes are displayed
1351  * to the console.
1352  */
1353 static int
1354 mpt_refresh_raid_data(struct mpt_softc *mpt)
1355 {
1356         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1357         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1358         IOC_3_PHYS_DISK *ioc_disk;
1359         IOC_3_PHYS_DISK *ioc_last_disk;
1360         CONFIG_PAGE_RAID_VOL_0  *vol_pg;
1361         size_t len;
1362         int rv;
1363         int i;
1364         u_int nonopt_volumes;
1365
1366         if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1367                 return (0);
1368         }
1369
1370         /*
1371          * Mark all items as unreferenced by the configuration.
1372          * This allows us to find, report, and discard stale
1373          * entries.
1374          */
1375         for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1376                 mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1377         }
1378         for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1379                 mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1380         }
1381
1382         /*
1383          * Get Physical Disk information.
1384          */
1385         len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1386         rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1387                                    &mpt->ioc_page3->Header, len,
1388                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1389         if (rv) {
1390                 mpt_prt(mpt,
1391                     "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1392                 return (-1);
1393         }
1394         mpt2host_config_page_ioc3(mpt->ioc_page3);
1395
1396         ioc_disk = mpt->ioc_page3->PhysDisk;
1397         ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1398         for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1399                 struct mpt_raid_disk *mpt_disk;
1400
1401                 mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1402                 mpt_disk->flags |= MPT_RDF_REFERENCED;
1403                 if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1404                  != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1405
1406                         mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1407
1408                 }
1409                 mpt_disk->flags |= MPT_RDF_ACTIVE;
1410                 mpt->raid_rescan++;
1411         }
1412
1413         /*
1414          * Refresh volume data.
1415          */
1416         len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1417         rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1418                                    &mpt->ioc_page2->Header, len,
1419                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1420         if (rv) {
1421                 mpt_prt(mpt, "mpt_refresh_raid_data: "
1422                         "Failed to read IOC Page 2\n");
1423                 return (-1);
1424         }
1425         mpt2host_config_page_ioc2(mpt->ioc_page2);
1426
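             /*
              * Walk the active volumes from IOC Page 2.  Volumes without a
              * cached RAID volume page are skipped; stale entries and
              * volumes with a resync in progress are refreshed from the
              * controller.
              */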
1427         ioc_vol = mpt->ioc_page2->RaidVolume;
1428         ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1429         for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1430                 struct mpt_raid_volume *mpt_vol;
1431
1432                 mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1433                 mpt_vol->flags |= MPT_RVF_REFERENCED;
1434                 vol_pg = mpt_vol->config_page;
1435                 if (vol_pg == NULL)
1436                         continue;
1437                 if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1438                   != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1439                  || (vol_pg->VolumeStatus.Flags
1440                    & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1441
1442                         mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1443                 }
1444                 mpt_vol->flags |= MPT_RVF_ACTIVE;
1445         }
1446
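             /*
              * Report on each active volume: retire entries that are no
              * longer configured, announce new ones, tally non-optimal
              * volumes, decode the volume status flags, and print resync
              * progress for volumes that are currently re-syncing.
              */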
1447         nonopt_volumes = 0;
1448         for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1449                 struct mpt_raid_volume *mpt_vol;
1450                 uint64_t total;
1451                 uint64_t left;
1452                 int m;
1453                 u_int prio;
1454
1455                 mpt_vol = &mpt->raid_volumes[i];
1456
1457                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1458                         continue;
1459                 }
1460
1461                 vol_pg = mpt_vol->config_page;
1462                 if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1463                  == MPT_RVF_ANNOUNCED) {
1464                         mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1465                         mpt_vol->flags = 0;
1466                         continue;
1467                 }
1468
1469                 if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1470                         mpt_announce_vol(mpt, mpt_vol);
1471                         mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1472                 }
1473
1474                 if (vol_pg->VolumeStatus.State !=
1475                     MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1476                         nonopt_volumes++;
1477
1478                 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1479                         continue;
1480
1481                 mpt_vol->flags |= MPT_RVF_UP2DATE;
1482                 mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1483                     mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1484                 mpt_verify_mwce(mpt, mpt_vol);
1485
1486                 if (vol_pg->VolumeStatus.Flags == 0) {
1487                         continue;
1488                 }
1489
1490                 mpt_vol_prt(mpt, mpt_vol, "Status (");
1491                 for (m = 1; m <= 0x80; m <<= 1) {
1492                         switch (vol_pg->VolumeStatus.Flags & m) {
1493                         case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1494                                 mpt_prtc(mpt, " Enabled");
1495                                 break;
1496                         case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1497                                 mpt_prtc(mpt, " Quiesced");
1498                                 break;
1499                         case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1500                                 mpt_prtc(mpt, " Re-Syncing");
1501                                 break;
1502                         case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1503                                 mpt_prtc(mpt, " Inactive");
1504                                 break;
1505                         default:
1506                                 break;
1507                         }
1508                 }
1509                 mpt_prtc(mpt, " )\n");
1510
1511                 if ((vol_pg->VolumeStatus.Flags
1512                    & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1513                         continue;
1514
1515                 mpt_verify_resync_rate(mpt, mpt_vol);
1516
1517                 left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1518                 total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1519                 if (vol_pg->ResyncRate != 0) {
1520
1521                         prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1522                         mpt_vol_prt(mpt, mpt_vol, "Rate %u.%03u%%\n",
1523                             prio / 1000, prio % 1000);
1524                 } else {
1525                         prio = vol_pg->VolumeSettings.Settings
1526                              & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1527                         mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1528                             prio ? "High" : "Low");
1529                 }
1530                 mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1531                             "blocks remaining\n", (uintmax_t)left,
1532                             (uintmax_t)total);
1533
1534                 /* Periodically report on sync progress. */
1535                 mpt_schedule_raid_refresh(mpt);
1536         }
1537
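             /*
              * Do the same for physical disks: drop entries that are no
              * longer configured, announce new ones, and decode the disk
              * status flags for anything that has changed.
              */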
1538         for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1539                 struct mpt_raid_disk *mpt_disk;
1540                 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1541                 int m;
1542
1543                 mpt_disk = &mpt->raid_disks[i];
1544                 disk_pg = &mpt_disk->config_page;
1545
1546                 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1547                         continue;
1548
1549                 if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1550                  == MPT_RDF_ANNOUNCED) {
1551                         mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1552                         mpt_disk->flags = 0;
1553                         mpt->raid_rescan++;
1554                         continue;
1555                 }
1556
1557                 if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1558
1559                         mpt_announce_disk(mpt, mpt_disk);
1560                         mpt_disk->flags |= MPT_RDF_ANNOUNCED;
1561                 }
1562
1563                 if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1564                         continue;
1565
1566                 mpt_disk->flags |= MPT_RDF_UP2DATE;
1567                 mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1568                 if (disk_pg->PhysDiskStatus.Flags == 0)
1569                         continue;
1570
1571                 mpt_disk_prt(mpt, mpt_disk, "Status (");
1572                 for (m = 1; m <= 0x80; m <<= 1) {
1573                         switch (disk_pg->PhysDiskStatus.Flags & m) {
1574                         case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1575                                 mpt_prtc(mpt, " Out-Of-Sync");
1576                                 break;
1577                         case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1578                                 mpt_prtc(mpt, " Quiesced");
1579                                 break;
1580                         default:
1581                                 break;
1582                         }
1583                 }
1584                 mpt_prtc(mpt, " )\n");
1585         }
1586
1587         mpt->raid_nonopt_volumes = nonopt_volumes;
1588         return (0);
1589 }
1590
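     /*
      * RAID status callout handler.  The MPT lock must already be held;
      * all this does is call mpt_raid_wakeup() to prod the RAID worker.
      */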
1591 static void
1592 mpt_raid_timer(void *arg)
1593 {
1594         struct mpt_softc *mpt;
1595
1596         mpt = (struct mpt_softc *)arg;
1597         MPT_LOCK_ASSERT(mpt);
1598         mpt_raid_wakeup(mpt);
1599 }
1600
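     /*
      * (Re)arm the RAID timer so another refresh/report pass runs after
      * MPT_RAID_SYNC_REPORT_INTERVAL ticks.
      */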
1601 static void
1602 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1603 {
1604
1605         callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1606                       mpt_raid_timer, mpt);
1607 }
1608
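     /*
      * Release all RAID state: per-volume config pages, the volume and
      * disk arrays, and the cached copies of IOC Pages 2 and 3.
      */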
1609 void
1610 mpt_raid_free_mem(struct mpt_softc *mpt)
1611 {
1612
1613         if (mpt->raid_volumes) {
1614                 struct mpt_raid_volume *mpt_raid;
1615                 int i;
1616                 for (i = 0; i < mpt->raid_max_volumes; i++) {
1617                         mpt_raid = &mpt->raid_volumes[i];
1618                         if (mpt_raid->config_page) {
1619                                 kfree(mpt_raid->config_page, M_DEVBUF);
1620                                 mpt_raid->config_page = NULL;
1621                         }
1622                 }
1623                 kfree(mpt->raid_volumes, M_DEVBUF);
1624                 mpt->raid_volumes = NULL;
1625         }
1626         if (mpt->raid_disks) {
1627                 kfree(mpt->raid_disks, M_DEVBUF);
1628                 mpt->raid_disks = NULL;
1629         }
1630         if (mpt->ioc_page2) {
1631                 kfree(mpt->ioc_page2, M_DEVBUF);
1632                 mpt->ioc_page2 = NULL;
1633         }
1634         if (mpt->ioc_page3) {
1635                 kfree(mpt->ioc_page3, M_DEVBUF);
1636                 mpt->ioc_page3 = NULL;
1637         }
1638         mpt->raid_max_volumes = 0;
1639         mpt->raid_max_disks = 0;
1640 }
1641
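     /*
      * Validate and record a new resync rate, then push it out to every
      * active volume.
      */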
1642 static int
1643 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1644 {
1645         struct mpt_raid_volume *mpt_vol;
1646
1647         if ((rate > MPT_RAID_RESYNC_RATE_MAX
1648           || rate < MPT_RAID_RESYNC_RATE_MIN)
1649          && rate != MPT_RAID_RESYNC_RATE_NC)
1650                 return (EINVAL);
1651
1652         MPT_LOCK(mpt);
1653         mpt->raid_resync_rate = rate;
1654         RAID_VOL_FOREACH(mpt, mpt_vol) {
1655                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1656                         continue;
1657                 }
1658                 mpt_verify_resync_rate(mpt, mpt_vol);
1659         }
1660         MPT_UNLOCK(mpt);
1661         return (0);
1662 }
1663
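     /*
      * Validate and record a new default volume queue depth (1-255), then
      * build a CAM path to each active volume so the new depth can be
      * applied.
      */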
1664 static int
1665 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1666 {
1667         struct mpt_raid_volume *mpt_vol;
1668
1669         if (vol_queue_depth > 255 || vol_queue_depth < 1)
1670                 return (EINVAL);
1671
1672         MPT_LOCK(mpt);
1673         mpt->raid_queue_depth = vol_queue_depth;
1674         RAID_VOL_FOREACH(mpt, mpt_vol) {
1675                 struct cam_path *path;
1676                 int error;
1677
1678                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1679                         continue;
1680
1681                 mpt->raid_rescan = 0;
1682
1683                 MPTLOCK_2_CAMLOCK(mpt);
1684                 error = xpt_create_path(&path, xpt_periph,
1685                                         cam_sim_path(mpt->sim),
1686                                         mpt_vol->config_page->VolumeID,
1687                                         /*lun*/0);
1688                 if (error != CAM_REQ_CMP) {
1689                         CAMLOCK_2_MPTLOCK(mpt);
1690                         mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1691                         continue;
1692                 }
1693                 mpt_adjust_queue_depth(mpt, mpt_vol, path);
1694                 xpt_free_path(path);
1695                 CAMLOCK_2_MPTLOCK(mpt);
1696         }
1697         MPT_UNLOCK(mpt);
1698         return (0);
1699 }
1700
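     /*
      * Apply a new member write cache enable (MWCE) policy to all active
      * volumes, warning when an unsafe shutdown may have left a volume in
      * need of a full resync.
      */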
1701 static int
1702 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1703 {
1704         struct mpt_raid_volume *mpt_vol;
1705         int force_full_resync;
1706
1707         MPT_LOCK(mpt);
1708         if (mwce == mpt->raid_mwce_setting) {
1709                 MPT_UNLOCK(mpt);
1710                 return (0);
1711         }
1712
1713         /*
1714          * Catch MWCE being left on due to a failed shutdown.  Since
1715          * sysctls cannot be set by the loader, we treat the first
1716          * setting of this variable specially and force a full volume
1717          * resync if MWCE is enabled and a resync is in progress.
1718          */
1719         force_full_resync = 0;
1720         if (mpt->raid_mwce_set == 0
1721          && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1722          && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1723                 force_full_resync = 1;
1724
1725         mpt->raid_mwce_setting = mwce;
1726         RAID_VOL_FOREACH(mpt, mpt_vol) {
1727                 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1728                 int resyncing;
1729                 int vol_mwce;
1730
1731                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1732                         continue;
1733
1734                 vol_pg = mpt_vol->config_page;
1735                 resyncing = vol_pg->VolumeStatus.Flags
1736                           & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1737                 vol_mwce = vol_pg->VolumeSettings.Settings
1738                          & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1739                 if (force_full_resync && resyncing && vol_mwce) {
1740
1741                         /*
1742                          * XXX disable/enable volume should force a resync,
1743                          *     but we'll need to quiesce, drain, and restart
1744                          *     I/O to do that.
1745                          */
1746                         mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1747                                     "detected.  Suggest full resync.\n");
1748                 }
1749                 mpt_verify_mwce(mpt, mpt_vol);
1750         }
1751         mpt->raid_mwce_set = 1;
1752         MPT_UNLOCK(mpt);
1753         return (0);
1754 }
1755
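     /*
      * Names accepted by the vol_member_wce sysctl, indexed by the
      * mpt_raid_mwce_t setting values.
      */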
1756 static const char *mpt_vol_mwce_strs[] =
1757 {
1758         "On",
1759         "Off",
1760         "On-During-Rebuild",
1761         "NC"
1762 };
1763
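     /*
      * sysctl handler for vol_member_wce: report the current setting as a
      * string and, on write, translate the supplied string back into an
      * MWCE setting.
      */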
1764 static int
1765 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1766 {
1767         char inbuf[20];
1768         struct mpt_softc *mpt;
1769         const char *str;
1770         int error;
1771         u_int size;
1772         u_int i;
1773
1774         mpt = (struct mpt_softc *)arg1;
1775         str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1776         error = SYSCTL_OUT(req, str, strlen(str) + 1);
1777         if (error || !req->newptr) {
1778                 return (error);
1779         }
1780
1781         size = req->newlen - req->newidx;
1782         if (size >= sizeof(inbuf)) {
1783                 return (EINVAL);
1784         }
1785
1786         error = SYSCTL_IN(req, inbuf, size);
1787         if (error) {
1788                 return (error);
1789         }
1790         inbuf[size] = '\0';
1791         for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1792                 if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1793                         return (mpt_raid_set_vol_mwce(mpt, i));
1794                 }
1795         }
1796         return (EINVAL);
1797 }
1798
1799 static int
1800 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1801 {
1802         struct mpt_softc *mpt;
1803         u_int raid_resync_rate;
1804         int error;
1805
1806         mpt = (struct mpt_softc *)arg1;
1807         raid_resync_rate = mpt->raid_resync_rate;
1808
1809         error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1810         if (error || !req->newptr) {
1811                 return (error);
1812         }
1813
1814         return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1815 }
1816
1817 static int
1818 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1819 {
1820         struct mpt_softc *mpt;
1821         u_int raid_queue_depth;
1822         int error;
1823
1824         mpt = (struct mpt_softc *)arg1;
1825         raid_queue_depth = mpt->raid_queue_depth;
1826
1827         error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1828         if (error || !req->newptr) {
1829                 return (error);
1830         }
1831
1832         return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1833 }
1834
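     /*
      * Register the RAID tunables under the adapter's sysctl tree: the
      * writable vol_member_wce, vol_queue_depth, and vol_resync_rate knobs
      * plus the read-only nonoptimal_volumes count.  The exact OID prefix
      * depends on where mpt_sysctl_tree is rooted (assumed, not verified
      * here, to be something like dev.mpt.<unit>), so a resync rate change
      * would look roughly like "sysctl dev.mpt.0.vol_resync_rate=128".
      */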
1835 static void
1836 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1837 {
1838         SYSCTL_ADD_PROC(&mpt->mpt_sysctl_ctx,
1839                         SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
1840                         "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1841                         mpt_raid_sysctl_vol_member_wce, "A",
1842                         "volume member WCE (On, Off, On-During-Rebuild, NC)");
1843
1844         SYSCTL_ADD_PROC(&mpt->mpt_sysctl_ctx,
1845                         SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
1846                         "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1847                         mpt_raid_sysctl_vol_queue_depth, "I",
1848                         "default volume queue depth");
1849
1850         SYSCTL_ADD_PROC(&mpt->mpt_sysctl_ctx,
1851                         SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
1852                         "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1853                         mpt_raid_sysctl_vol_resync_rate, "I",
1854                         "volume resync priority (0 == NC, 1 - 255)");
1855         SYSCTL_ADD_UINT(&mpt->mpt_sysctl_ctx,
1856                         SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
1857                         "nonoptimal_volumes", CTLFLAG_RD,
1858                         &mpt->raid_nonopt_volumes, 0,
1859                         "number of nonoptimal volumes");
1860 }