sys/dev/disk/mpt/mpt_raid.c
1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*-
35  * Some Breakage and Bug Fixing added later.
36  * Copyright (c) 2006, by Matthew Jacob
37  * All Rights Reserved
38  *
39  * Support from LSI-Logic has also gone a great deal toward making this a
40  * workable subsystem and is gratefully acknowledged.
41  *
42  * $FreeBSD: src/sys/dev/mpt/mpt_raid.c,v 1.28 2011/01/12 19:53:56 mdf Exp $
43  */
44
45 #include <dev/disk/mpt/mpt.h>
46 #include <dev/disk/mpt/mpt_raid.h>
47
48 #include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
49 #include "dev/disk/mpt/mpilib/mpi_raid.h"
50
51 #include <bus/cam/cam.h>
52 #include <bus/cam/cam_ccb.h>
53 #include <bus/cam/cam_sim.h>
54 #include <bus/cam/cam_xpt_sim.h>
55
56 #include <sys/devicestat.h>
57 #include <bus/cam/cam_periph.h>
58
59 #include <sys/callout.h>
60 #include <sys/kthread.h>
61 #include <sys/sysctl.h>
62
63 #include <machine/stdarg.h>
64
65 struct mpt_raid_action_result
66 {
67         union {
68                 MPI_RAID_VOL_INDICATOR  indicator_struct;
69                 uint32_t                new_settings;
70                 uint8_t                 phys_disk_num;
71         } action_data;
72         uint16_t                        action_status;
73 };
74
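/*
 * The RAID action result is stored in the request's virtual buffer,
 * immediately behind the RAID action request message itself, so it
 * remains available from the request after the reply frame is parsed.
 */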
75 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
76         (((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
77
78 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
79
80
81 static mpt_probe_handler_t      mpt_raid_probe;
82 static mpt_attach_handler_t     mpt_raid_attach;
83 static mpt_enable_handler_t     mpt_raid_enable;
84 static mpt_event_handler_t      mpt_raid_event;
85 static mpt_shutdown_handler_t   mpt_raid_shutdown;
86 static mpt_reset_handler_t      mpt_raid_ioc_reset;
87 static mpt_detach_handler_t     mpt_raid_detach;
88
89 static struct mpt_personality mpt_raid_personality =
90 {
91         .name           = "mpt_raid",
92         .probe          = mpt_raid_probe,
93         .attach         = mpt_raid_attach,
94         .enable         = mpt_raid_enable,
95         .event          = mpt_raid_event,
96         .reset          = mpt_raid_ioc_reset,
97         .shutdown       = mpt_raid_shutdown,
98         .detach         = mpt_raid_detach,
99 };
100
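/*
 * Hook the RAID personality into the core mpt driver; it requires the
 * CAM personality (mpt_cam) to be present.
 */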
101 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
102 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
103
104 static mpt_reply_handler_t mpt_raid_reply_handler;
105 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
106                                         MSG_DEFAULT_REPLY *reply_frame);
107 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
108 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
109 static void mpt_raid_thread(void *arg);
110 static timeout_t mpt_raid_timer;
111 #if 0
112 static void mpt_enable_vol(struct mpt_softc *mpt,
113                            struct mpt_raid_volume *mpt_vol, int enable);
114 #endif
115 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
116 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
117     struct cam_path *);
118 static void mpt_raid_sysctl_attach(struct mpt_softc *);
119
120 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
121
122 const char *
123 mpt_vol_type(struct mpt_raid_volume *vol)
124 {
125         switch (vol->config_page->VolumeType) {
126         case MPI_RAID_VOL_TYPE_IS:
127                 return ("RAID-0");
128         case MPI_RAID_VOL_TYPE_IME:
129                 return ("RAID-1E");
130         case MPI_RAID_VOL_TYPE_IM:
131                 return ("RAID-1");
132         default:
133                 return ("Unknown");
134         }
135 }
136
137 const char *
138 mpt_vol_state(struct mpt_raid_volume *vol)
139 {
140         switch (vol->config_page->VolumeStatus.State) {
141         case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
142                 return ("Optimal");
143         case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
144                 return ("Degraded");
145         case MPI_RAIDVOL0_STATUS_STATE_FAILED:
146                 return ("Failed");
147         default:
148                 return ("Unknown");
149         }
150 }
151
152 const char *
153 mpt_disk_state(struct mpt_raid_disk *disk)
154 {
155         switch (disk->config_page.PhysDiskStatus.State) {
156         case MPI_PHYSDISK0_STATUS_ONLINE:
157                 return ("Online");
158         case MPI_PHYSDISK0_STATUS_MISSING:
159                 return ("Missing");
160         case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
161                 return ("Incompatible");
162         case MPI_PHYSDISK0_STATUS_FAILED:
163                 return ("Failed");
164         case MPI_PHYSDISK0_STATUS_INITIALIZING:
165                 return ("Initializing");
166         case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
167                 return ("Offline Requested");
168         case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
169                 return ("Failed per Host Request");
170         case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
171                 return ("Offline");
172         default:
173                 return ("Unknown");
174         }
175 }
176
177 void
178 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
179             const char *fmt, ...)
180 {
181         __va_list ap;
182
183         kprintf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
184                (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
185                vol->config_page->VolumeBus, vol->config_page->VolumeID);
186         __va_start(ap, fmt);
187         kvprintf(fmt, ap);
188         __va_end(ap);
189 }
190
191 void
192 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
193              const char *fmt, ...)
194 {
195         __va_list ap;
196
197         if (disk->volume != NULL) {
198                 kprintf("(%s:vol%d:%d): ",
199                        device_get_nameunit(mpt->dev),
200                        disk->volume->config_page->VolumeID,
201                        disk->member_number);
202         } else {
203                 kprintf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
204                        disk->config_page.PhysDiskBus,
205                        disk->config_page.PhysDiskID);
206         }
207         __va_start(ap, fmt);
208         kvprintf(fmt, ap);
209         __va_end(ap);
210 }
211
212 static void
213 mpt_raid_async(void *callback_arg, u_int32_t code,
214                struct cam_path *path, void *arg)
215 {
216         struct mpt_softc *mpt;
217
218         mpt = (struct mpt_softc*)callback_arg;
219         switch (code) {
220         case AC_FOUND_DEVICE:
221         {
222                 struct ccb_getdev *cgd;
223                 struct mpt_raid_volume *mpt_vol;
224
225                 cgd = (struct ccb_getdev *)arg;
226                 if (cgd == NULL) {
227                         break;
228                 }
229
230                 mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
231                          cgd->ccb_h.target_id);
232
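                /*
                 * If the new device matches an active RAID volume,
                 * clamp its queue depth to the configured RAID value.
                 */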
233                 RAID_VOL_FOREACH(mpt, mpt_vol) {
234                         if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
235                                 continue;
236
237                         if (mpt_vol->config_page->VolumeID
238                          == cgd->ccb_h.target_id) {
239                                 mpt_adjust_queue_depth(mpt, mpt_vol, path);
240                                 break;
241                         }
242                 }
243         }
244         default:
245                 break;
246         }
247 }
248
249 int
250 mpt_raid_probe(struct mpt_softc *mpt)
251 {
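        /*
         * Integrated RAID support is only offered when IOC page 2 is
         * present and reports at least one possible physical disk.
         */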
252         if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
253                 return (ENODEV);
254         }
255         return (0);
256 }
257
258 int
259 mpt_raid_attach(struct mpt_softc *mpt)
260 {
261         struct ccb_setasync csa;
262         mpt_handler_t    handler;
263         int              error;
264
265         mpt_callout_init(mpt, &mpt->raid_timer);
266
267         error = mpt_spawn_raid_thread(mpt);
268         if (error != 0) {
269                 mpt_prt(mpt, "Unable to spawn RAID thread!\n");
270                 goto cleanup;
271         }
272
273         MPT_LOCK(mpt);
274         handler.reply_handler = mpt_raid_reply_handler;
275         error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
276                                      &raid_handler_id);
277         if (error != 0) {
278                 mpt_prt(mpt, "Unable to register RAID handler!\n");
279                 goto cleanup;
280         }
281
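        /*
         * Register for CAM "device found" notifications so queue depths
         * of newly arrived volumes can be adjusted as they attach.
         */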
282         xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
283         csa.ccb_h.func_code = XPT_SASYNC_CB;
284         csa.event_enable = AC_FOUND_DEVICE;
285         csa.callback = mpt_raid_async;
286         csa.callback_arg = mpt;
287         xpt_action((union ccb *)&csa);
288         if (csa.ccb_h.status != CAM_REQ_CMP) {
289                 mpt_prt(mpt, "mpt_raid_attach: Unable to register "
290                         "CAM async handler.\n");
291         }
292         MPT_UNLOCK(mpt);
293
294         mpt_raid_sysctl_attach(mpt);
295         return (0);
296 cleanup:
297         MPT_UNLOCK(mpt);
298         mpt_raid_detach(mpt);
299         return (error);
300 }
301
302 int
303 mpt_raid_enable(struct mpt_softc *mpt)
304 {
305         return (0);
306 }
307
308 void
309 mpt_raid_detach(struct mpt_softc *mpt)
310 {
311         struct ccb_setasync csa;
312         mpt_handler_t handler;
313
314         callout_stop(&mpt->raid_timer);
315
316         MPT_LOCK(mpt);
317         mpt_terminate_raid_thread(mpt);
318         handler.reply_handler = mpt_raid_reply_handler;
319         mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
320                                raid_handler_id);
321         xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
322         csa.ccb_h.func_code = XPT_SASYNC_CB;
323         csa.event_enable = 0;
324         csa.callback = mpt_raid_async;
325         csa.callback_arg = mpt;
326         xpt_action((union ccb *)&csa);
327         MPT_UNLOCK(mpt);
328 }
329
330 static void
331 mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
332 {
333         /* Nothing to do yet. */
334 }
335
336 static const char *raid_event_txt[] =
337 {
338         "Volume Created",
339         "Volume Deleted",
340         "Volume Settings Changed",
341         "Volume Status Changed",
342         "Volume Physical Disk Membership Changed",
343         "Physical Disk Created",
344         "Physical Disk Deleted",
345         "Physical Disk Settings Changed",
346         "Physical Disk Status Changed",
347         "Domain Validation Required",
348         "SMART Data Received",
349         "Replace Action Started",
350 };
351
352 static int
353 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
354                MSG_EVENT_NOTIFY_REPLY *msg)
355 {
356         EVENT_DATA_RAID *raid_event;
357         struct mpt_raid_volume *mpt_vol;
358         struct mpt_raid_disk *mpt_disk;
359         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
360         int i;
361         int print_event;
362
363         if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
364                 return (0);
365         }
366
367         raid_event = (EVENT_DATA_RAID *)&msg->Data;
368
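        /*
         * Match the event against an active volume by bus/ID and, when a
         * physical disk number is supplied, against an active disk.
         */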
369         mpt_vol = NULL;
370         vol_pg = NULL;
371         if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
372                 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
373                         mpt_vol = &mpt->raid_volumes[i];
374                         vol_pg = mpt_vol->config_page;
375
376                         if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
377                                 continue;
378
379                         if (vol_pg->VolumeID == raid_event->VolumeID
380                          && vol_pg->VolumeBus == raid_event->VolumeBus)
381                                 break;
382                 }
383                 if (i >= mpt->ioc_page2->MaxVolumes) {
384                         mpt_vol = NULL;
385                         vol_pg = NULL;
386                 }
387         }
388
389         mpt_disk = NULL;
390         if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
391                 mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
392                 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
393                         mpt_disk = NULL;
394                 }
395         }
396
397         print_event = 1;
398         switch(raid_event->ReasonCode) {
399         case MPI_EVENT_RAID_RC_VOLUME_CREATED:
400         case MPI_EVENT_RAID_RC_VOLUME_DELETED:
401                 break;
402         case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
403                 if (mpt_vol != NULL) {
404                         if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
405                                 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
406                         } else {
407                                 /*
408                                  * Coalesce status messages into one
409                                  * per background run of our RAID thread.
410                                  * This removes "spurious" status messages
411                                  * from our output.
412                                  */
413                                 print_event = 0;
414                         }
415                 }
416                 break;
417         case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
418         case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
419                 mpt->raid_rescan++;
420                 if (mpt_vol != NULL) {
421                         mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
422                 }
423                 break;
424         case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
425         case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
426                 mpt->raid_rescan++;
427                 break;
428         case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
429         case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
430                 mpt->raid_rescan++;
431                 if (mpt_disk != NULL) {
432                         mpt_disk->flags &= ~MPT_RDF_UP2DATE;
433                 }
434                 break;
435         case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
436                 mpt->raid_rescan++;
437                 break;
438         case MPI_EVENT_RAID_RC_SMART_DATA:
439         case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
440                 break;
441         }
442
443         if (print_event) {
444                 if (mpt_disk != NULL) {
445                         mpt_disk_prt(mpt, mpt_disk, "%s", "");
446                 } else if (mpt_vol != NULL) {
447                         mpt_vol_prt(mpt, mpt_vol, "%s", "");
448                 } else {
449                         mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
450                                 raid_event->VolumeID);
451
452                         if (raid_event->PhysDiskNum != 0xFF)
453                                 mpt_prtc(mpt, ":%d): ",
454                                          raid_event->PhysDiskNum);
455                         else
456                                 mpt_prtc(mpt, "): ");
457                 }
458
459                 if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
460                         mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
461                                  raid_event->ReasonCode);
462                 else
463                         mpt_prtc(mpt, "%s\n",
464                                  raid_event_txt[raid_event->ReasonCode]);
465         }
466
467         if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
468                 /* XXX Use CAM's print sense for this... */
469                 if (mpt_disk != NULL)
470                         mpt_disk_prt(mpt, mpt_disk, "%s", "");
471                 else
472                         mpt_prt(mpt, "Volume(%d:%d:%d: ",
473                             raid_event->VolumeBus, raid_event->VolumeID,
474                             raid_event->PhysDiskNum);
475                 mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
476                          raid_event->ASC, raid_event->ASCQ);
477         }
478
479         mpt_raid_wakeup(mpt);
480         return (1);
481 }
482
483 static void
484 mpt_raid_shutdown(struct mpt_softc *mpt)
485 {
486         struct mpt_raid_volume *mpt_vol;
487
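        /*
         * Member write caching that is enabled only while rebuilding is
         * switched off on every volume before the system goes down.
         */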
488         if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
489                 return;
490         }
491
492         mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
493         RAID_VOL_FOREACH(mpt, mpt_vol) {
494                 mpt_verify_mwce(mpt, mpt_vol);
495         }
496 }
497
498 static int
499 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
500     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
501 {
502         int free_req;
503
504         if (req == NULL)
505                 return (TRUE);
506
507         free_req = TRUE;
508         if (reply_frame != NULL)
509                 free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
510 #ifdef NOTYET
511         else if (req->ccb != NULL) {
512                 /* Complete Quiesce CCB with error... */
513         }
514 #endif
515
516         req->state &= ~REQ_STATE_QUEUED;
517         req->state |= REQ_STATE_DONE;
518         TAILQ_REMOVE(&mpt->request_pending_list, req, links);
519
520         if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
521                 wakeup(req);
522         } else if (free_req) {
523                 mpt_free_request(mpt, req);
524         }
525
526         return (TRUE);
527 }
528
529 /*
530  * Parse additional completion information in the reply
531  * frame for RAID I/O requests.
532  */
533 static int
534 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
535     MSG_DEFAULT_REPLY *reply_frame)
536 {
537         MSG_RAID_ACTION_REPLY *reply;
538         struct mpt_raid_action_result *action_result;
539         MSG_RAID_ACTION_REQUEST *rap;
540
541         reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
542         req->IOCStatus = le16toh(reply->IOCStatus);
543         rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
544
545         switch (rap->Action) {
546         case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
547                 mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
548                 break;
549         case MPI_RAID_ACTION_ENABLE_PHYS_IO:
550                 mpt_prt(mpt, "ENABLE PHYSIO DONE\n");
551                 break;
552         default:
553                 break;
554         }
555         action_result = REQ_TO_RAID_ACTION_RESULT(req);
556         memcpy(&action_result->action_data, &reply->ActionData,
557             sizeof(action_result->action_data));
558         action_result->action_status = le16toh(reply->ActionStatus);
559         return (TRUE);
560 }
561
562 /*
563  * Utility routine to perform a RAID action command.
564  */
565 int
566 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
567                    struct mpt_raid_disk *disk, request_t *req, u_int Action,
568                    uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
569                    int write, int wait)
570 {
571         MSG_RAID_ACTION_REQUEST *rap;
572         SGE_SIMPLE32 *se;
573
574         rap = req->req_vbuf;
575         memset(rap, 0, sizeof *rap);
576         rap->Action = Action;
577         rap->ActionDataWord = htole32(ActionDataWord);
578         rap->Function = MPI_FUNCTION_RAID_ACTION;
579         rap->VolumeID = vol->config_page->VolumeID;
580         rap->VolumeBus = vol->config_page->VolumeBus;
581         if (disk != NULL)
582                 rap->PhysDiskNum = disk->config_page.PhysDiskNum;
583         else
584                 rap->PhysDiskNum = 0xFF;
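        /*
         * Build a single simple SGE describing the optional action data
         * buffer; the transfer direction follows the write flag.
         */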
585         se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
586         se->Address = htole32(addr);
587         MPI_pSGE_SET_LENGTH(se, len);
588         MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
589             MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
590             MPI_SGE_FLAGS_END_OF_LIST |
591             (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
592         se->FlagsLength = htole32(se->FlagsLength);
593         rap->MsgContext = htole32(req->index | raid_handler_id);
594
595         mpt_check_doorbell(mpt);
596         mpt_send_cmd(mpt, req);
597
598         if (wait) {
599                 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
600                                      /*sleep_ok*/FALSE, /*time_ms*/2000));
601         } else {
602                 return (0);
603         }
604 }
605
606 /*************************** RAID Status Monitoring ***************************/
607 static int
608 mpt_spawn_raid_thread(struct mpt_softc *mpt)
609 {
610         int error;
611
612         /*
613          * Freeze out any CAM transactions until our thread
614          * is able to run at least once.  We need to update
615          * our RAID pages before accepting I/O or we may
616          * reject I/O to an ID we later determine is for a
617          * hidden physdisk.
618          */
619         MPT_LOCK(mpt);
620         xpt_freeze_simq(mpt->phydisk_sim, 1);
621         MPT_UNLOCK(mpt);
622         error = mpt_kthread_create(mpt_raid_thread, mpt,
623             &mpt->raid_thread, /*flags*/0, /*altstack*/0,
624             "mpt_raid%d", mpt->unit);
625         if (error != 0) {
626                 MPT_LOCK(mpt);
627                 xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
628                 MPT_UNLOCK(mpt);
629         }
630         return (error);
631 }
632
633 static void
634 mpt_terminate_raid_thread(struct mpt_softc *mpt)
635 {
636
637         if (mpt->raid_thread == NULL) {
638                 return;
639         }
640         mpt->shutdwn_raid = 1;
641         wakeup(&mpt->raid_volumes);
642         /*
643          * Sleep on a slightly different location
644          * for this interlock just for added safety.
645          */
646         mpt_sleep(mpt, &mpt->raid_thread, 0, "thtrm", 0);
647 }
648
649 static void
650 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
651 {
652     xpt_free_path(ccb->ccb_h.path);
653     kfree(ccb, M_TEMP);
654 }
655
656 static void
657 mpt_raid_thread(void *arg)
658 {
659         struct mpt_softc *mpt;
660         int firstrun;
661
662         mpt = (struct mpt_softc *)arg;
663         firstrun = 1;
664         MPT_LOCK(mpt);
665         while (mpt->shutdwn_raid == 0) {
666
667                 if (mpt->raid_wakeup == 0) {
668                         mpt_sleep(mpt, &mpt->raid_volumes, 0, "idle", 0);
669                         continue;
670                 }
671
672                 mpt->raid_wakeup = 0;
673
674                 if (mpt_refresh_raid_data(mpt)) {
675                         mpt_schedule_raid_refresh(mpt); /* XX NOT QUITE RIGHT */
676                         continue;
677                 }
678
679                 /*
680                  * Now that we have our first snapshot of RAID data,
681                  * allow CAM to access our physical disk bus.
682                  */
683                 if (firstrun) {
684                         firstrun = 0;
685                         MPTLOCK_2_CAMLOCK(mpt);
686                         xpt_release_simq(mpt->phydisk_sim, TRUE);
687                         CAMLOCK_2_MPTLOCK(mpt);
688                 }
689
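                /*
                 * One or more events marked the topology stale; rescan
                 * the hidden physical disk bus.
                 */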
690                 if (mpt->raid_rescan != 0) {
691                         union ccb *ccb;
692                         int error;
693
694                         mpt->raid_rescan = 0;
695                         MPT_UNLOCK(mpt);
696
697                         ccb = kmalloc(sizeof(union ccb), M_TEMP,
698                             M_WAITOK | M_ZERO);
699
700                         MPT_LOCK(mpt);
701                         error = xpt_create_path(&ccb->ccb_h.path, xpt_periph,
702                             cam_sim_path(mpt->phydisk_sim),
703                             CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
704                         if (error != CAM_REQ_CMP) {
705                                 kfree(ccb, M_TEMP);
706                                 mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
707                         } else {
708                                 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
709                                     5/*priority (low)*/);
710                                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
711                                 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
712                                 ccb->crcn.flags = CAM_FLAG_NONE;
713                                 xpt_action(ccb);
714
715                                 /* scan is now in progress */
716                         }
717                 }
718         }
719         mpt->raid_thread = NULL;
720         wakeup(&mpt->raid_thread);
721         MPT_UNLOCK(mpt);
722         mpt_kthread_exit(0);
723 }
724
725 #if 0
726 static void
727 mpt_raid_quiesce_timeout(void *arg)
728 {
729         /* Complete the CCB with error */
730         /* COWWWW */
731 }
732
733 static timeout_t mpt_raid_quiesce_timeout;
734 cam_status
735 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
736                       request_t *req)
737 {
738         union ccb *ccb;
739
740         ccb = req->ccb;
741         if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
742                 return (CAM_REQ_CMP);
743
744         if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
745                 int rv;
746
747                 mpt_disk->flags |= MPT_RDF_QUIESCING;
748                 xpt_freeze_devq(ccb->ccb_h.path, 1);
749
750                 rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
751                                         MPI_RAID_ACTION_QUIESCE_PHYS_IO,
752                                         /*ActionData*/0, /*addr*/0,
753                                         /*len*/0, /*write*/FALSE,
754                                         /*wait*/FALSE);
755                 if (rv != 0)
756                         return (CAM_REQ_CMP_ERR);
757
758                 mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
759 #if 0
760                 if (rv == ETIMEDOUT) {
761                         mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
762                                      "Quiesce Timed-out\n");
763                         xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
764                         return (CAM_REQ_CMP_ERR);
765                 }
766
767                 ar = REQ_TO_RAID_ACTION_RESULT(req);
768                 if (rv != 0
769                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
770                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
771                         mpt_disk_prt(mpt, mpt_disk, "Quiesce Failed: "
772                                     "%d:%x:%x\n", rv, req->IOCStatus,
773                                     ar->action_status);
774                         xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
775                         return (CAM_REQ_CMP_ERR);
776                 }
777 #endif
778                 return (CAM_REQ_INPROG);
779         }
780         return (CAM_REQUEUE_REQ);
781 }
782 #endif
783
784 /* XXX Ignores that there may be multiple busses/IOCs involved. */
785 cam_status
786 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
787 {
788         struct mpt_raid_disk *mpt_disk;
789
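        /*
         * Translate a pass-through target id to the member disk's real
         * SCSI id, but only if that slot holds an active RAID disk.
         */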
790         mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
791         if (ccb->ccb_h.target_id < mpt->raid_max_disks
792          && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
793                 *tgt = mpt_disk->config_page.PhysDiskID;
794                 return (0);
795         }
796         mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
797                  ccb->ccb_h.target_id);
798         return (-1);
799 }
800
801 /* XXX Ignores that there may be multiple busses/IOCs involved. */
802 int
803 mpt_is_raid_volume(struct mpt_softc *mpt, int tgt)
804 {
805         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
806         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
807
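        /*
         * A target is a RAID volume if its id matches one of the active
         * volumes listed in IOC page 2.
         */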
808         if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
809                 return (0);
810         }
811         ioc_vol = mpt->ioc_page2->RaidVolume;
812         ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
813         for (;ioc_vol != ioc_last_vol; ioc_vol++) {
814                 if (ioc_vol->VolumeID == tgt) {
815                         return (1);
816                 }
817         }
818         return (0);
819 }
820
821 #if 0
822 static void
823 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
824                int enable)
825 {
826         request_t *req;
827         struct mpt_raid_action_result *ar;
828         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
829         int enabled;
830         int rv;
831
832         vol_pg = mpt_vol->config_page;
833         enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
834
835         /*
836          * If the setting matches the configuration,
837          * there is nothing to do.
838          */
839         if ((enabled && enable)
840          || (!enabled && !enable))
841                 return;
842
843         req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
844         if (req == NULL) {
845                 mpt_vol_prt(mpt, mpt_vol,
846                             "mpt_enable_vol: Get request failed!\n");
847                 return;
848         }
849
850         rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
851                                 enable ? MPI_RAID_ACTION_ENABLE_VOLUME
852                                        : MPI_RAID_ACTION_DISABLE_VOLUME,
853                                 /*data*/0, /*addr*/0, /*len*/0,
854                                 /*write*/FALSE, /*wait*/TRUE);
855         if (rv == ETIMEDOUT) {
856                 mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
857                             "%s Volume Timed-out\n",
858                             enable ? "Enable" : "Disable");
859                 return;
860         }
861         ar = REQ_TO_RAID_ACTION_RESULT(req);
862         if (rv != 0
863          || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
864          || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
865                 mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
866                             enable ? "Enable" : "Disable",
867                             rv, req->IOCStatus, ar->action_status);
868         }
869
870         mpt_free_request(mpt, req);
871 }
872 #endif
873
874 static void
875 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
876 {
877         request_t *req;
878         struct mpt_raid_action_result *ar;
879         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
880         uint32_t data;
881         int rv;
882         int resyncing;
883         int mwce;
884
885         vol_pg = mpt_vol->config_page;
886         resyncing = vol_pg->VolumeStatus.Flags
887                   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
888         mwce = vol_pg->VolumeSettings.Settings
889              & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
890
891         /*
892          * If the setting matches the configuration,
893          * there is nothing to do.
894          */
895         switch (mpt->raid_mwce_setting) {
896         case MPT_RAID_MWCE_REBUILD_ONLY:
897                 if ((resyncing && mwce) || (!resyncing && !mwce)) {
898                         return;
899                 }
900                 mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
901                 if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
902                         /*
903                          * Wait one more status update to see if
904                          * resyncing gets enabled.  It gets disabled
905                          * temporarily when WCE is changed.
906                          */
907                         return;
908                 }
909                 break;
910         case MPT_RAID_MWCE_ON:
911                 if (mwce)
912                         return;
913                 break;
914         case MPT_RAID_MWCE_OFF:
915                 if (!mwce)
916                         return;
917                 break;
918         case MPT_RAID_MWCE_NC:
919                 return;
920         }
921
922         req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
923         if (req == NULL) {
924                 mpt_vol_prt(mpt, mpt_vol,
925                             "mpt_verify_mwce: Get request failed!\n");
926                 return;
927         }
928
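        /*
         * Toggle the cached WCE bit to build the new settings word for
         * the request, then restore it; the cached copy is only updated
         * once the IOC accepts the change below.
         */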
929         vol_pg->VolumeSettings.Settings ^=
930             MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
931         memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
932         vol_pg->VolumeSettings.Settings ^=
933             MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
934         rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
935                                 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
936                                 data, /*addr*/0, /*len*/0,
937                                 /*write*/FALSE, /*wait*/TRUE);
938         if (rv == ETIMEDOUT) {
939                 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
940                             "Write Cache Enable Timed-out\n");
941                 return;
942         }
943         ar = REQ_TO_RAID_ACTION_RESULT(req);
944         if (rv != 0
945          || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
946          || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
947                 mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
948                             "%d:%x:%x\n", rv, req->IOCStatus,
949                             ar->action_status);
950         } else {
951                 vol_pg->VolumeSettings.Settings ^=
952                     MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
953         }
954         mpt_free_request(mpt, req);
955 }
956
957 static void
958 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
959 {
960         request_t *req;
961         struct mpt_raid_action_result *ar;
962         CONFIG_PAGE_RAID_VOL_0  *vol_pg;
963         u_int prio;
964         int rv;
965
966         vol_pg = mpt_vol->config_page;
967
968         if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
969                 return;
970
971         /*
972          * If the current RAID resync rate does not
973          * match our configured rate, update it.
974          */
975         prio = vol_pg->VolumeSettings.Settings
976              & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
977         if (vol_pg->ResyncRate != 0
978          && vol_pg->ResyncRate != mpt->raid_resync_rate) {
979
980                 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
981                 if (req == NULL) {
982                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
983                                     "Get request failed!\n");
984                         return;
985                 }
986
987                 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
988                                         MPI_RAID_ACTION_SET_RESYNC_RATE,
989                                         mpt->raid_resync_rate, /*addr*/0,
990                                         /*len*/0, /*write*/FALSE, /*wait*/TRUE);
991                 if (rv == ETIMEDOUT) {
992                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
993                                     "Resync Rate Setting Timed-out\n");
994                         return;
995                 }
996
997                 ar = REQ_TO_RAID_ACTION_RESULT(req);
998                 if (rv != 0
999                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1000                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1001                         mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1002                                     "%d:%x:%x\n", rv, req->IOCStatus,
1003                                     ar->action_status);
1004                 } else
1005                         vol_pg->ResyncRate = mpt->raid_resync_rate;
1006                 mpt_free_request(mpt, req);
1007         } else if ((prio && mpt->raid_resync_rate < 128)
1008                 || (!prio && mpt->raid_resync_rate >= 128)) {
1009                 uint32_t data;
1010
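                /*
                 * Resync rates of 128 and above correspond to the
                 * volume's high-priority resync setting; bring the
                 * priority bit in line with the configured rate.
                 */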
1011                 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1012                 if (req == NULL) {
1013                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1014                                     "Get request failed!\n");
1015                         return;
1016                 }
1017
1018                 vol_pg->VolumeSettings.Settings ^=
1019                     MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1020                 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
1021                 vol_pg->VolumeSettings.Settings ^=
1022                     MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1023                 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1024                                         MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
1025                                         data, /*addr*/0, /*len*/0,
1026                                         /*write*/FALSE, /*wait*/TRUE);
1027                 if (rv == ETIMEDOUT) {
1028                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1029                                     "Resync Rate Setting Timed-out\n");
1030                         return;
1031                 }
1032                 ar = REQ_TO_RAID_ACTION_RESULT(req);
1033                 if (rv != 0
1034                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1035                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1036                         mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1037                                     "%d:%x:%x\n", rv, req->IOCStatus,
1038                                     ar->action_status);
1039                 } else {
1040                         vol_pg->VolumeSettings.Settings ^=
1041                             MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1042                 }
1043
1044                 mpt_free_request(mpt, req);
1045         }
1046 }
1047
1048 static void
1049 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1050                        struct cam_path *path)
1051 {
1052         struct ccb_relsim crs;
1053
1054         xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1055         crs.ccb_h.func_code = XPT_REL_SIMQ;
1056         crs.ccb_h.flags = CAM_DEV_QFREEZE;
1057         crs.release_flags = RELSIM_ADJUST_OPENINGS;
1058         crs.openings = mpt->raid_queue_depth;
1059         xpt_action((union ccb *)&crs);
1060         if (crs.ccb_h.status != CAM_REQ_CMP)
1061                 mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1062                             "with CAM status %#x\n", crs.ccb_h.status);
1063 }
1064
1065 static void
1066 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1067 {
1068         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1069         u_int i;
1070
1071         vol_pg = mpt_vol->config_page;
1072         mpt_vol_prt(mpt, mpt_vol, "Settings (");
1073         for (i = 1; i <= 0x8000; i <<= 1) {
1074                 switch (vol_pg->VolumeSettings.Settings & i) {
1075                 case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1076                         mpt_prtc(mpt, " Member-WCE");
1077                         break;
1078                 case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1079                         mpt_prtc(mpt, " Offline-On-SMART-Err");
1080                         break;
1081                 case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1082                         mpt_prtc(mpt, " Hot-Plug-Spares");
1083                         break;
1084                 case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1085                         mpt_prtc(mpt, " High-Priority-ReSync");
1086                         break;
1087                 default:
1088                         break;
1089                 }
1090         }
1091         mpt_prtc(mpt, " )\n");
1092         if (vol_pg->VolumeSettings.HotSparePool != 0) {
1093                 mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1094                             powerof2(vol_pg->VolumeSettings.HotSparePool)
1095                           ? ":" : "s:");
1096                 for (i = 0; i < 8; i++) {
1097                         u_int mask;
1098
1099                         mask = 0x1 << i;
1100                         if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1101                                 continue;
1102                         mpt_prtc(mpt, " %d", i);
1103                 }
1104                 mpt_prtc(mpt, "\n");
1105         }
1106         mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1107         for (i = 0; i < vol_pg->NumPhysDisks; i++){
1108                 struct mpt_raid_disk *mpt_disk;
1109                 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1110                 int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1111                 U8 f, s;
1112
1113                 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1114                 disk_pg = &mpt_disk->config_page;
1115                 mpt_prtc(mpt, "      ");
1116                 mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
1117                          pt_bus, disk_pg->PhysDiskID);
1118                 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1119                         mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
1120                             "Primary" : "Secondary");
1121                 } else {
1122                         mpt_prtc(mpt, "Stripe Position %d",
1123                                  mpt_disk->member_number);
1124                 }
1125                 f = disk_pg->PhysDiskStatus.Flags;
1126                 s = disk_pg->PhysDiskStatus.State;
1127                 if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
1128                         mpt_prtc(mpt, " Out of Sync");
1129                 }
1130                 if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
1131                         mpt_prtc(mpt, " Quiesced");
1132                 }
1133                 if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
1134                         mpt_prtc(mpt, " Inactive");
1135                 }
1136                 if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
1137                         mpt_prtc(mpt, " Was Optimal");
1138                 }
1139                 if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
1140                         mpt_prtc(mpt, " Was Non-Optimal");
1141                 }
1142                 switch (s) {
1143                 case MPI_PHYSDISK0_STATUS_ONLINE:
1144                         mpt_prtc(mpt, " Online");
1145                         break;
1146                 case MPI_PHYSDISK0_STATUS_MISSING:
1147                         mpt_prtc(mpt, " Missing");
1148                         break;
1149                 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1150                         mpt_prtc(mpt, " Incompatible");
1151                         break;
1152                 case MPI_PHYSDISK0_STATUS_FAILED:
1153                         mpt_prtc(mpt, " Failed");
1154                         break;
1155                 case MPI_PHYSDISK0_STATUS_INITIALIZING:
1156                         mpt_prtc(mpt, " Initializing");
1157                         break;
1158                 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1159                         mpt_prtc(mpt, " Requested Offline");
1160                         break;
1161                 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1162                         mpt_prtc(mpt, " Requested Failed");
1163                         break;
1164                 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1165                 default:
1166                         mpt_prtc(mpt, " Offline Other (%x)", s);
1167                         break;
1168                 }
1169                 mpt_prtc(mpt, "\n");
1170         }
1171 }
1172
1173 static void
1174 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1175 {
1176         CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1177         int rd_bus = cam_sim_bus(mpt->sim);
1178         int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1179         u_int i;
1180
1181         disk_pg = &mpt_disk->config_page;
1182         mpt_disk_prt(mpt, mpt_disk,
1183                      "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1184                      device_get_nameunit(mpt->dev), rd_bus,
1185                      disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1186                      pt_bus, (int)(mpt_disk - mpt->raid_disks));
1187         if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1188                 return;
1189         mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1190                      powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1191                    ? ":" : "s:");
1192         for (i = 0; i < 8; i++) {
1193                 u_int mask;
1194
1195                 mask = 0x1 << i;
1196                 if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1197                         continue;
1198                 mpt_prtc(mpt, " %d", i);
1199         }
1200         mpt_prtc(mpt, "\n");
1201 }
1202
1203 static void
1204 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1205                       IOC_3_PHYS_DISK *ioc_disk)
1206 {
1207         int rv;
1208
1209         rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1210                                  /*PageNumber*/0, ioc_disk->PhysDiskNum,
1211                                  &mpt_disk->config_page.Header,
1212                                  /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1213         if (rv != 0) {
1214                 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1215                         "Failed to read RAID Disk Hdr(%d)\n",
1216                         ioc_disk->PhysDiskNum);
1217                 return;
1218         }
1219         rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1220                                    &mpt_disk->config_page.Header,
1221                                    sizeof(mpt_disk->config_page),
1222                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1223         if (rv != 0)
1224                 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1225                         "Failed to read RAID Disk Page(%d)\n",
1226                         ioc_disk->PhysDiskNum);
1227         mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
1228 }
1229
1230 static void
1231 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1232     CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1233 {
1234         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1235         struct mpt_raid_action_result *ar;
1236         request_t *req;
1237         int rv;
1238         int i;
1239
1240         vol_pg = mpt_vol->config_page;
1241         mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1242
1243         rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1244             ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
1245         if (rv != 0) {
1246                 mpt_vol_prt(mpt, mpt_vol,
1247                     "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
1248                     ioc_vol->VolumePageNumber);
1249                 return;
1250         }
1251
1252         rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1253             &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
1254         if (rv != 0) {
1255                 mpt_vol_prt(mpt, mpt_vol,
1256                     "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
1257                     ioc_vol->VolumePageNumber);
1258                 return;
1259         }
1260         mpt2host_config_page_raid_vol_0(vol_pg);
1261
1262         mpt_vol->flags |= MPT_RVF_ACTIVE;
1263
1264         /* Update disk entry array data. */
1265         for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1266                 struct mpt_raid_disk *mpt_disk;
1267                 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1268                 mpt_disk->volume = mpt_vol;
1269                 mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
1270                 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1271                         mpt_disk->member_number--;
1272                 }
1273         }
1274
1275         if ((vol_pg->VolumeStatus.Flags
1276            & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1277                 return;
1278
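        /*
         * A resync is under way; fetch the volume's progress indicator
         * so remaining/total block counts can be reported.
         */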
1279         req = mpt_get_request(mpt, TRUE);
1280         if (req == NULL) {
1281                 mpt_vol_prt(mpt, mpt_vol,
1282                     "mpt_refresh_raid_vol: Get request failed!\n");
1283                 return;
1284         }
1285         rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
1286             MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
1287         if (rv == ETIMEDOUT) {
1288                 mpt_vol_prt(mpt, mpt_vol,
1289                     "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
1290                 mpt_free_request(mpt, req);
1291                 return;
1292         }
1293
1294         ar = REQ_TO_RAID_ACTION_RESULT(req);
1295         if (rv == 0
1296          && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1297          && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1298                 memcpy(&mpt_vol->sync_progress,
1299                        &ar->action_data.indicator_struct,
1300                        sizeof(mpt_vol->sync_progress));
1301                 mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
1302         } else {
1303                 mpt_vol_prt(mpt, mpt_vol,
1304                     "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
1305         }
1306         mpt_free_request(mpt, req);
1307 }
1308
1309 /*
1310  * Update in-core information about RAID support.  We update any entries
1311  * that didn't previously exist or have been marked as needing to
1312  * be updated by our event handler.  Interesting changes are displayed
1313  * to the console.
1314  */
1315 int
1316 mpt_refresh_raid_data(struct mpt_softc *mpt)
1317 {
1318         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1319         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1320         IOC_3_PHYS_DISK *ioc_disk;
1321         IOC_3_PHYS_DISK *ioc_last_disk;
1322         CONFIG_PAGE_RAID_VOL_0  *vol_pg;
1323         size_t len;
1324         int rv;
1325         int i;
1326         u_int nonopt_volumes;
1327
1328         if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1329                 return (0);
1330         }
1331
1332         /*
1333          * Mark all items as unreferenced by the configuration.
1334          * This allows us to find, report, and discard stale
1335          * entries.
1336          */
1337         for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1338                 mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1339         }
1340         for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1341                 mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1342         }
1343
1344         /*
1345          * Get Physical Disk information.
1346          */
1347         len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1348         rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1349                                    &mpt->ioc_page3->Header, len,
1350                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1351         if (rv) {
1352                 mpt_prt(mpt,
1353                     "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1354                 return (-1);
1355         }
1356         mpt2host_config_page_ioc3(mpt->ioc_page3);
1357
1358         ioc_disk = mpt->ioc_page3->PhysDisk;
1359         ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1360         for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1361                 struct mpt_raid_disk *mpt_disk;
1362
1363                 mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1364                 mpt_disk->flags |= MPT_RDF_REFERENCED;
1365                 if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1366                  != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1367
1368                         mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1369
1370                 }
1371                 mpt_disk->flags |= MPT_RDF_ACTIVE;
1372                 mpt->raid_rescan++;
1373         }
1374
1375         /*
1376          * Refresh volume data.
1377          */
1378         len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1379         rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1380                                    &mpt->ioc_page2->Header, len,
1381                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1382         if (rv) {
1383                 mpt_prt(mpt, "mpt_refresh_raid_data: "
1384                         "Failed to read IOC Page 2\n");
1385                 return (-1);
1386         }
1387         mpt2host_config_page_ioc2(mpt->ioc_page2);
1388
1389         ioc_vol = mpt->ioc_page2->RaidVolume;
1390         ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1391         for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1392                 struct mpt_raid_volume *mpt_vol;
1393
1394                 mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1395                 mpt_vol->flags |= MPT_RVF_REFERENCED;
1396                 vol_pg = mpt_vol->config_page;
1397                 if (vol_pg == NULL)
1398                         continue;
1399                 if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1400                   != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1401                  || (vol_pg->VolumeStatus.Flags
1402                    & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1403
1404                         mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1405                 }
1406                 mpt_vol->flags |= MPT_RVF_ACTIVE;
1407         }
1408
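        /*
         * Walk all volume slots: announce new volumes, retire stale
         * entries, report status changes, and count volumes that are
         * not in the optimal state.
         */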
1409         nonopt_volumes = 0;
1410         for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1411                 struct mpt_raid_volume *mpt_vol;
1412                 uint64_t total;
1413                 uint64_t left;
1414                 int m;
1415                 u_int prio;
1416
1417                 mpt_vol = &mpt->raid_volumes[i];
1418
1419                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1420                         continue;
1421                 }
1422
1423                 vol_pg = mpt_vol->config_page;
1424                 if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1425                  == MPT_RVF_ANNOUNCED) {
1426                         mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1427                         mpt_vol->flags = 0;
1428                         continue;
1429                 }
1430
1431                 if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1432                         mpt_announce_vol(mpt, mpt_vol);
1433                         mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1434                 }
1435
1436                 if (vol_pg->VolumeStatus.State !=
1437                     MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1438                         nonopt_volumes++;
1439
1440                 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1441                         continue;
1442
1443                 mpt_vol->flags |= MPT_RVF_UP2DATE;
1444                 mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1445                     mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1446                 mpt_verify_mwce(mpt, mpt_vol);
1447
1448                 if (vol_pg->VolumeStatus.Flags == 0) {
1449                         continue;
1450                 }
1451
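                /*
                 * Scan each status bit and print a label for the flags
                 * we know how to decode.
                 */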
1452                 mpt_vol_prt(mpt, mpt_vol, "Status (");
1453                 for (m = 1; m <= 0x80; m <<= 1) {
1454                         switch (vol_pg->VolumeStatus.Flags & m) {
1455                         case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1456                                 mpt_prtc(mpt, " Enabled");
1457                                 break;
1458                         case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1459                                 mpt_prtc(mpt, " Quiesced");
1460                                 break;
1461                         case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1462                                 mpt_prtc(mpt, " Re-Syncing");
1463                                 break;
1464                         case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1465                                 mpt_prtc(mpt, " Inactive");
1466                                 break;
1467                         default:
1468                                 break;
1469                         }
1470                 }
1471                 mpt_prtc(mpt, " )\n");
1472
1473                 if ((vol_pg->VolumeStatus.Flags
1474                    & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1475                         continue;
1476
1477                 mpt_verify_resync_rate(mpt, mpt_vol);
1478
1479                 left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1480                 total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
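                /*
                 * ResyncRate is an 8-bit setting (0-255); scale it so
                 * prio holds the rate as a percentage in units of 0.001%.
                 */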
1481                 if (vol_pg->ResyncRate != 0) {
1482
1483                         prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1484                         mpt_vol_prt(mpt, mpt_vol, "Rate %d.%03d%%\n",
1485                             prio / 1000, prio % 1000);
1486                 } else {
1487                         prio = vol_pg->VolumeSettings.Settings
1488                              & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1489                         mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1490                             prio ? "High" : "Low");
1491                 }
1492                 mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1493                             "blocks remaining\n", (uintmax_t)left,
1494                             (uintmax_t)total);
1495
1496                 /* Periodically report on sync progress. */
1497                 mpt_schedule_raid_refresh(mpt);
1498         }
1499
1500         for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1501                 struct mpt_raid_disk *mpt_disk;
1502                 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1503                 int m;
1504
1505                 mpt_disk = &mpt->raid_disks[i];
1506                 disk_pg = &mpt_disk->config_page;
1507
1508                 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1509                         continue;
1510
1511                 if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1512                  == MPT_RDF_ANNOUNCED) {
1513                         mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1514                         mpt_disk->flags = 0;
1515                         mpt->raid_rescan++;
1516                         continue;
1517                 }
1518
1519                 if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1520
1521                         mpt_announce_disk(mpt, mpt_disk);
1522                         mpt_disk->flags |= MPT_RDF_ANNOUNCED;
1523                 }
1524
1525                 if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1526                         continue;
1527
1528                 mpt_disk->flags |= MPT_RDF_UP2DATE;
1529                 mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1530                 if (disk_pg->PhysDiskStatus.Flags == 0)
1531                         continue;
1532
1533                 mpt_disk_prt(mpt, mpt_disk, "Status (");
1534                 for (m = 1; m <= 0x80; m <<= 1) {
1535                         switch (disk_pg->PhysDiskStatus.Flags & m) {
1536                         case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1537                                 mpt_prtc(mpt, " Out-Of-Sync");
1538                                 break;
1539                         case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1540                                 mpt_prtc(mpt, " Quiesced");
1541                                 break;
1542                         default:
1543                                 break;
1544                         }
1545                 }
1546                 mpt_prtc(mpt, " )\n");
1547         }
1548
1549         mpt->raid_nonopt_volumes = nonopt_volumes;
1550         return (0);
1551 }
1552
1553 static void
1554 mpt_raid_timer(void *arg)
1555 {
1556         struct mpt_softc *mpt;
1557
1558         mpt = (struct mpt_softc *)arg;
1559         MPT_LOCK_ASSERT(mpt);
1560         mpt_raid_wakeup(mpt);
1561 }
1562
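/*
 * (Re)arm the RAID status timer.  When it fires, mpt_raid_timer() wakes
 * the RAID thread, which refreshes the RAID data and, while a resync is
 * still in progress, reschedules itself through this routine.
 */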
1563 void
1564 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1565 {
1566         callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1567                       mpt_raid_timer, mpt);
1568 }
1569
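/*
 * Release all RAID tracking state allocated at attach time: the
 * per-volume config pages, the volume and disk arrays, and the cached
 * IOC pages 2 and 3.
 */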
1570 void
1571 mpt_raid_free_mem(struct mpt_softc *mpt)
1572 {
1573
1574         if (mpt->raid_volumes) {
1575                 struct mpt_raid_volume *mpt_raid;
1576                 int i;
1577                 for (i = 0; i < mpt->raid_max_volumes; i++) {
1578                         mpt_raid = &mpt->raid_volumes[i];
1579                         if (mpt_raid->config_page) {
1580                                 kfree(mpt_raid->config_page, M_DEVBUF);
1581                                 mpt_raid->config_page = NULL;
1582                         }
1583                 }
1584                 kfree(mpt->raid_volumes, M_DEVBUF);
1585                 mpt->raid_volumes = NULL;
1586         }
1587         if (mpt->raid_disks) {
1588                 kfree(mpt->raid_disks, M_DEVBUF);
1589                 mpt->raid_disks = NULL;
1590         }
1591         if (mpt->ioc_page2) {
1592                 kfree(mpt->ioc_page2, M_DEVBUF);
1593                 mpt->ioc_page2 = NULL;
1594         }
1595         if (mpt->ioc_page3) {
1596                 kfree(mpt->ioc_page3, M_DEVBUF);
1597                 mpt->ioc_page3 = NULL;
1598         }
1599         mpt->raid_max_volumes = 0;
1600         mpt->raid_max_disks = 0;
1601 }
1602
1603 static int
1604 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1605 {
1606         struct mpt_raid_volume *mpt_vol;
1607
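        /*
         * Only rates inside [MPT_RAID_RESYNC_RATE_MIN,
         * MPT_RAID_RESYNC_RATE_MAX] are valid; the lone exception is the
         * "no change" sentinel MPT_RAID_RESYNC_RATE_NC.
         */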
1608         if ((rate > MPT_RAID_RESYNC_RATE_MAX
1609           || rate < MPT_RAID_RESYNC_RATE_MIN)
1610          && rate != MPT_RAID_RESYNC_RATE_NC)
1611                 return (EINVAL);
1612
1613         MPT_LOCK(mpt);
1614         mpt->raid_resync_rate = rate;
1615         RAID_VOL_FOREACH(mpt, mpt_vol) {
1616                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1617                         continue;
1618                 }
1619                 mpt_verify_resync_rate(mpt, mpt_vol);
1620         }
1621         MPT_UNLOCK(mpt);
1622         return (0);
1623 }
1624
1625 static int
1626 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1627 {
1628         struct mpt_raid_volume *mpt_vol;
1629
1630         if (vol_queue_depth > 255 || vol_queue_depth < 1)
1631                 return (EINVAL);
1632
1633         MPT_LOCK(mpt);
1634         mpt->raid_queue_depth = vol_queue_depth;
1635         RAID_VOL_FOREACH(mpt, mpt_vol) {
1636                 struct cam_path *path;
1637                 int error;
1638
1639                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1640                         continue;
1641
1642                 mpt->raid_rescan = 0;
1643
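                /*
                 * The xpt_create_path()/xpt_free_path() calls and the
                 * queue depth adjustment must run under the CAM lock,
                 * so swap locks around them.
                 */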
1644                 MPTLOCK_2_CAMLOCK(mpt);
1645                 error = xpt_create_path(&path, xpt_periph,
1646                                         cam_sim_path(mpt->sim),
1647                                         mpt_vol->config_page->VolumeID,
1648                                         /*lun*/0);
1649                 if (error != CAM_REQ_CMP) {
1650                         CAMLOCK_2_MPTLOCK(mpt);
1651                         mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1652                         continue;
1653                 }
1654                 mpt_adjust_queue_depth(mpt, mpt_vol, path);
1655                 xpt_free_path(path);
1656                 CAMLOCK_2_MPTLOCK(mpt);
1657         }
1658         MPT_UNLOCK(mpt);
1659         return (0);
1660 }
1661
1662 static int
1663 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1664 {
1665         struct mpt_raid_volume *mpt_vol;
1666         int force_full_resync;
1667
1668         MPT_LOCK(mpt);
1669         if (mwce == mpt->raid_mwce_setting) {
1670                 MPT_UNLOCK(mpt);
1671                 return (0);
1672         }
1673
1674         /*
1675          * Catch MWCE being left on due to a failed shutdown.  Since
1676          * sysctls cannot be set by the loader, we treat the first
1677          * setting of this variable specially and force a full volume
1678          * resync if MWCE is enabled and a resync is in progress.
1679          */
1680         force_full_resync = 0;
1681         if (mpt->raid_mwce_set == 0
1682          && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1683          && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1684                 force_full_resync = 1;
1685
1686         mpt->raid_mwce_setting = mwce;
1687         RAID_VOL_FOREACH(mpt, mpt_vol) {
1688                 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1689                 int resyncing;
1690                 int vol_mwce;
1691
1692                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1693                         continue;
1694
1695                 vol_pg = mpt_vol->config_page;
1696                 resyncing = vol_pg->VolumeStatus.Flags
1697                           & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1698                 vol_mwce = vol_pg->VolumeSettings.Settings
1699                      & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1700                 if (force_full_resync && resyncing && vol_mwce) {
1701
1702                         /*
1703                          * XXX disable/enable volume should force a resync,
1704                          *     but we'll need to quiesce, drain, and restart
1705                          *     I/O to do that.
1706                          */
1707                         mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1708                                     "detected.  Suggest full resync.\n");
1709                 }
1710                 mpt_verify_mwce(mpt, mpt_vol);
1711         }
1712         mpt->raid_mwce_set = 1;
1713         MPT_UNLOCK(mpt);
1714         return (0);
1715 }
1716 const char *mpt_vol_mwce_strs[] =
1717 {
1718         "On",
1719         "Off",
1720         "On-During-Rebuild",
1721         "NC"
1722 };
1723
1724 static int
1725 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1726 {
1727         char inbuf[20];
1728         struct mpt_softc *mpt;
1729         const char *str;
1730         int error;
1731         u_int size;
1732         u_int i;
1733
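        /*
         * Report the current member WCE policy as a string.  On a write,
         * match the user-supplied string against mpt_vol_mwce_strs and
         * apply the corresponding setting.
         */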
1734         mpt = (struct mpt_softc *)arg1;
1735         str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1736         error = SYSCTL_OUT(req, str, strlen(str) + 1);
1737         if (error || !req->newptr) {
1738                 return (error);
1739         }
1740
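        /*
         * size is the number of bytes supplied by the caller; reject
         * anything that will not fit in inbuf along with a terminating NUL.
         */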
1741         size = req->newlen - req->newidx;
1742         if (size >= sizeof(inbuf)) {
1743                 return (EINVAL);
1744         }
1745
1746         error = SYSCTL_IN(req, inbuf, size);
1747         if (error) {
1748                 return (error);
1749         }
1750         inbuf[size] = '\0';
1751         for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1752                 if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1753                         return (mpt_raid_set_vol_mwce(mpt, i));
1754                 }
1755         }
1756         return (EINVAL);
1757 }
1758
1759 static int
1760 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1761 {
1762         struct mpt_softc *mpt;
1763         u_int raid_resync_rate;
1764         int error;
1765
1766         mpt = (struct mpt_softc *)arg1;
1767         raid_resync_rate = mpt->raid_resync_rate;
1768
1769         error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1770         if (error || !req->newptr) {
1771                 return (error);
1772         }
1773
1774         return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1775 }
1776
1777 static int
1778 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1779 {
1780         struct mpt_softc *mpt;
1781         u_int raid_queue_depth;
1782         int error;
1783
1784         mpt = (struct mpt_softc *)arg1;
1785         raid_queue_depth = mpt->raid_queue_depth;
1786
1787         error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1788         if (error || !req->newptr) {
1789                 return (error);
1790         }
1791
1792         return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1793 }
1794
1795 static void
1796 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1797 {
1798         SYSCTL_ADD_PROC(&mpt->mpt_sysctl_ctx,
1799                         SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
1800                         "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1801                         mpt_raid_sysctl_vol_member_wce, "A",
1802                         "volume member WCE (On,Off,On-During-Rebuild,NC)");
1803
1804         SYSCTL_ADD_PROC(&mpt->mpt_sysctl_ctx,
1805                         SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
1806                         "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1807                         mpt_raid_sysctl_vol_queue_depth, "I",
1808                         "default volume queue depth");
1809
1810         SYSCTL_ADD_PROC(&mpt->mpt_sysctl_ctx,
1811                         SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
1812                         "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1813                         mpt_raid_sysctl_vol_resync_rate, "I",
1814                         "volume resync priority (0 == NC, 1 - 255)");
1815         SYSCTL_ADD_UINT(&mpt->mpt_sysctl_ctx,
1816                         SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
1817                         "nonoptimal_volumes", CTLFLAG_RD,
1818                         &mpt->raid_nonopt_volumes, 0,
1819                         "number of nonoptimal volumes");
1820 }
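/*
 * Example usage (a sketch only; the exact sysctl path depends on where
 * mpt_sysctl_tree is attached for the controller, assumed here to be
 * dev.mpt.<unit>):
 *
 *   sysctl dev.mpt.0.vol_resync_rate=128
 *   sysctl dev.mpt.0.vol_queue_depth=64
 *   sysctl dev.mpt.0.vol_member_wce=On-During-Rebuild
 */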