1 /*-
2  * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
3  *
4  * Copyright (c) 2005, WHEEL Sp. z o.o.
5  * Copyright (c) 2005 Justin T. Gibbs.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are
10  * met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14  *    substantially similar to the "NO WARRANTY" disclaimer below
15  *    ("Disclaimer") and any redistribution must be conditioned upon including
16  *    a substantially similar Disclaimer requirement for further binary
17  *    redistribution.
18  * 3. Neither the names of the above listed copyright holders nor the names
19  *    of any contributors may be used to endorse or promote products derived
20  *    from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 /*-
35  * Some Breakage and Bug Fixing added later.
36  * Copyright (c) 2006, by Matthew Jacob
37  * All Rights Reserved
38  *
39  * Support from LSI-Logic has also gone a great deal toward making this a
40  * workable subsystem and is gratefully acknowledged.
41  *
42  * $FreeBSD: src/sys/dev/mpt/mpt_raid.c,v 1.20 2009/05/21 12:36:40 jhb Exp $
43  */
44
45 #include <dev/disk/mpt/mpt.h>
46 #include <dev/disk/mpt/mpt_raid.h>
47
48 #include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
49 #include "dev/disk/mpt/mpilib/mpi_raid.h"
50
51 #include <bus/cam/cam.h>
52 #include <bus/cam/cam_ccb.h>
53 #include <bus/cam/cam_sim.h>
54 #include <bus/cam/cam_xpt_periph.h>
55 #include <bus/cam/cam_xpt_sim.h>
56
57 #if __FreeBSD_version < 500000
58 #include <sys/devicestat.h>
59 #define GIANT_REQUIRED
60 #endif
61 #include <bus/cam/cam_periph.h>
62
63 #include <sys/callout.h>
64 #include <sys/kthread.h>
65 #include <sys/sysctl.h>
66
67 #include <machine/stdarg.h>
68
69 struct mpt_raid_action_result
70 {
71         union {
72                 MPI_RAID_VOL_INDICATOR  indicator_struct;
73                 uint32_t                new_settings;
74                 uint8_t                 phys_disk_num;
75         } action_data;
76         uint16_t                        action_status;
77 };
78
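/*
 * The result of a RAID action is stashed in the request buffer immediately
 * past the MSG_RAID_ACTION_REQUEST that was sent; mpt_raid_reply_frame_handler
 * copies the reply's action data and status there so callers can examine it
 * after completion, e.g.:
 *
 *	ar = REQ_TO_RAID_ACTION_RESULT(req);
 *	if (REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
 *	 || ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)
 *		... report the failure ...
 */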
79 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
80         (((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
81
82 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
83
84
85 static mpt_probe_handler_t      mpt_raid_probe;
86 static mpt_attach_handler_t     mpt_raid_attach;
87 static mpt_enable_handler_t     mpt_raid_enable;
88 static mpt_event_handler_t      mpt_raid_event;
89 static mpt_shutdown_handler_t   mpt_raid_shutdown;
90 static mpt_reset_handler_t      mpt_raid_ioc_reset;
91 static mpt_detach_handler_t     mpt_raid_detach;
92
93 static struct mpt_personality mpt_raid_personality =
94 {
95         .name           = "mpt_raid",
96         .probe          = mpt_raid_probe,
97         .attach         = mpt_raid_attach,
98         .enable         = mpt_raid_enable,
99         .event          = mpt_raid_event,
100         .reset          = mpt_raid_ioc_reset,
101         .shutdown       = mpt_raid_shutdown,
102         .detach         = mpt_raid_detach,
103 };
104
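/*
 * Register the RAID personality at SI_ORDER_THIRD, after the core and CAM
 * personalities, and declare a module dependency on mpt_cam, which provides
 * the SIMs through which RAID volumes and pass-thru physical disks are
 * exposed.
 */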
105 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
106 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
107
108 static mpt_reply_handler_t mpt_raid_reply_handler;
109 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
110                                         MSG_DEFAULT_REPLY *reply_frame);
111 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
112 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
113 static void mpt_raid_thread(void *arg);
114 static timeout_t mpt_raid_timer;
115 #if 0
116 static void mpt_enable_vol(struct mpt_softc *mpt,
117                            struct mpt_raid_volume *mpt_vol, int enable);
118 #endif
119 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
120 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
121     struct cam_path *);
122 #if __FreeBSD_version < 500000
123 #define mpt_raid_sysctl_attach(x)       do { } while (0)
124 #else
125 static void mpt_raid_sysctl_attach(struct mpt_softc *);
126 #endif
127
128 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
129
130 const char *
131 mpt_vol_type(struct mpt_raid_volume *vol)
132 {
133         switch (vol->config_page->VolumeType) {
134         case MPI_RAID_VOL_TYPE_IS:
135                 return ("RAID-0");
136         case MPI_RAID_VOL_TYPE_IME:
137                 return ("RAID-1E");
138         case MPI_RAID_VOL_TYPE_IM:
139                 return ("RAID-1");
140         default:
141                 return ("Unknown");
142         }
143 }
144
145 const char *
146 mpt_vol_state(struct mpt_raid_volume *vol)
147 {
148         switch (vol->config_page->VolumeStatus.State) {
149         case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
150                 return ("Optimal");
151         case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
152                 return ("Degraded");
153         case MPI_RAIDVOL0_STATUS_STATE_FAILED:
154                 return ("Failed");
155         default:
156                 return ("Unknown");
157         }
158 }
159
160 const char *
161 mpt_disk_state(struct mpt_raid_disk *disk)
162 {
163         switch (disk->config_page.PhysDiskStatus.State) {
164         case MPI_PHYSDISK0_STATUS_ONLINE:
165                 return ("Online");
166         case MPI_PHYSDISK0_STATUS_MISSING:
167                 return ("Missing");
168         case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
169                 return ("Incompatible");
170         case MPI_PHYSDISK0_STATUS_FAILED:
171                 return ("Failed");
172         case MPI_PHYSDISK0_STATUS_INITIALIZING:
173                 return ("Initializing");
174         case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
175                 return ("Offline Requested");
176         case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
177                 return ("Failed per Host Request");
178         case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
179                 return ("Offline");
180         default:
181                 return ("Unknown");
182         }
183 }
184
185 void
186 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
187             const char *fmt, ...)
188 {
189         __va_list ap;
190
191         kprintf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
192                (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
193                vol->config_page->VolumeBus, vol->config_page->VolumeID);
194         __va_start(ap, fmt);
195         kvprintf(fmt, ap);
196         __va_end(ap);
197 }
198
199 void
200 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
201              const char *fmt, ...)
202 {
203         __va_list ap;
204
205         if (disk->volume != NULL) {
206                 kprintf("(%s:vol%d:%d): ",
207                        device_get_nameunit(mpt->dev),
208                        disk->volume->config_page->VolumeID,
209                        disk->member_number);
210         } else {
211                 kprintf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
212                        disk->config_page.PhysDiskBus,
213                        disk->config_page.PhysDiskID);
214         }
215         __va_start(ap, fmt);
216         kvprintf(fmt, ap);
217         __va_end(ap);
218 }
219
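/*
 * CAM async callback.  On AC_FOUND_DEVICE, locate the active RAID volume
 * whose VolumeID matches the newly reported target and clamp its queue
 * depth to the configured RAID queue depth.
 */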
220 static void
221 mpt_raid_async(void *callback_arg, u_int32_t code,
222                struct cam_path *path, void *arg)
223 {
224         struct mpt_softc *mpt;
225
226         mpt = (struct mpt_softc*)callback_arg;
227         switch (code) {
228         case AC_FOUND_DEVICE:
229         {
230                 struct ccb_getdev *cgd;
231                 struct mpt_raid_volume *mpt_vol;
232
233                 cgd = (struct ccb_getdev *)arg;
234                 if (cgd == NULL) {
235                         break;
236                 }
237
238                 mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
239                          cgd->ccb_h.target_id);
240
241                 RAID_VOL_FOREACH(mpt, mpt_vol) {
242                         if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
243                                 continue;
244
245                         if (mpt_vol->config_page->VolumeID
246                          == cgd->ccb_h.target_id) {
247                                 mpt_adjust_queue_depth(mpt, mpt_vol, path);
248                                 break;
249                         }
250                 }
251         }
252         default:
253                 break;
254         }
255 }
256
257 int
258 mpt_raid_probe(struct mpt_softc *mpt)
259 {
260         if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
261                 return (ENODEV);
262         }
263         return (0);
264 }
265
266 int
267 mpt_raid_attach(struct mpt_softc *mpt)
268 {
269         struct ccb_setasync csa;
270         mpt_handler_t    handler;
271         int              error;
272
273         mpt_callout_init(&mpt->raid_timer);
274
275         error = mpt_spawn_raid_thread(mpt);
276         if (error != 0) {
277                 mpt_prt(mpt, "Unable to spawn RAID thread!\n");
278                 goto cleanup;
279         }
280
281         MPT_LOCK(mpt);
282         handler.reply_handler = mpt_raid_reply_handler;
283         error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
284                                      &raid_handler_id);
285         if (error != 0) {
286                 mpt_prt(mpt, "Unable to register RAID handler!\n");
287                 goto cleanup;
288         }
289
290         xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
291         csa.ccb_h.func_code = XPT_SASYNC_CB;
292         csa.event_enable = AC_FOUND_DEVICE;
293         csa.callback = mpt_raid_async;
294         csa.callback_arg = mpt;
295         xpt_action((union ccb *)&csa);
296         if (csa.ccb_h.status != CAM_REQ_CMP) {
297                 mpt_prt(mpt, "mpt_raid_attach: Unable to register "
298                         "CAM async handler.\n");
299         }
300         MPT_UNLOCK(mpt);
301
302         mpt_raid_sysctl_attach(mpt);
303         return (0);
304 cleanup:
305         MPT_UNLOCK(mpt);
306         mpt_raid_detach(mpt);
307         return (error);
308 }
309
310 int
311 mpt_raid_enable(struct mpt_softc *mpt)
312 {
313         return (0);
314 }
315
316 void
317 mpt_raid_detach(struct mpt_softc *mpt)
318 {
319         struct ccb_setasync csa;
320         mpt_handler_t handler;
321
322         callout_stop(&mpt->raid_timer);
323         MPT_LOCK(mpt);
324         mpt_terminate_raid_thread(mpt);
325
326         handler.reply_handler = mpt_raid_reply_handler;
327         mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
328                                raid_handler_id);
329         xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
330         csa.ccb_h.func_code = XPT_SASYNC_CB;
331         csa.event_enable = 0;
332         csa.callback = mpt_raid_async;
333         csa.callback_arg = mpt;
334         xpt_action((union ccb *)&csa);
335         MPT_UNLOCK(mpt);
336 }
337
338 static void
339 mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
340 {
341         /* Nothing to do yet. */
342 }
343
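/* Event descriptions, indexed by the MPI_EVENT_RAID_RC_* reason code. */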
344 static const char *raid_event_txt[] =
345 {
346         "Volume Created",
347         "Volume Deleted",
348         "Volume Settings Changed",
349         "Volume Status Changed",
350         "Volume Physical Disk Membership Changed",
351         "Physical Disk Created",
352         "Physical Disk Deleted",
353         "Physical Disk Settings Changed",
354         "Physical Disk Status Changed",
355         "Domain Validation Required",
356         "SMART Data Received",
357         "Replace Action Started",
358 };
359
360 static int
361 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
362                MSG_EVENT_NOTIFY_REPLY *msg)
363 {
364         EVENT_DATA_RAID *raid_event;
365         struct mpt_raid_volume *mpt_vol;
366         struct mpt_raid_disk *mpt_disk;
367         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
368         int i;
369         int print_event;
370
371         if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
372                 return (0);
373         }
374
375         raid_event = (EVENT_DATA_RAID *)&msg->Data;
376
377         mpt_vol = NULL;
378         vol_pg = NULL;
379         if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
380                 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
381                         mpt_vol = &mpt->raid_volumes[i];
382                         vol_pg = mpt_vol->config_page;
383
384                         if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
385                                 continue;
386
387                         if (vol_pg->VolumeID == raid_event->VolumeID
388                          && vol_pg->VolumeBus == raid_event->VolumeBus)
389                                 break;
390                 }
391                 if (i >= mpt->ioc_page2->MaxVolumes) {
392                         mpt_vol = NULL;
393                         vol_pg = NULL;
394                 }
395         }
396
397         mpt_disk = NULL;
398         if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
399                 mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
400                 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
401                         mpt_disk = NULL;
402                 }
403         }
404
405         print_event = 1;
406         switch(raid_event->ReasonCode) {
407         case MPI_EVENT_RAID_RC_VOLUME_CREATED:
408         case MPI_EVENT_RAID_RC_VOLUME_DELETED:
409                 break;
410         case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
411                 if (mpt_vol != NULL) {
412                         if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
413                                 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
414                         } else {
415                                 /*
416                                  * Coalesce status messages into one
417                                  * per background run of our RAID thread.
418                                  * This removes "spurious" status messages
419                                  * from our output.
420                                  */
421                                 print_event = 0;
422                         }
423                 }
424                 break;
425         case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
426         case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
427                 mpt->raid_rescan++;
428                 if (mpt_vol != NULL) {
429                         mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
430                 }
431                 break;
432         case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
433         case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
434                 mpt->raid_rescan++;
435                 break;
436         case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
437         case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
438                 mpt->raid_rescan++;
439                 if (mpt_disk != NULL) {
440                         mpt_disk->flags &= ~MPT_RDF_UP2DATE;
441                 }
442                 break;
443         case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
444                 mpt->raid_rescan++;
445                 break;
446         case MPI_EVENT_RAID_RC_SMART_DATA:
447         case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
448                 break;
449         }
450
451         if (print_event) {
452                 if (mpt_disk != NULL) {
453                         mpt_disk_prt(mpt, mpt_disk, "");
454                 } else if (mpt_vol != NULL) {
455                         mpt_vol_prt(mpt, mpt_vol, "");
456                 } else {
457                         mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
458                                 raid_event->VolumeID);
459
460                         if (raid_event->PhysDiskNum != 0xFF)
461                                 mpt_prtc(mpt, ":%d): ",
462                                          raid_event->PhysDiskNum);
463                         else
464                                 mpt_prtc(mpt, "): ");
465                 }
466
467                 if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
468                         mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
469                                  raid_event->ReasonCode);
470                 else
471                         mpt_prtc(mpt, "%s\n",
472                                  raid_event_txt[raid_event->ReasonCode]);
473         }
474
475         if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
476                 /* XXX Use CAM's print sense for this... */
477                 if (mpt_disk != NULL)
478                         mpt_disk_prt(mpt, mpt_disk, "");
479                 else
480                         mpt_prt(mpt, "Volume(%d:%d:%d: ",
481                             raid_event->VolumeBus, raid_event->VolumeID,
482                             raid_event->PhysDiskNum);
483                 mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
484                          raid_event->ASC, raid_event->ASCQ);
485         }
486
487         mpt_raid_wakeup(mpt);
488         return (1);
489 }
490
491 static void
492 mpt_raid_shutdown(struct mpt_softc *mpt)
493 {
494         struct mpt_raid_volume *mpt_vol;
495
496         if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
497                 return;
498         }
499
500         mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
501         RAID_VOL_FOREACH(mpt, mpt_vol) {
502                 mpt_verify_mwce(mpt, mpt_vol);
503         }
504 }
505
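/*
 * Reply handler for RAID action requests: parse the reply frame (if any),
 * mark the request done and remove it from the pending list, then either
 * wake a waiting thread or free the request.
 */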
506 static int
507 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
508     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
509 {
510         int free_req;
511
512         if (req == NULL)
513                 return (TRUE);
514
515         free_req = TRUE;
516         if (reply_frame != NULL)
517                 free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
518 #ifdef NOTYET
519         else if (req->ccb != NULL) {
520                 /* Complete Quiesce CCB with error... */
521         }
522 #endif
523
524         req->state &= ~REQ_STATE_QUEUED;
525         req->state |= REQ_STATE_DONE;
526         TAILQ_REMOVE(&mpt->request_pending_list, req, links);
527
528         if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
529                 wakeup(req);
530         } else if (free_req) {
531                 mpt_free_request(mpt, req);
532         }
533
534         return (TRUE);
535 }
536
537 /*
538  * Parse additional completion information in the reply
539  * frame for RAID I/O requests.
540  */
541 static int
542 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
543     MSG_DEFAULT_REPLY *reply_frame)
544 {
545         MSG_RAID_ACTION_REPLY *reply;
546         struct mpt_raid_action_result *action_result;
547         MSG_RAID_ACTION_REQUEST *rap;
548
549         reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
550         req->IOCStatus = le16toh(reply->IOCStatus);
551         rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
552
553         switch (rap->Action) {
554         case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
555                 mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
556                 break;
557         case MPI_RAID_ACTION_ENABLE_PHYS_IO:
558                 mpt_prt(mpt, "ENABLE PHYSIO DONE\n");
559                 break;
560         default:
561                 break;
562         }
563         action_result = REQ_TO_RAID_ACTION_RESULT(req);
564         memcpy(&action_result->action_data, &reply->ActionData,
565             sizeof(action_result->action_data));
566         action_result->action_status = le16toh(reply->ActionStatus);
567         return (TRUE);
568 }
569
570 /*
571  * Utility routine to perform a RAID action command.
572  */
573 int
574 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
575                    struct mpt_raid_disk *disk, request_t *req, u_int Action,
576                    uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
577                    int write, int wait)
578 {
579         MSG_RAID_ACTION_REQUEST *rap;
580         SGE_SIMPLE32 *se;
581
582         rap = req->req_vbuf;
583         memset(rap, 0, sizeof *rap);
584         rap->Action = Action;
585         rap->ActionDataWord = htole32(ActionDataWord);
586         rap->Function = MPI_FUNCTION_RAID_ACTION;
587         rap->VolumeID = vol->config_page->VolumeID;
588         rap->VolumeBus = vol->config_page->VolumeBus;
589         if (disk != NULL)
590                 rap->PhysDiskNum = disk->config_page.PhysDiskNum;
591         else
592                 rap->PhysDiskNum = 0xFF;
593         se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
594         se->Address = htole32(addr);
595         MPI_pSGE_SET_LENGTH(se, len);
596         MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
597             MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
598             MPI_SGE_FLAGS_END_OF_LIST |
599             (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
600         se->FlagsLength = htole32(se->FlagsLength);
601         rap->MsgContext = htole32(req->index | raid_handler_id);
602
603         mpt_check_doorbell(mpt);
604         mpt_send_cmd(mpt, req);
605
606         if (wait) {
607                 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
608                                      /*sleep_ok*/FALSE, /*time_ms*/2000));
609         } else {
610                 return (0);
611         }
612 }
613
614 /*************************** RAID Status Monitoring ***************************/
615 static int
616 mpt_spawn_raid_thread(struct mpt_softc *mpt)
617 {
618         int error;
619
620         /*
621          * Freeze out any CAM transactions until our thread
622          * is able to run at least once.  We need to update
623          * our RAID pages before accepting I/O or we may
624          * reject I/O to an ID we later determine is for a
625          * hidden physdisk.
626          */
627         MPT_LOCK(mpt);
628         xpt_freeze_simq(mpt->phydisk_sim, 1);
629         MPT_UNLOCK(mpt);
630         error = mpt_kthread_create(mpt_raid_thread, mpt,
631             &mpt->raid_thread, /*flags*/0, /*altstack*/0,
632             "mpt_raid%d", mpt->unit);
633         if (error != 0) {
634                 MPT_LOCK(mpt);
635                 xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
636                 MPT_UNLOCK(mpt);
637         }
638         return (error);
639 }
640
641 static void
642 mpt_terminate_raid_thread(struct mpt_softc *mpt)
643 {
644
645         if (mpt->raid_thread == NULL) {
646                 return;
647         }
648         mpt->shutdwn_raid = 1;
649         wakeup(mpt->raid_volumes);
650         /*
651          * Sleep on a slightly different location
652          * for this interlock just for added safety.
653          */
654         mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
655 }
656
657 static void
658 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
659 {
660
661         xpt_free_path(ccb->ccb_h.path);
662         xpt_free_ccb(ccb);
663 }
664
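/*
 * RAID monitoring thread.  Sleeps until woken by an event or the timer,
 * refreshes the RAID configuration pages, releases the physical-disk SIM
 * after the first successful refresh, and rescans the pass-thru bus when
 * membership changes are pending.
 */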
665 static void
666 mpt_raid_thread(void *arg)
667 {
668         struct mpt_softc *mpt;
669         int firstrun;
670
671         mpt = (struct mpt_softc *)arg;
672         firstrun = 1;
673         MPT_LOCK(mpt);
674         while (mpt->shutdwn_raid == 0) {
675
676                 if (mpt->raid_wakeup == 0) {
677                         mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
678                         continue;
679                 }
680
681                 mpt->raid_wakeup = 0;
682
683                 if (mpt_refresh_raid_data(mpt)) {
684                         mpt_schedule_raid_refresh(mpt); /* XX NOT QUITE RIGHT */
685                         continue;
686                 }
687
688                 /*
689                  * Now that we have our first snapshot of RAID data,
690                  * allow CAM to access our physical disk bus.
691                  */
692                 if (firstrun) {
693                         firstrun = 0;
694                         MPTLOCK_2_CAMLOCK(mpt);
695                         xpt_release_simq(mpt->phydisk_sim, TRUE);
696                         CAMLOCK_2_MPTLOCK(mpt);
697                 }
698
699                 if (mpt->raid_rescan != 0) {
700                         union ccb *ccb;
701                         struct cam_path *path;
702                         int error;
703
704                         mpt->raid_rescan = 0;
705                         MPT_UNLOCK(mpt);
706
707                         ccb = xpt_alloc_ccb();
708
709                         MPT_LOCK(mpt);
710                         error = xpt_create_path(&path, xpt_periph,
711                             cam_sim_path(mpt->phydisk_sim),
712                             CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
713                         if (error != CAM_REQ_CMP) {
714                                 xpt_free_ccb(ccb);
715                                 mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
716                         } else {
717                                 xpt_setup_ccb(&ccb->ccb_h, path, 5);
718                                 ccb->ccb_h.func_code = XPT_SCAN_BUS;
719                                 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
720                                 ccb->crcn.flags = CAM_FLAG_NONE;
721                                 MPTLOCK_2_CAMLOCK(mpt);
722                                 xpt_action(ccb);
723                                 CAMLOCK_2_MPTLOCK(mpt);
724                         }
725                 }
726         }
727         mpt->raid_thread = NULL;
728         wakeup(&mpt->raid_thread);
729         MPT_UNLOCK(mpt);
730         mpt_kthread_exit(0);
731 }
732
733 #if 0
734 static void
735 mpt_raid_quiesce_timeout(void *arg)
736 {
737         /* Complete the CCB with error */
738         /* COWWWW */
739 }
740
741 static timeout_t mpt_raid_quiesce_timeout;
742 cam_status
743 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
744                       request_t *req)
745 {
746         union ccb *ccb;
747
748         ccb = req->ccb;
749         if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
750                 return (CAM_REQ_CMP);
751
752         if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
753                 int rv;
754
755                 mpt_disk->flags |= MPT_RDF_QUIESCING;
756                 xpt_freeze_devq(ccb->ccb_h.path, 1);
757
758                 rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
759                                         MPI_RAID_ACTION_QUIESCE_PHYS_IO,
760                                         /*ActionData*/0, /*addr*/0,
761                                         /*len*/0, /*write*/FALSE,
762                                         /*wait*/FALSE);
763                 if (rv != 0)
764                         return (CAM_REQ_CMP_ERR);
765
766                 mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
767 #if 0
768                 if (rv == ETIMEDOUT) {
769                         mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
770                                      "Quiesce Timed-out\n");
771                         xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
772                         return (CAM_REQ_CMP_ERR);
773                 }
774
775                 ar = REQ_TO_RAID_ACTION_RESULT(req);
776                 if (rv != 0
777                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
778                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
779                         mpt_disk_prt(mpt, mpt_disk, "Quiesce Failed: "
780                                     "%d:%x:%x\n", rv, req->IOCStatus,
781                                     ar->action_status);
782                         xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
783                         return (CAM_REQ_CMP_ERR);
784                 }
785 #endif
786                 return (CAM_REQ_INPROG);
787         }
788         return (CAM_REQUEUE_REQ);
789 }
790 #endif
791
792 /* XXX Ignores that there may be multiple busses/IOCs involved. */
793 cam_status
794 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
795 {
796         struct mpt_raid_disk *mpt_disk;
797
798         mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
799         if (ccb->ccb_h.target_id < mpt->raid_max_disks
800          && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
801                 *tgt = mpt_disk->config_page.PhysDiskID;
802                 return (0);
803         }
804         mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
805                  ccb->ccb_h.target_id);
806         return (-1);
807 }
808
809 /* XXX Ignores that there may be multiple busses/IOCs involved. */
810 int
811 mpt_is_raid_volume(struct mpt_softc *mpt, int tgt)
812 {
813         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
814         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
815
816         if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
817                 return (0);
818         }
819         ioc_vol = mpt->ioc_page2->RaidVolume;
820         ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
821         for (;ioc_vol != ioc_last_vol; ioc_vol++) {
822                 if (ioc_vol->VolumeID == tgt) {
823                         return (1);
824                 }
825         }
826         return (0);
827 }
828
829 #if 0
830 static void
831 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
832                int enable)
833 {
834         request_t *req;
835         struct mpt_raid_action_result *ar;
836         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
837         int enabled;
838         int rv;
839
840         vol_pg = mpt_vol->config_page;
841         enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
842
843         /*
844          * If the setting matches the configuration,
845          * there is nothing to do.
846          */
847         if ((enabled && enable)
848          || (!enabled && !enable))
849                 return;
850
851         req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
852         if (req == NULL) {
853                 mpt_vol_prt(mpt, mpt_vol,
854                             "mpt_enable_vol: Get request failed!\n");
855                 return;
856         }
857
858         rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
859                                 enable ? MPI_RAID_ACTION_ENABLE_VOLUME
860                                        : MPI_RAID_ACTION_DISABLE_VOLUME,
861                                 /*data*/0, /*addr*/0, /*len*/0,
862                                 /*write*/FALSE, /*wait*/TRUE);
863         if (rv == ETIMEDOUT) {
864                 mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
865                             "%s Volume Timed-out\n",
866                             enable ? "Enable" : "Disable");
867                 return;
868         }
869         ar = REQ_TO_RAID_ACTION_RESULT(req);
870         if (rv != 0
871          || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
872          || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
873                 mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
874                             enable ? "Enable" : "Disable",
875                             rv, req->IOCStatus, ar->action_status);
876         }
877
878         mpt_free_request(mpt, req);
879 }
880 #endif
881
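/*
 * Bring a volume's member write-cache-enable setting in line with the
 * controller-wide policy (on, off, rebuild-only, or no-change), issuing a
 * CHANGE_VOLUME_SETTINGS RAID action when they differ.
 */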
882 static void
883 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
884 {
885         request_t *req;
886         struct mpt_raid_action_result *ar;
887         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
888         uint32_t data;
889         int rv;
890         int resyncing;
891         int mwce;
892
893         vol_pg = mpt_vol->config_page;
894         resyncing = vol_pg->VolumeStatus.Flags
895                   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
896         mwce = vol_pg->VolumeSettings.Settings
897              & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
898
899         /*
900          * If the setting matches the configuration,
901          * there is nothing to do.
902          */
903         switch (mpt->raid_mwce_setting) {
904         case MPT_RAID_MWCE_REBUILD_ONLY:
905                 if ((resyncing && mwce) || (!resyncing && !mwce)) {
906                         return;
907                 }
908                 mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
909                 if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
910                         /*
911                          * Wait one more status update to see if
912                          * resyncing gets enabled.  It gets disabled
913                          * temporarily when WCE is changed.
914                          */
915                         return;
916                 }
917                 break;
918         case MPT_RAID_MWCE_ON:
919                 if (mwce)
920                         return;
921                 break;
922         case MPT_RAID_MWCE_OFF:
923                 if (!mwce)
924                         return;
925                 break;
926         case MPT_RAID_MWCE_NC:
927                 return;
928         }
929
930         req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
931         if (req == NULL) {
932                 mpt_vol_prt(mpt, mpt_vol,
933                             "mpt_verify_mwce: Get request failed!\n");
934                 return;
935         }
936
937         vol_pg->VolumeSettings.Settings ^=
938             MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
939         memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
940         vol_pg->VolumeSettings.Settings ^=
941             MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
942         rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
943                                 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
944                                 data, /*addr*/0, /*len*/0,
945                                 /*write*/FALSE, /*wait*/TRUE);
946         if (rv == ETIMEDOUT) {
947                 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
948                             "Write Cache Enable Timed-out\n");
949                 return;
950         }
951         ar = REQ_TO_RAID_ACTION_RESULT(req);
952         if (rv != 0
953          || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
954          || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
955                 mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
956                             "%d:%x:%x\n", rv, req->IOCStatus,
957                             ar->action_status);
958         } else {
959                 vol_pg->VolumeSettings.Settings ^=
960                     MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
961         }
962         mpt_free_request(mpt, req);
963 }
964
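/*
 * Bring a volume's resync rate in line with the configured
 * mpt->raid_resync_rate.  If the volume exposes a non-zero resync rate,
 * set it directly; otherwise map the configured rate onto the
 * high-priority-resync settings bit (rates of 128 and above count as
 * high priority).
 */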
965 static void
966 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
967 {
968         request_t *req;
969         struct mpt_raid_action_result *ar;
970         CONFIG_PAGE_RAID_VOL_0  *vol_pg;
971         u_int prio;
972         int rv;
973
974         vol_pg = mpt_vol->config_page;
975
976         if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
977                 return;
978
979         /*
980          * If the current RAID resync rate does not
981          * match our configured rate, update it.
982          */
983         prio = vol_pg->VolumeSettings.Settings
984              & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
985         if (vol_pg->ResyncRate != 0
986          && vol_pg->ResyncRate != mpt->raid_resync_rate) {
987
988                 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
989                 if (req == NULL) {
990                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
991                                     "Get request failed!\n");
992                         return;
993                 }
994
995                 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
996                                         MPI_RAID_ACTION_SET_RESYNC_RATE,
997                                         mpt->raid_resync_rate, /*addr*/0,
998                                         /*len*/0, /*write*/FALSE, /*wait*/TRUE);
999                 if (rv == ETIMEDOUT) {
1000                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1001                                     "Resync Rate Setting Timed-out\n");
1002                         return;
1003                 }
1004
1005                 ar = REQ_TO_RAID_ACTION_RESULT(req);
1006                 if (rv != 0
1007                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1008                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1009                         mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1010                                     "%d:%x:%x\n", rv, req->IOCStatus,
1011                                     ar->action_status);
1012                 } else
1013                         vol_pg->ResyncRate = mpt->raid_resync_rate;
1014                 mpt_free_request(mpt, req);
1015         } else if ((prio && mpt->raid_resync_rate < 128)
1016                 || (!prio && mpt->raid_resync_rate >= 128)) {
1017                 uint32_t data;
1018
1019                 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1020                 if (req == NULL) {
1021                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1022                                     "Get request failed!\n");
1023                         return;
1024                 }
1025
1026                 vol_pg->VolumeSettings.Settings ^=
1027                     MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1028                 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
1029                 vol_pg->VolumeSettings.Settings ^=
1030                     MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1031                 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1032                                         MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
1033                                         data, /*addr*/0, /*len*/0,
1034                                         /*write*/FALSE, /*wait*/TRUE);
1035                 if (rv == ETIMEDOUT) {
1036                         mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1037                                     "Resync Rate Setting Timed-out\n");
1038                         return;
1039                 }
1040                 ar = REQ_TO_RAID_ACTION_RESULT(req);
1041                 if (rv != 0
1042                  || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1043                  || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1044                         mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1045                                     "%d:%x:%x\n", rv, req->IOCStatus,
1046                                     ar->action_status);
1047                 } else {
1048                         vol_pg->VolumeSettings.Settings ^=
1049                             MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1050                 }
1051
1052                 mpt_free_request(mpt, req);
1053         }
1054 }
1055
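/*
 * Clamp the number of outstanding CCBs for a RAID volume's device to the
 * driver's RAID queue depth via an XPT_REL_SIMQ openings adjustment.
 */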
1056 static void
1057 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1058                        struct cam_path *path)
1059 {
1060         struct ccb_relsim crs;
1061
1062         xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1063         crs.ccb_h.func_code = XPT_REL_SIMQ;
1064         crs.release_flags = RELSIM_ADJUST_OPENINGS;
1065         crs.openings = mpt->raid_queue_depth;
1066         xpt_action((union ccb *)&crs);
1067         if (crs.ccb_h.status != CAM_REQ_CMP)
1068                 mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1069                             "with CAM status %#x\n", crs.ccb_h.status);
1070 }
1071
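/*
 * Print a summary of a volume's settings, hot spare pools, and member
 * disks (position and status) to the console.
 */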
1072 static void
1073 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1074 {
1075         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1076         u_int i;
1077
1078         vol_pg = mpt_vol->config_page;
1079         mpt_vol_prt(mpt, mpt_vol, "Settings (");
1080         for (i = 1; i <= 0x8000; i <<= 1) {
1081                 switch (vol_pg->VolumeSettings.Settings & i) {
1082                 case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1083                         mpt_prtc(mpt, " Member-WCE");
1084                         break;
1085                 case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1086                         mpt_prtc(mpt, " Offline-On-SMART-Err");
1087                         break;
1088                 case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1089                         mpt_prtc(mpt, " Hot-Plug-Spares");
1090                         break;
1091                 case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1092                         mpt_prtc(mpt, " High-Priority-ReSync");
1093                         break;
1094                 default:
1095                         break;
1096                 }
1097         }
1098         mpt_prtc(mpt, " )\n");
1099         if (vol_pg->VolumeSettings.HotSparePool != 0) {
1100                 mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1101                             powerof2(vol_pg->VolumeSettings.HotSparePool)
1102                           ? ":" : "s:");
1103                 for (i = 0; i < 8; i++) {
1104                         u_int mask;
1105
1106                         mask = 0x1 << i;
1107                         if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1108                                 continue;
1109                         mpt_prtc(mpt, " %d", i);
1110                 }
1111                 mpt_prtc(mpt, "\n");
1112         }
1113         mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1114         for (i = 0; i < vol_pg->NumPhysDisks; i++){
1115                 struct mpt_raid_disk *mpt_disk;
1116                 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1117                 int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1118                 U8 f, s;
1119
1120                 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1121                 disk_pg = &mpt_disk->config_page;
1122                 mpt_prtc(mpt, "      ");
1123                 mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
1124                          pt_bus, disk_pg->PhysDiskID);
1125                 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1126                         mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
1127                             "Primary" : "Secondary");
1128                 } else {
1129                         mpt_prtc(mpt, "Stripe Position %d",
1130                                  mpt_disk->member_number);
1131                 }
1132                 f = disk_pg->PhysDiskStatus.Flags;
1133                 s = disk_pg->PhysDiskStatus.State;
1134                 if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
1135                         mpt_prtc(mpt, " Out of Sync");
1136                 }
1137                 if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
1138                         mpt_prtc(mpt, " Quiesced");
1139                 }
1140                 if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
1141                         mpt_prtc(mpt, " Inactive");
1142                 }
1143                 if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
1144                         mpt_prtc(mpt, " Was Optimal");
1145                 }
1146                 if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
1147                         mpt_prtc(mpt, " Was Non-Optimal");
1148                 }
1149                 switch (s) {
1150                 case MPI_PHYSDISK0_STATUS_ONLINE:
1151                         mpt_prtc(mpt, " Online");
1152                         break;
1153                 case MPI_PHYSDISK0_STATUS_MISSING:
1154                         mpt_prtc(mpt, " Missing");
1155                         break;
1156                 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1157                         mpt_prtc(mpt, " Incompatible");
1158                         break;
1159                 case MPI_PHYSDISK0_STATUS_FAILED:
1160                         mpt_prtc(mpt, " Failed");
1161                         break;
1162                 case MPI_PHYSDISK0_STATUS_INITIALIZING:
1163                         mpt_prtc(mpt, " Initializing");
1164                         break;
1165                 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1166                         mpt_prtc(mpt, " Requested Offline");
1167                         break;
1168                 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1169                         mpt_prtc(mpt, " Requested Failed");
1170                         break;
1171                 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1172                 default:
1173                         mpt_prtc(mpt, " Offline Other (%x)", s);
1174                         break;
1175                 }
1176                 mpt_prtc(mpt, "\n");
1177         }
1178 }
1179
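/*
 * Print a physical disk's RAID and pass-thru bus addresses and any hot
 * spare pool membership.
 */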
1180 static void
1181 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1182 {
1183         CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1184         int rd_bus = cam_sim_bus(mpt->sim);
1185         int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1186         u_int i;
1187
1188         disk_pg = &mpt_disk->config_page;
1189         mpt_disk_prt(mpt, mpt_disk,
1190                      "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
1191                      device_get_nameunit(mpt->dev), rd_bus,
1192                      disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1193                      pt_bus, mpt_disk - mpt->raid_disks);
1194         if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1195                 return;
1196         mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1197                      powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1198                    ? ":" : "s:");
1199         for (i = 0; i < 8; i++) {
1200                 u_int mask;
1201
1202                 mask = 0x1 << i;
1203                 if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1204                         continue;
1205                 mpt_prtc(mpt, " %d", i);
1206         }
1207         mpt_prtc(mpt, "\n");
1208 }
1209
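/*
 * Re-read RAID physical disk page 0 for the given disk from the IOC into
 * the in-core copy.
 */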
1210 static void
1211 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1212                       IOC_3_PHYS_DISK *ioc_disk)
1213 {
1214         int rv;
1215
1216         rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1217                                  /*PageNumber*/0, ioc_disk->PhysDiskNum,
1218                                  &mpt_disk->config_page.Header,
1219                                  /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1220         if (rv != 0) {
1221                 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1222                         "Failed to read RAID Disk Hdr(%d)\n",
1223                         ioc_disk->PhysDiskNum);
1224                 return;
1225         }
1226         rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1227                                    &mpt_disk->config_page.Header,
1228                                    sizeof(mpt_disk->config_page),
1229                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1230         if (rv != 0)
1231                 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1232                         "Failed to read RAID Disk Page(%d)\n",
1233                         ioc_disk->PhysDiskNum);
1234         mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
1235 }
1236
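/*
 * Re-read RAID volume page 0 for the given volume, update the member disk
 * back-pointers, and, if a resync is in progress, fetch the progress
 * indicator via an INDICATOR_STRUCT RAID action.
 */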
1237 static void
1238 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1239     CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1240 {
1241         CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1242         struct mpt_raid_action_result *ar;
1243         request_t *req;
1244         int rv;
1245         int i;
1246
1247         vol_pg = mpt_vol->config_page;
1248         mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1249
1250         rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1251             ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
1252         if (rv != 0) {
1253                 mpt_vol_prt(mpt, mpt_vol,
1254                     "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
1255                     ioc_vol->VolumePageNumber);
1256                 return;
1257         }
1258
1259         rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1260             &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
1261         if (rv != 0) {
1262                 mpt_vol_prt(mpt, mpt_vol,
1263                     "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
1264                     ioc_vol->VolumePageNumber);
1265                 return;
1266         }
1267         mpt2host_config_page_raid_vol_0(vol_pg);
1268
1269         mpt_vol->flags |= MPT_RVF_ACTIVE;
1270
1271         /* Update disk entry array data. */
1272         for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1273                 struct mpt_raid_disk *mpt_disk;
1274                 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1275                 mpt_disk->volume = mpt_vol;
1276                 mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
1277                 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1278                         mpt_disk->member_number--;
1279                 }
1280         }
1281
1282         if ((vol_pg->VolumeStatus.Flags
1283            & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1284                 return;
1285
1286         req = mpt_get_request(mpt, TRUE);
1287         if (req == NULL) {
1288                 mpt_vol_prt(mpt, mpt_vol,
1289                     "mpt_refresh_raid_vol: Get request failed!\n");
1290                 return;
1291         }
1292         rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
1293             MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
1294         if (rv == ETIMEDOUT) {
1295                 mpt_vol_prt(mpt, mpt_vol,
1296                     "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
1297                 mpt_free_request(mpt, req);
1298                 return;
1299         }
1300
1301         ar = REQ_TO_RAID_ACTION_RESULT(req);
1302         if (rv == 0
1303          && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1304          && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1305                 memcpy(&mpt_vol->sync_progress,
1306                        &ar->action_data.indicator_struct,
1307                        sizeof(mpt_vol->sync_progress));
1308                 mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
1309         } else {
1310                 mpt_vol_prt(mpt, mpt_vol,
1311                     "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
1312         }
1313         mpt_free_request(mpt, req);
1314 }
1315
1316 /*
1317  * Update in-core information about RAID support.  We update any entries
1318  * that didn't previously exist or that have been marked as needing to
1319  * be updated by our event handler.  Interesting changes are displayed
1320  * to the console.
1321  */
1322 int
1323 mpt_refresh_raid_data(struct mpt_softc *mpt)
1324 {
1325         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1326         CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1327         IOC_3_PHYS_DISK *ioc_disk;
1328         IOC_3_PHYS_DISK *ioc_last_disk;
1329         CONFIG_PAGE_RAID_VOL_0  *vol_pg;
1330         size_t len;
1331         int rv;
1332         int i;
1333         u_int nonopt_volumes;
1334
1335         if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1336                 return (0);
1337         }
1338
1339         /*
1340          * Mark all items as unreferenced by the configuration.
1341          * This allows us to find, report, and discard stale
1342          * entries.
1343          */
1344         for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1345                 mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1346         }
1347         for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1348                 mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1349         }
1350
1351         /*
1352          * Get Physical Disk information.
1353          */
1354         len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1355         rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1356                                    &mpt->ioc_page3->Header, len,
1357                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1358         if (rv) {
1359                 mpt_prt(mpt,
1360                     "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1361                 return (-1);
1362         }
1363         mpt2host_config_page_ioc3(mpt->ioc_page3);
1364
1365         ioc_disk = mpt->ioc_page3->PhysDisk;
1366         ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1367         for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1368                 struct mpt_raid_disk *mpt_disk;
1369
1370                 mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1371                 mpt_disk->flags |= MPT_RDF_REFERENCED;
1372                 if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1373                  != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1374
1375                         mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1376
1377                 }
1378                 mpt_disk->flags |= MPT_RDF_ACTIVE;
1379                 mpt->raid_rescan++;
1380         }
1381
1382         /*
1383          * Refresh volume data.
1384          */
1385         len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1386         rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1387                                    &mpt->ioc_page2->Header, len,
1388                                    /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1389         if (rv) {
1390                 mpt_prt(mpt, "mpt_refresh_raid_data: "
1391                         "Failed to read IOC Page 2\n");
1392                 return (-1);
1393         }
1394         mpt2host_config_page_ioc2(mpt->ioc_page2);
1395
1396         ioc_vol = mpt->ioc_page2->RaidVolume;
1397         ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1398         for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1399                 struct mpt_raid_volume *mpt_vol;
1400
1401                 mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1402                 mpt_vol->flags |= MPT_RVF_REFERENCED;
1403                 vol_pg = mpt_vol->config_page;
1404                 if (vol_pg == NULL)
1405                         continue;
1406                 if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1407                   != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1408                  || (vol_pg->VolumeStatus.Flags
1409                    & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1410
1411                         mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1412                 }
1413                 mpt_vol->flags |= MPT_RVF_ACTIVE;
1414         }
1415
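        /*
         * Walk the volume table: drop volumes that are no longer part of
         * the configuration, announce newly seen ones, report state and
         * status changes, and count volumes that are not OPTIMAL.
         */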
1416         nonopt_volumes = 0;
1417         for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1418                 struct mpt_raid_volume *mpt_vol;
1419                 uint64_t total;
1420                 uint64_t left;
1421                 int m;
1422                 u_int prio;
1423
1424                 mpt_vol = &mpt->raid_volumes[i];
1425
1426                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1427                         continue;
1428                 }
1429
1430                 vol_pg = mpt_vol->config_page;
1431                 if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1432                  == MPT_RVF_ANNOUNCED) {
1433                         mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1434                         mpt_vol->flags = 0;
1435                         continue;
1436                 }
1437
1438                 if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1439                         mpt_announce_vol(mpt, mpt_vol);
1440                         mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1441                 }
1442
1443                 if (vol_pg->VolumeStatus.State !=
1444                     MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1445                         nonopt_volumes++;
1446
1447                 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1448                         continue;
1449
1450                 mpt_vol->flags |= MPT_RVF_UP2DATE;
1451                 mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1452                     mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1453                 mpt_verify_mwce(mpt, mpt_vol);
1454
1455                 if (vol_pg->VolumeStatus.Flags == 0) {
1456                         continue;
1457                 }
1458
1459                 mpt_vol_prt(mpt, mpt_vol, "Status (");
1460                 for (m = 1; m <= 0x80; m <<= 1) {
1461                         switch (vol_pg->VolumeStatus.Flags & m) {
1462                         case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1463                                 mpt_prtc(mpt, " Enabled");
1464                                 break;
1465                         case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1466                                 mpt_prtc(mpt, " Quiesced");
1467                                 break;
1468                         case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1469                                 mpt_prtc(mpt, " Re-Syncing");
1470                                 break;
1471                         case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1472                                 mpt_prtc(mpt, " Inactive");
1473                                 break;
1474                         default:
1475                                 break;
1476                         }
1477                 }
1478                 mpt_prtc(mpt, " )\n");
1479
1480                 if ((vol_pg->VolumeStatus.Flags
1481                    & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1482                         continue;
1483
1484                 mpt_verify_resync_rate(mpt, mpt_vol);
1485
1486                 left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1487                 total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1488                 if (vol_pg->ResyncRate != 0) {
1489
1490                         prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1491                         mpt_vol_prt(mpt, mpt_vol, "Rate %d.%03d%%\n",
1492                             prio / 1000, prio % 1000);
1493                 } else {
1494                         prio = vol_pg->VolumeSettings.Settings
1495                              & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1496                         mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1497                             prio ? "High" : "Low");
1498                 }
1499 #if __FreeBSD_version >= 500000
1500                 mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1501                             "blocks remaining\n", (uintmax_t)left,
1502                             (uintmax_t)total);
1503 #else
1504                 mpt_vol_prt(mpt, mpt_vol, "%llu of %llu "
1505                             "blocks remaining\n", (uint64_t)left,
1506                             (uint64_t)total);
1507 #endif
1508
1509                 /* Periodically report on sync progress. */
1510                 mpt_schedule_raid_refresh(mpt);
1511         }
1512
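        /*
         * Walk the physical disk table: drop disks that are no longer
         * configured, announce newly seen ones, and report their status
         * flags when they change.
         */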
1513         for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1514                 struct mpt_raid_disk *mpt_disk;
1515                 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1516                 int m;
1517
1518                 mpt_disk = &mpt->raid_disks[i];
1519                 disk_pg = &mpt_disk->config_page;
1520
1521                 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1522                         continue;
1523
1524                 if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1525                  == MPT_RDF_ANNOUNCED) {
1526                         mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1527                         mpt_disk->flags = 0;
1528                         mpt->raid_rescan++;
1529                         continue;
1530                 }
1531
1532                 if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1533
1534                         mpt_announce_disk(mpt, mpt_disk);
1535                         mpt_disk->flags |= MPT_RDF_ANNOUNCED;
1536                 }
1537
1538                 if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1539                         continue;
1540
1541                 mpt_disk->flags |= MPT_RDF_UP2DATE;
1542                 mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1543                 if (disk_pg->PhysDiskStatus.Flags == 0)
1544                         continue;
1545
1546                 mpt_disk_prt(mpt, mpt_disk, "Status (");
1547                 for (m = 1; m <= 0x80; m <<= 1) {
1548                         switch (disk_pg->PhysDiskStatus.Flags & m) {
1549                         case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1550                                 mpt_prtc(mpt, " Out-Of-Sync");
1551                                 break;
1552                         case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1553                                 mpt_prtc(mpt, " Quiesced");
1554                                 break;
1555                         default:
1556                                 break;
1557                         }
1558                 }
1559                 mpt_prtc(mpt, " )\n");
1560         }
1561
1562         mpt->raid_nonopt_volumes = nonopt_volumes;
1563         return (0);
1564 }
1565
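/*
 * Callout handler for the periodic RAID refresh.  Runs from callout
 * context, so just take the MPT lock and wake the RAID support thread,
 * which performs the actual configuration page re-reads.
 */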
1566 static void
1567 mpt_raid_timer(void *arg)
1568 {
1569         struct mpt_softc *mpt;
1570
1571         mpt = (struct mpt_softc *)arg;
1572         MPT_LOCK(mpt);
1573         mpt_raid_wakeup(mpt);
1574         MPT_UNLOCK(mpt);
1575 }
1576
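/*
 * (Re)arm the RAID refresh callout; mpt_raid_timer() will fire after
 * MPT_RAID_SYNC_REPORT_INTERVAL.  Used above to keep reporting resync
 * progress while a volume is rebuilding.
 */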
1577 void
1578 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1579 {
1580         callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1581                       mpt_raid_timer, mpt);
1582 }
1583
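/*
 * Release all RAID bookkeeping allocated at attach time: the per-volume
 * configuration pages, the volume and disk arrays, and the cached copies
 * of IOC pages 2 and 3.
 */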
1584 void
1585 mpt_raid_free_mem(struct mpt_softc *mpt)
1586 {
1587
1588         if (mpt->raid_volumes) {
1589                 struct mpt_raid_volume *mpt_raid;
1590                 int i;
1591                 for (i = 0; i < mpt->raid_max_volumes; i++) {
1592                         mpt_raid = &mpt->raid_volumes[i];
1593                         if (mpt_raid->config_page) {
1594                                 kfree(mpt_raid->config_page, M_DEVBUF);
1595                                 mpt_raid->config_page = NULL;
1596                         }
1597                 }
1598                 kfree(mpt->raid_volumes, M_DEVBUF);
1599                 mpt->raid_volumes = NULL;
1600         }
1601         if (mpt->raid_disks) {
1602                 kfree(mpt->raid_disks, M_DEVBUF);
1603                 mpt->raid_disks = NULL;
1604         }
1605         if (mpt->ioc_page2) {
1606                 kfree(mpt->ioc_page2, M_DEVBUF);
1607                 mpt->ioc_page2 = NULL;
1608         }
1609         if (mpt->ioc_page3) {
1610                 kfree(mpt->ioc_page3, M_DEVBUF);
1611                 mpt->ioc_page3 = NULL;
1612         }
1613         mpt->raid_max_volumes = 0;
1614         mpt->raid_max_disks = 0;
1615 }
1616
1617 #if __FreeBSD_version >= 500000
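/*
 * Set the global resync rate.  Valid values are MPT_RAID_RESYNC_RATE_MIN
 * through MPT_RAID_RESYNC_RATE_MAX, or MPT_RAID_RESYNC_RATE_NC (no
 * change); the new rate is pushed to every active volume via
 * mpt_verify_resync_rate().
 */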
1618 static int
1619 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1620 {
1621         struct mpt_raid_volume *mpt_vol;
1622
1623         if ((rate > MPT_RAID_RESYNC_RATE_MAX
1624           || rate < MPT_RAID_RESYNC_RATE_MIN)
1625          && rate != MPT_RAID_RESYNC_RATE_NC)
1626                 return (EINVAL);
1627
1628         MPT_LOCK(mpt);
1629         mpt->raid_resync_rate = rate;
1630         RAID_VOL_FOREACH(mpt, mpt_vol) {
1631                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1632                         continue;
1633                 }
1634                 mpt_verify_resync_rate(mpt, mpt_vol);
1635         }
1636         MPT_UNLOCK(mpt);
1637         return (0);
1638 }
1639
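/*
 * Set the default volume queue depth (1-255) and apply it to each active
 * volume by building a temporary CAM path to the volume and calling
 * mpt_adjust_queue_depth().
 */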
1640 static int
1641 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1642 {
1643         struct mpt_raid_volume *mpt_vol;
1644
1645         if (vol_queue_depth > 255 || vol_queue_depth < 1)
1646                 return (EINVAL);
1647
1648         MPT_LOCK(mpt);
1649         mpt->raid_queue_depth = vol_queue_depth;
1650         RAID_VOL_FOREACH(mpt, mpt_vol) {
1651                 struct cam_path *path;
1652                 int error;
1653
1654                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1655                         continue;
1656
1657                 mpt->raid_rescan = 0;
1658
1659                 MPTLOCK_2_CAMLOCK(mpt);
1660                 error = xpt_create_path(&path, xpt_periph,
1661                                         cam_sim_path(mpt->sim),
1662                                         mpt_vol->config_page->VolumeID,
1663                                         /*lun*/0);
1664                 if (error != CAM_REQ_CMP) {
1665                         CAMLOCK_2_MPTLOCK(mpt);
1666                         mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1667                         continue;
1668                 }
1669                 mpt_adjust_queue_depth(mpt, mpt_vol, path);
1670                 xpt_free_path(path);
1671                 CAMLOCK_2_MPTLOCK(mpt);
1672         }
1673         MPT_UNLOCK(mpt);
1674         return (0);
1675 }
1676
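/*
 * Change the member-disk write cache enable (MWCE) policy.  When the
 * policy is first changed from its default (NC) to rebuild-only, warn
 * about volumes that appear to have gone through an unsafe shutdown
 * (write caching enabled while a resync is still in progress), then
 * re-verify the MWCE configuration of every active volume.
 */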
1677 static int
1678 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1679 {
1680         struct mpt_raid_volume *mpt_vol;
1681         int force_full_resync;
1682
1683         MPT_LOCK(mpt);
1684         if (mwce == mpt->raid_mwce_setting) {
1685                 MPT_UNLOCK(mpt);
1686                 return (0);
1687         }
1688
1689         /*
1690          * Catch MWCE being left on due to a failed shutdown.  Since
1691          * sysctls cannot be set by the loader, we treat the first
1692          * setting of this variable specially and force a full volume
1693          * resync if MWCE is enabled and a resync is in progress.
1694          */
1695         force_full_resync = 0;
1696         if (mpt->raid_mwce_set == 0
1697          && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1698          && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1699                 force_full_resync = 1;
1700
1701         mpt->raid_mwce_setting = mwce;
1702         RAID_VOL_FOREACH(mpt, mpt_vol) {
1703                 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1704                 int resyncing;
1705                 int mwce;
1706
1707                 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1708                         continue;
1709
1710                 vol_pg = mpt_vol->config_page;
1711                 resyncing = vol_pg->VolumeStatus.Flags
1712                           & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1713                 mwce = vol_pg->VolumeSettings.Settings
1714                      & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1715                 if (force_full_resync && resyncing && mwce) {
1716
1717                         /*
1718                          * XXX disable/enable volume should force a resync,
1719                          *     but we'll need to quiesce, drain, and restart
1720                          *     I/O to do that.
1721                          */
1722                         mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1723                                     "detected.  Suggest full resync.\n");
1724                 }
1725                 mpt_verify_mwce(mpt, mpt_vol);
1726         }
1727         mpt->raid_mwce_set = 1;
1728         MPT_UNLOCK(mpt);
1729         return (0);
1730 }
1731 const char *mpt_vol_mwce_strs[] =
1732 {
1733         "On",
1734         "Off",
1735         "On-During-Rebuild",
1736         "NC"
1737 };
1738
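/*
 * Sysctl handler for the vol_member_wce node.  Reports the current
 * policy as one of the strings in mpt_vol_mwce_strs and accepts the
 * same strings ("On", "Off", "On-During-Rebuild", "NC") on write.
 */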
1739 static int
1740 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1741 {
1742         char inbuf[20];
1743         struct mpt_softc *mpt;
1744         const char *str;
1745         int error;
1746         u_int size;
1747         u_int i;
1748
1749         GIANT_REQUIRED;
1750
1751         mpt = (struct mpt_softc *)arg1;
1752         str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1753         error = SYSCTL_OUT(req, str, strlen(str) + 1);
1754         if (error || !req->newptr) {
1755                 return (error);
1756         }
1757
1758         size = req->newlen - req->newidx;
1759         if (size >= sizeof(inbuf)) {
1760                 return (EINVAL);
1761         }
1762
1763         error = SYSCTL_IN(req, inbuf, size);
1764         if (error) {
1765                 return (error);
1766         }
1767         inbuf[size] = '\0';
1768         for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1769                 if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1770                         return (mpt_raid_set_vol_mwce(mpt, i));
1771                 }
1772         }
1773         return (EINVAL);
1774 }
1775
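/*
 * Sysctl handler for the vol_resync_rate node; any new value is handed
 * to mpt_raid_set_vol_resync_rate() for validation and application.
 */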
1776 static int
1777 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1778 {
1779         struct mpt_softc *mpt;
1780         u_int raid_resync_rate;
1781         int error;
1782
1783         GIANT_REQUIRED;
1784
1785         mpt = (struct mpt_softc *)arg1;
1786         raid_resync_rate = mpt->raid_resync_rate;
1787
1788         error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1789         if (error || !req->newptr) {
1790                 return (error);
1791         }
1792
1793         return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
1794 }
1795
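/*
 * Sysctl handler for the vol_queue_depth node; any new value is handed
 * to mpt_raid_set_vol_queue_depth() for validation and application.
 */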
1796 static int
1797 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1798 {
1799         struct mpt_softc *mpt;
1800         u_int raid_queue_depth;
1801         int error;
1802
1803         GIANT_REQUIRED;
1804
1805         mpt = (struct mpt_softc *)arg1;
1806         raid_queue_depth = mpt->raid_queue_depth;
1807
1808         error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1809         if (error || !req->newptr) {
1810                 return (error);
1811         }
1812
1813         return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1814 }
1815
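/*
 * Register the RAID management sysctls under this adapter's device
 * sysctl tree.  From userland they would typically show up as, e.g.
 * (assuming the device sysctl tree is dev.mpt.0 for unit 0):
 *
 *   sysctl dev.mpt.0.vol_member_wce=Off
 *   sysctl dev.mpt.0.vol_resync_rate=128
 *   sysctl dev.mpt.0.vol_queue_depth=64
 *   sysctl dev.mpt.0.nonoptimal_volumes
 */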
1816 static void
1817 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1818 {
1819         struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1820         struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1821
1822         SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1823                         "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1824                         mpt_raid_sysctl_vol_member_wce, "A",
1825                         "volume member WCE (On, Off, On-During-Rebuild, NC)");
1826
1827         SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1828                         "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1829                         mpt_raid_sysctl_vol_queue_depth, "I",
1830                         "default volume queue depth");
1831
1832         SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1833                         "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1834                         mpt_raid_sysctl_vol_resync_rate, "I",
1835                         "volume resync priority (0 == NC, 1 - 255)");
1836         SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1837                         "nonoptimal_volumes", CTLFLAG_RD,
1838                         &mpt->raid_nonopt_volumes, 0,
1839                         "number of nonoptimal volumes");
1840 }
1841 #endif