2 * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
4 * Copyright (c) 2005, WHEEL Sp. z o.o.
5 * Copyright (c) 2005 Justin T. Gibbs.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon including
16 * a substantially similar Disclaimer requirement for further binary
18 * 3. Neither the names of the above listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 * Some Breakage and Bug Fixing added later.
36 * Copyright (c) 2006, by Matthew Jacob
39 * Support from LSI-Logic has also gone a great deal toward making this a
40 * workable subsystem and is gratefully acknowledged.
42 * $FreeBSD: src/sys/dev/mpt/mpt_raid.c,v 1.20 2009/05/21 12:36:40 jhb Exp $
45 #include <dev/disk/mpt/mpt.h>
46 #include <dev/disk/mpt/mpt_raid.h>
48 #include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
49 #include "dev/disk/mpt/mpilib/mpi_raid.h"
51 #include <bus/cam/cam.h>
52 #include <bus/cam/cam_ccb.h>
53 #include <bus/cam/cam_sim.h>
54 #include <bus/cam/cam_xpt_periph.h>
55 #include <bus/cam/cam_xpt_sim.h>
57 #if __FreeBSD_version < 500000
58 #include <sys/devicestat.h>
59 #define GIANT_REQUIRED
61 #include <bus/cam/cam_periph.h>
63 #include <sys/callout.h>
64 #include <sys/kthread.h>
65 #include <sys/sysctl.h>
67 #include <machine/stdarg.h>
/*
 * Result payload decoded from a RAID ACTION reply.  It is stored in the
 * request buffer immediately after the request message itself, which is
 * what REQ_TO_RAID_ACTION_RESULT() computes.
 */
struct mpt_raid_action_result
72 MPI_RAID_VOL_INDICATOR indicator_struct;
73 uint32_t new_settings;
74 uint8_t phys_disk_num;
76 uint16_t action_status;
/* Locate the action result appended after the request message in req_vbuf. */
79 #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
80 (((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))
/* Strip status-message/log bits, leaving only the IOC status code. */
82 #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
/*
 * Personality hooks: the RAID personality registers these callbacks with
 * the core mpt(4) driver so it is invoked at probe/attach/event/reset/
 * shutdown/detach time for adapters with integrated RAID.
 */
85 static mpt_probe_handler_t mpt_raid_probe;
86 static mpt_attach_handler_t mpt_raid_attach;
87 static mpt_enable_handler_t mpt_raid_enable;
88 static mpt_event_handler_t mpt_raid_event;
89 static mpt_shutdown_handler_t mpt_raid_shutdown;
90 static mpt_reset_handler_t mpt_raid_ioc_reset;
91 static mpt_detach_handler_t mpt_raid_detach;
93 static struct mpt_personality mpt_raid_personality =
96 .probe = mpt_raid_probe,
97 .attach = mpt_raid_attach,
98 .enable = mpt_raid_enable,
99 .event = mpt_raid_event,
100 .reset = mpt_raid_ioc_reset,
101 .shutdown = mpt_raid_shutdown,
102 .detach = mpt_raid_detach,
/* Registered after (and dependent upon) the CAM personality. */
105 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
106 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
108 static mpt_reply_handler_t mpt_raid_reply_handler;
109 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
110 MSG_DEFAULT_REPLY *reply_frame);
111 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
112 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
113 static void mpt_raid_thread(void *arg);
114 static timeout_t mpt_raid_timer;
116 static void mpt_enable_vol(struct mpt_softc *mpt,
117 struct mpt_raid_volume *mpt_vol, int enable);
119 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
120 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
/* Old FreeBSD (< 5.0) has no sysctl attach support; stub it out. */
122 #if __FreeBSD_version < 500000
123 #define mpt_raid_sysctl_attach(x) do { } while (0)
125 static void mpt_raid_sysctl_attach(struct mpt_softc *);
/* Reply-handler id assigned by mpt_register_handler() at attach time. */
128 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
/*
 * Map a volume's MPI VolumeType code to a human-readable string
 * (IS = striped, IME = enhanced mirror, IM = mirror per the MPI spec).
 */
131 mpt_vol_type(struct mpt_raid_volume *vol)
133 switch (vol->config_page->VolumeType) {
134 case MPI_RAID_VOL_TYPE_IS:
136 case MPI_RAID_VOL_TYPE_IME:
138 case MPI_RAID_VOL_TYPE_IM:
/*
 * Map a volume's current RAID VOL page 0 state to a human-readable string.
 */
146 mpt_vol_state(struct mpt_raid_volume *vol)
148 switch (vol->config_page->VolumeStatus.State) {
149 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
151 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
153 case MPI_RAIDVOL0_STATUS_STATE_FAILED:
/*
 * Map a physical disk's PHYS DISK page 0 state to a human-readable string.
 */
161 mpt_disk_state(struct mpt_raid_disk *disk)
163 switch (disk->config_page.PhysDiskStatus.State) {
164 case MPI_PHYSDISK0_STATUS_ONLINE:
166 case MPI_PHYSDISK0_STATUS_MISSING:
168 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
169 return ("Incompatible");
170 case MPI_PHYSDISK0_STATUS_FAILED:
172 case MPI_PHYSDISK0_STATUS_INITIALIZING:
173 return ("Initializing");
174 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
175 return ("Offline Requested");
176 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
177 return ("Failed per Host Request");
178 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
/*
 * printf-style diagnostic prefixed with the unit name, volume index and
 * the volume's bus:id address.
 */
186 mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
187 const char *fmt, ...)
191 kprintf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
192 (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
193 vol->config_page->VolumeBus, vol->config_page->VolumeID);
/*
 * printf-style diagnostic for a physical disk.  If the disk is a member
 * of a volume the prefix identifies the volume and member number,
 * otherwise the disk's own bus:id address is used.
 */
200 mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
201 const char *fmt, ...)
205 if (disk->volume != NULL) {
206 kprintf("(%s:vol%d:%d): ",
207 device_get_nameunit(mpt->dev),
208 disk->volume->config_page->VolumeID,
209 disk->member_number);
211 kprintf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
212 disk->config_page.PhysDiskBus,
213 disk->config_page.PhysDiskID);
/*
 * CAM async callback.  On AC_FOUND_DEVICE, if the new device is one of
 * our active RAID volumes, clamp its queue depth to the configured
 * RAID queue depth.
 */
221 mpt_raid_async(void *callback_arg, u_int32_t code,
222 struct cam_path *path, void *arg)
224 struct mpt_softc *mpt;
226 mpt = (struct mpt_softc*)callback_arg;
228 case AC_FOUND_DEVICE:
230 struct ccb_getdev *cgd;
231 struct mpt_raid_volume *mpt_vol;
233 cgd = (struct ccb_getdev *)arg;
238 mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
239 cgd->ccb_h.target_id);
/* Only active volumes matching the reported target id are adjusted. */
241 RAID_VOL_FOREACH(mpt, mpt_vol) {
242 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
245 if (mpt_vol->config_page->VolumeID
246 == cgd->ccb_h.target_id) {
247 mpt_adjust_queue_depth(mpt, mpt_vol, path);
/*
 * Claim the adapter only if it reports integrated RAID capability
 * (IOC page 2 present with a non-zero physical disk limit).
 */
258 mpt_raid_probe(struct mpt_softc *mpt)
260 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
/*
 * Attach the RAID personality: start the RAID monitoring thread,
 * register our reply handler, and hook AC_FOUND_DEVICE async events so
 * volume queue depths can be adjusted as devices appear.  On any
 * failure the partially-initialized state is torn down via
 * mpt_raid_detach().
 */
267 mpt_raid_attach(struct mpt_softc *mpt)
269 struct ccb_setasync csa;
270 mpt_handler_t handler;
273 mpt_callout_init(&mpt->raid_timer);
275 error = mpt_spawn_raid_thread(mpt);
277 mpt_prt(mpt, "Unable to spawn RAID thread!\n");
282 handler.reply_handler = mpt_raid_reply_handler;
283 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
286 mpt_prt(mpt, "Unable to register RAID handler!\n");
290 xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
291 csa.ccb_h.func_code = XPT_SASYNC_CB;
292 csa.event_enable = AC_FOUND_DEVICE;
293 csa.callback = mpt_raid_async;
294 csa.callback_arg = mpt;
295 xpt_action((union ccb *)&csa);
296 if (csa.ccb_h.status != CAM_REQ_CMP) {
297 mpt_prt(mpt, "mpt_raid_attach: Unable to register "
298 "CAM async handler.\n");
302 mpt_raid_sysctl_attach(mpt);
/* cleanup path: undo whatever was set up above */
306 mpt_raid_detach(mpt);
/* Personality enable hook; body not visible in this view of the file. */
311 mpt_raid_enable(struct mpt_softc *mpt)
/*
 * Detach the RAID personality: stop the refresh timer, terminate the
 * RAID thread, deregister the reply handler, and disable our CAM async
 * callback (event_enable = 0 undoes the attach-time registration).
 */
317 mpt_raid_detach(struct mpt_softc *mpt)
319 struct ccb_setasync csa;
320 mpt_handler_t handler;
322 callout_stop(&mpt->raid_timer);
324 mpt_terminate_raid_thread(mpt);
326 handler.reply_handler = mpt_raid_reply_handler;
327 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
329 xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
330 csa.ccb_h.func_code = XPT_SASYNC_CB;
331 csa.event_enable = 0;
332 csa.callback = mpt_raid_async;
333 csa.callback_arg = mpt;
334 xpt_action((union ccb *)&csa);
/* IOC reset hook; intentionally a no-op for now. */
339 mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
341 /* Nothing to do yet. */
/*
 * Text descriptions indexed by the RAID event ReasonCode; bounds-checked
 * against NUM_ELEMENTS() before use in mpt_raid_event().
 */
344 static const char *raid_event_txt[] =
348 "Volume Settings Changed",
349 "Volume Status Changed",
350 "Volume Physical Disk Membership Changed",
351 "Physical Disk Created",
352 "Physical Disk Deleted",
353 "Physical Disk Settings Changed",
354 "Physical Disk Status Changed",
355 "Domain Validation Required",
356 "SMART Data Received",
357 "Replace Action Started",
/*
 * Handle MPI_EVENT_INTEGRATED_RAID notifications: locate the affected
 * volume and/or physical disk, clear the relevant UP2DATE flags so the
 * RAID thread refreshes its cached configuration pages, log the event,
 * and finally wake the RAID thread.
 */
361 mpt_raid_event(struct mpt_softc *mpt, request_t *req,
362 MSG_EVENT_NOTIFY_REPLY *msg)
364 EVENT_DATA_RAID *raid_event;
365 struct mpt_raid_volume *mpt_vol;
366 struct mpt_raid_disk *mpt_disk;
367 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
371 if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
375 raid_event = (EVENT_DATA_RAID *)&msg->Data;
/* Find the active volume matching the event's bus/id, if any. */
379 if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
380 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
381 mpt_vol = &mpt->raid_volumes[i];
382 vol_pg = mpt_vol->config_page;
384 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
387 if (vol_pg->VolumeID == raid_event->VolumeID
388 && vol_pg->VolumeBus == raid_event->VolumeBus)
391 if (i >= mpt->ioc_page2->MaxVolumes) {
/* PhysDiskNum 0xFF means "no physical disk" in the event data. */
398 if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
399 mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
400 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
406 switch(raid_event->ReasonCode) {
407 case MPI_EVENT_RAID_RC_VOLUME_CREATED:
408 case MPI_EVENT_RAID_RC_VOLUME_DELETED:
410 case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
411 if (mpt_vol != NULL) {
412 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
413 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
416 * Coalesce status messages into one
417 * per background run of our RAID thread.
418 * This removes "spurious" status messages
425 case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
426 case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
428 if (mpt_vol != NULL) {
429 mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
432 case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
433 case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
436 case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
437 case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
439 if (mpt_disk != NULL) {
440 mpt_disk->flags &= ~MPT_RDF_UP2DATE;
443 case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
446 case MPI_EVENT_RAID_RC_SMART_DATA:
447 case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
/* Emit a log prefix naming the disk, volume, or raw event address. */
452 if (mpt_disk != NULL) {
453 mpt_disk_prt(mpt, mpt_disk, NULL);
454 } else if (mpt_vol != NULL) {
455 mpt_vol_prt(mpt, mpt_vol, NULL);
457 mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
458 raid_event->VolumeID);
460 if (raid_event->PhysDiskNum != 0xFF)
461 mpt_prtc(mpt, ":%d): ",
462 raid_event->PhysDiskNum);
464 mpt_prtc(mpt, "): ");
/* Guard the table lookup; unknown reason codes are printed raw. */
467 if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
468 mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
469 raid_event->ReasonCode);
471 mpt_prtc(mpt, "%s\n",
472 raid_event_txt[raid_event->ReasonCode]);
475 if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
476 /* XXX Use CAM's print sense for this... */
477 if (mpt_disk != NULL)
478 mpt_disk_prt(mpt, mpt_disk, NULL);
480 mpt_prt(mpt, "Volume(%d:%d:%d: ",
481 raid_event->VolumeBus, raid_event->VolumeID,
482 raid_event->PhysDiskNum);
483 mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
484 raid_event->ASC, raid_event->ASCQ);
/* Kick the RAID thread to refresh any now-stale pages. */
487 mpt_raid_wakeup(mpt);
/*
 * Shutdown hook: force member write-cache off on all volumes (unless the
 * policy was never REBUILD_ONLY) so data is not left in volatile caches.
 */
492 mpt_raid_shutdown(struct mpt_softc *mpt)
494 struct mpt_raid_volume *mpt_vol;
496 if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
500 mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
501 RAID_VOL_FOREACH(mpt, mpt_vol) {
502 mpt_verify_mwce(mpt, mpt_vol);
/*
 * Reply-handler entry for RAID action requests: decode any reply frame,
 * mark the request done, remove it from the pending list, and either
 * wake a waiter or free the request.
 */
507 mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
508 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
516 if (reply_frame != NULL)
517 free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
519 else if (req->ccb != NULL) {
520 /* Complete Quiesce CCB with error... */
524 req->state &= ~REQ_STATE_QUEUED;
525 req->state |= REQ_STATE_DONE;
526 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
/* A sleeping issuer (mpt_wait_req) is woken instead of freeing here. */
528 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
530 } else if (free_req) {
531 mpt_free_request(mpt, req);
/*
 * Parse additional completion information in the reply
 * frame for RAID I/O requests: latch the IOC status on the request and
 * copy the action status/data into the mpt_raid_action_result area that
 * trails the request message.
 */
542 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
543 MSG_DEFAULT_REPLY *reply_frame)
545 MSG_RAID_ACTION_REPLY *reply;
546 struct mpt_raid_action_result *action_result;
547 MSG_RAID_ACTION_REQUEST *rap;
549 reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
550 req->IOCStatus = le16toh(reply->IOCStatus);
551 rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
553 switch (rap->Action) {
554 case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
555 mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
557 case MPI_RAID_ACTION_ENABLE_PHYS_IO:
558 mpt_prt(mpt, "ENABLE PHYSIO DONE\n");
/* Stash the action result for the waiter to examine. */
563 action_result = REQ_TO_RAID_ACTION_RESULT(req);
564 memcpy(&action_result->action_data, &reply->ActionData,
565 sizeof(action_result->action_data));
566 action_result->action_status = le16toh(reply->ActionStatus);
/*
 * Utility routine to perform a RAID action command: build the
 * MSG_RAID_ACTION_REQUEST (addressing the given volume and, optionally,
 * physical disk), attach a single simple SGE describing the data buffer,
 * post it to the IOC, and optionally wait up to 2 seconds for completion.
 *
 * BUG FIX: the SGE flags expression previously read
 *   ... | MPI_SGE_FLAGS_END_OF_LIST | write ? HOST_TO_IOC : IOC_TO_HOST
 * and since '|' binds tighter than '?:' the whole OR became the ternary
 * condition, discarding every flag bit except the direction.  The ternary
 * is now parenthesized so the direction bit is OR'd in as intended.
 */
574 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
575 struct mpt_raid_disk *disk, request_t *req, u_int Action,
576 uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
579 MSG_RAID_ACTION_REQUEST *rap;
583 memset(rap, 0, sizeof *rap);
584 rap->Action = Action;
585 rap->ActionDataWord = htole32(ActionDataWord);
586 rap->Function = MPI_FUNCTION_RAID_ACTION;
587 rap->VolumeID = vol->config_page->VolumeID;
588 rap->VolumeBus = vol->config_page->VolumeBus;
590 rap->PhysDiskNum = disk->config_page.PhysDiskNum;
592 rap->PhysDiskNum = 0xFF;
593 se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
594 se->Address = htole32(addr);
595 MPI_pSGE_SET_LENGTH(se, len);
596 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
597 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
598 MPI_SGE_FLAGS_END_OF_LIST |
599 (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
600 se->FlagsLength = htole32(se->FlagsLength);
601 rap->MsgContext = htole32(req->index | raid_handler_id);
603 mpt_check_doorbell(mpt);
604 mpt_send_cmd(mpt, req);
607 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
608 /*sleep_ok*/FALSE, /*time_ms*/2000));
614 /*************************** RAID Status Monitoring ***************************/
/*
 * Create the RAID monitoring kthread, holding the pass-through SIM
 * frozen until the thread completes its first data refresh.
 */
616 mpt_spawn_raid_thread(struct mpt_softc *mpt)
621 * Freeze out any CAM transactions until our thread
622 * is able to run at least once. We need to update
623 * our RAID pages before accepting I/O or we may
624 * reject I/O to an ID we later determine is for a
628 xpt_freeze_simq(mpt->phydisk_sim, 1);
630 error = mpt_kthread_create(mpt_raid_thread, mpt,
631 &mpt->raid_thread, /*flags*/0, /*altstack*/0,
632 "mpt_raid%d", mpt->unit);
/* On failure, undo the freeze so CAM is not wedged. */
635 xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
/*
 * Ask the RAID thread to exit and wait for it to do so.  The shutdown
 * flag plus a wakeup on raid_volumes breaks the thread's sleep loop.
 */
642 mpt_terminate_raid_thread(struct mpt_softc *mpt)
645 if (mpt->raid_thread == NULL) {
648 mpt->shutdwn_raid = 1;
649 wakeup(mpt->raid_volumes);
651 * Sleep on a slightly different location
652 * for this interlock just for added safety.
654 mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
/* Completion callback for the bus rescan CCB: release its path. */
658 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
661 xpt_free_path(ccb->ccb_h.path);
/*
 * RAID monitoring thread.  Sleeps until woken (event or timer), then
 * refreshes the cached RAID configuration data; after the first
 * successful refresh it releases the pass-through SIM frozen by
 * mpt_spawn_raid_thread().  Also performs deferred bus rescans.
 * Exits when shutdwn_raid is set, waking mpt_terminate_raid_thread().
 */
666 mpt_raid_thread(void *arg)
668 struct mpt_softc *mpt;
671 mpt = (struct mpt_softc *)arg;
677 while (mpt->shutdwn_raid == 0) {
679 if (mpt->raid_wakeup == 0) {
680 mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
684 mpt->raid_wakeup = 0;
/* On refresh failure, retry later via the RAID timer. */
686 if (mpt_refresh_raid_data(mpt)) {
687 mpt_schedule_raid_refresh(mpt); /* XX NOT QUITE RIGHT */
692 * Now that we have our first snapshot of RAID data,
693 * allow CAM to access our physical disk bus.
697 MPTLOCK_2_CAMLOCK(mpt);
698 xpt_release_simq(mpt->phydisk_sim, TRUE);
699 CAMLOCK_2_MPTLOCK(mpt);
702 if (mpt->raid_rescan != 0) {
704 struct cam_path *path;
707 mpt->raid_rescan = 0;
710 ccb = xpt_alloc_ccb();
713 error = xpt_create_path(&path, xpt_periph,
714 cam_sim_path(mpt->phydisk_sim),
715 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
716 if (error != CAM_REQ_CMP) {
718 mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
720 xpt_setup_ccb(&ccb->ccb_h, path, 5);
721 ccb->ccb_h.func_code = XPT_SCAN_BUS;
722 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
723 ccb->crcn.flags = CAM_FLAG_NONE;
724 MPTLOCK_2_CAMLOCK(mpt);
726 CAMLOCK_2_MPTLOCK(mpt);
/* Signal mpt_terminate_raid_thread() that we are gone. */
730 mpt->raid_thread = NULL;
731 wakeup(&mpt->raid_thread);
/* Timeout handler armed by mpt_raid_quiesce_disk(); body mostly elided. */
738 mpt_raid_quiesce_timeout(void *arg)
740 /* Complete the CCB with error */
744 static timeout_t mpt_raid_quiesce_timeout;
/*
 * Quiesce physical I/O to a RAID member disk via a
 * MPI_RAID_ACTION_QUIESCE_PHYS_IO action.  Freezes the device queue
 * while quiescing; returns a CAM status reflecting the outcome.
 * (Log-string typos "Quiece" corrected to "Quiesce", and the failure
 * message now carries a ": " separator before the status values.)
 */
746 mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
752 if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
753 return (CAM_REQ_CMP);
755 if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
758 mpt_disk->flags |= MPT_RDF_QUIESCING;
759 xpt_freeze_devq(ccb->ccb_h.path, 1);
761 rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
762 MPI_RAID_ACTION_QUIESCE_PHYS_IO,
763 /*ActionData*/0, /*addr*/0,
764 /*len*/0, /*write*/FALSE,
767 return (CAM_REQ_CMP_ERR);
769 mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
771 if (rv == ETIMEDOUT) {
772 mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
773 "Quiesce Timed-out\n");
774 xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
775 return (CAM_REQ_CMP_ERR);
778 ar = REQ_TO_RAID_ACTION_RESULT(req);
780 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
781 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
782 mpt_disk_prt(mpt, mpt_disk, "Quiesce Failed: "
783 "%d:%x:%x\n", rv, req->IOCStatus,
785 xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
786 return (CAM_REQ_CMP_ERR);
789 return (CAM_REQ_INPROG);
791 return (CAM_REQUEUE_REQ);
795 /* XXX Ignores that there may be multiple busses/IOCs involved. */
/*
 * Translate a pass-through target id to the physical disk's real
 * PhysDiskID, if the target maps to an active RAID member.
 * NOTE(review): mpt_disk is computed before the bounds check on
 * target_id; the && short-circuit prevents a dereference, but the
 * out-of-range pointer arithmetic itself is technically UB.
 */
797 mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
799 struct mpt_raid_disk *mpt_disk;
801 mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
802 if (ccb->ccb_h.target_id < mpt->raid_max_disks
803 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
804 *tgt = mpt_disk->config_page.PhysDiskID;
807 mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
808 ccb->ccb_h.target_id);
812 /* XXX Ignores that there may be multiple busses/IOCs involved. */
/*
 * Return whether the given target id is one of the IOC's active RAID
 * volumes (scans the IOC page 2 volume table).
 */
814 mpt_is_raid_volume(struct mpt_softc *mpt, int tgt)
816 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
817 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
819 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
822 ioc_vol = mpt->ioc_page2->RaidVolume;
823 ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
824 for (;ioc_vol != ioc_last_vol; ioc_vol++) {
825 if (ioc_vol->VolumeID == tgt) {
/*
 * Bring a volume's enabled state in line with the requested 'enable'
 * flag by issuing an ENABLE/DISABLE_VOLUME RAID action.  No-op when the
 * hardware state already matches.
 */
834 mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
838 struct mpt_raid_action_result *ar;
839 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
843 vol_pg = mpt_vol->config_page;
844 enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
847 * If the setting matches the configuration,
848 * there is nothing to do.
850 if ((enabled && enable)
851 || (!enabled && !enable))
854 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
856 mpt_vol_prt(mpt, mpt_vol,
857 "mpt_enable_vol: Get request failed!\n");
861 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
862 enable ? MPI_RAID_ACTION_ENABLE_VOLUME
863 : MPI_RAID_ACTION_DISABLE_VOLUME,
864 /*data*/0, /*addr*/0, /*len*/0,
865 /*write*/FALSE, /*wait*/TRUE);
866 if (rv == ETIMEDOUT) {
867 mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
868 "%s Volume Timed-out\n",
869 enable ? "Enable" : "Disable");
/* Any IOC- or action-level failure is reported but not retried. */
872 ar = REQ_TO_RAID_ACTION_RESULT(req);
874 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
875 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
876 mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
877 enable ? "Enable" : "Disable",
878 rv, req->IOCStatus, ar->action_status);
881 mpt_free_request(mpt, req);
/*
 * Reconcile a volume's member write-cache-enable (MWCE) setting with the
 * driver policy (mpt->raid_mwce_setting): ON, OFF, rebuild-only, or
 * no-change.  When an update is needed, toggle the WCE bit via a
 * CHANGE_VOLUME_SETTINGS RAID action; the cached page is toggled back on
 * failure so it continues to mirror the hardware.
 */
886 mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
889 struct mpt_raid_action_result *ar;
890 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
896 vol_pg = mpt_vol->config_page;
897 resyncing = vol_pg->VolumeStatus.Flags
898 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
899 mwce = vol_pg->VolumeSettings.Settings
900 & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
903 * If the setting matches the configuration,
904 * there is nothing to do.
906 switch (mpt->raid_mwce_setting) {
907 case MPT_RAID_MWCE_REBUILD_ONLY:
908 if ((resyncing && mwce) || (!resyncing && !mwce)) {
911 mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
912 if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
914 * Wait one more status update to see if
915 * resyncing gets enabled. It gets disabled
916 * temporarily when WCE is changed.
921 case MPT_RAID_MWCE_ON:
925 case MPT_RAID_MWCE_OFF:
929 case MPT_RAID_MWCE_NC:
933 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
935 mpt_vol_prt(mpt, mpt_vol,
936 "mpt_verify_mwce: Get request failed!\n");
/* Toggle WCE, snapshot the settings word to send, then toggle back. */
940 vol_pg->VolumeSettings.Settings ^=
941 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
942 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
943 vol_pg->VolumeSettings.Settings ^=
944 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
945 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
946 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
947 data, /*addr*/0, /*len*/0,
948 /*write*/FALSE, /*wait*/TRUE);
949 if (rv == ETIMEDOUT) {
950 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
951 "Write Cache Enable Timed-out\n");
954 ar = REQ_TO_RAID_ACTION_RESULT(req);
956 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
957 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
958 mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
959 "%d:%x:%x\n", rv, req->IOCStatus,
/* Revert the cached copy; the hardware did not take the change. */
962 vol_pg->VolumeSettings.Settings ^=
963 MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
965 mpt_free_request(mpt, req);
/*
 * Reconcile a volume's resync rate/priority with the configured
 * mpt->raid_resync_rate.  Volumes with a non-zero ResyncRate take a
 * SET_RESYNC_RATE action; otherwise the high/low priority bit is
 * adjusted via CHANGE_VOLUME_SETTINGS (rate >= 128 means high priority).
 */
969 mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
972 struct mpt_raid_action_result *ar;
973 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
977 vol_pg = mpt_vol->config_page;
979 if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
983 * If the current RAID resync rate does not
984 * match our configured rate, update it.
986 prio = vol_pg->VolumeSettings.Settings
987 & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
988 if (vol_pg->ResyncRate != 0
989 && vol_pg->ResyncRate != mpt->raid_resync_rate) {
991 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
993 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
994 "Get request failed!\n");
998 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
999 MPI_RAID_ACTION_SET_RESYNC_RATE,
1000 mpt->raid_resync_rate, /*addr*/0,
1001 /*len*/0, /*write*/FALSE, /*wait*/TRUE);
1002 if (rv == ETIMEDOUT) {
1003 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1004 "Resync Rate Setting Timed-out\n");
1008 ar = REQ_TO_RAID_ACTION_RESULT(req);
1010 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1011 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1012 mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1013 "%d:%x:%x\n", rv, req->IOCStatus,
/* Success: keep the cached page in sync with the new rate. */
1016 vol_pg->ResyncRate = mpt->raid_resync_rate;
1017 mpt_free_request(mpt, req);
1018 } else if ((prio && mpt->raid_resync_rate < 128)
1019 || (!prio && mpt->raid_resync_rate >= 128)) {
1022 req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
1024 mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
1025 "Get request failed!\n");
/* Toggle the priority bit, snapshot to send, then toggle back. */
1029 vol_pg->VolumeSettings.Settings ^=
1030 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1031 memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
1032 vol_pg->VolumeSettings.Settings ^=
1033 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1034 rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
1035 MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
1036 data, /*addr*/0, /*len*/0,
1037 /*write*/FALSE, /*wait*/TRUE);
1038 if (rv == ETIMEDOUT) {
1039 mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
1040 "Resync Rate Setting Timed-out\n");
1043 ar = REQ_TO_RAID_ACTION_RESULT(req);
1045 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
1046 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
1047 mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
1048 "%d:%x:%x\n", rv, req->IOCStatus,
/* Success path: commit the priority change to the cached page. */
1051 vol_pg->VolumeSettings.Settings ^=
1052 MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1055 mpt_free_request(mpt, req);
/*
 * Clamp the number of outstanding transactions (openings) for a RAID
 * volume's device to the configured raid_queue_depth via XPT_REL_SIMQ.
 */
1060 mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1061 struct cam_path *path)
1063 struct ccb_relsim crs;
1065 xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
1066 crs.ccb_h.func_code = XPT_REL_SIMQ;
1067 crs.release_flags = RELSIM_ADJUST_OPENINGS;
1068 crs.openings = mpt->raid_queue_depth;
1069 xpt_action((union ccb *)&crs);
1070 if (crs.ccb_h.status != CAM_REQ_CMP)
1071 mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
1072 "with CAM status %#x\n", crs.ccb_h.status);
/*
 * Log a full description of a RAID volume: its settings bits, hot-spare
 * pool membership, and per-member disk status (role, sync/quiesce
 * flags, and physical disk state).
 */
1076 mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
1078 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1081 vol_pg = mpt_vol->config_page;
1082 mpt_vol_prt(mpt, mpt_vol, "Settings (");
/* Walk each settings bit and print a name for the ones we know. */
1083 for (i = 1; i <= 0x8000; i <<= 1) {
1084 switch (vol_pg->VolumeSettings.Settings & i) {
1085 case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
1086 mpt_prtc(mpt, " Member-WCE");
1088 case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
1089 mpt_prtc(mpt, " Offline-On-SMART-Err");
1091 case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
1092 mpt_prtc(mpt, " Hot-Plug-Spares");
1094 case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
1095 mpt_prtc(mpt, " High-Priority-ReSync");
1101 mpt_prtc(mpt, " )\n");
1102 if (vol_pg->VolumeSettings.HotSparePool != 0) {
1103 mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
1104 powerof2(vol_pg->VolumeSettings.HotSparePool)
/* HotSparePool is a bitmask of up to 8 pools. */
1106 for (i = 0; i < 8; i++) {
1110 if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
1112 mpt_prtc(mpt, " %d", i);
1114 mpt_prtc(mpt, "\n");
1116 mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
1117 for (i = 0; i < vol_pg->NumPhysDisks; i++){
1118 struct mpt_raid_disk *mpt_disk;
1119 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1120 int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1123 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1124 disk_pg = &mpt_disk->config_page;
1126 mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
1127 pt_bus, disk_pg->PhysDiskID);
/* Mirrors report Primary/Secondary; striped sets report position. */
1128 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1129 mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
1130 "Primary" : "Secondary");
1132 mpt_prtc(mpt, "Stripe Position %d",
1133 mpt_disk->member_number);
1135 f = disk_pg->PhysDiskStatus.Flags;
1136 s = disk_pg->PhysDiskStatus.State;
1137 if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
1138 mpt_prtc(mpt, " Out of Sync");
1140 if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
1141 mpt_prtc(mpt, " Quiesced");
1143 if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
1144 mpt_prtc(mpt, " Inactive");
1146 if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
1147 mpt_prtc(mpt, " Was Optimal");
1149 if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
1150 mpt_prtc(mpt, " Was Non-Optimal");
1153 case MPI_PHYSDISK0_STATUS_ONLINE:
1154 mpt_prtc(mpt, " Online");
1156 case MPI_PHYSDISK0_STATUS_MISSING:
1157 mpt_prtc(mpt, " Missing");
1159 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1160 mpt_prtc(mpt, " Incompatible");
1162 case MPI_PHYSDISK0_STATUS_FAILED:
1163 mpt_prtc(mpt, " Failed");
1165 case MPI_PHYSDISK0_STATUS_INITIALIZING:
1166 mpt_prtc(mpt, " Initializing");
1168 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1169 mpt_prtc(mpt, " Requested Offline");
1171 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1172 mpt_prtc(mpt, " Requested Failed");
1174 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1176 mpt_prtc(mpt, " Offline Other (%x)", s);
1179 mpt_prtc(mpt, "\n");
/*
 * Log a physical disk's RAID address and pass-through address, plus its
 * hot-spare pool membership (the pool field is a bitmask of 8 pools).
 */
1184 mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
1186 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1187 int rd_bus = cam_sim_bus(mpt->sim);
1188 int pt_bus = cam_sim_bus(mpt->phydisk_sim);
1191 disk_pg = &mpt_disk->config_page;
1192 mpt_disk_prt(mpt, mpt_disk,
1193 "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%jd:0)\n",
1194 device_get_nameunit(mpt->dev), rd_bus,
1195 disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
1196 pt_bus, (intmax_t)(mpt_disk - mpt->raid_disks));
1197 if (disk_pg->PhysDiskSettings.HotSparePool == 0)
1199 mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
1200 powerof2(disk_pg->PhysDiskSettings.HotSparePool)
1202 for (i = 0; i < 8; i++) {
1206 if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
1208 mpt_prtc(mpt, " %d", i);
1210 mpt_prtc(mpt, "\n");
/*
 * Re-read a physical disk's RAID PHYS DISK page 0 from the IOC (header
 * first, then the current page) and byte-swap it into host order.
 */
1214 mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
1215 IOC_3_PHYS_DISK *ioc_disk)
1219 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
1220 /*PageNumber*/0, ioc_disk->PhysDiskNum,
1221 &mpt_disk->config_page.Header,
1222 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1224 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1225 "Failed to read RAID Disk Hdr(%d)\n",
1226 ioc_disk->PhysDiskNum);
1229 rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
1230 &mpt_disk->config_page.Header,
1231 sizeof(mpt_disk->config_page),
1232 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1234 mpt_prt(mpt, "mpt_refresh_raid_disk: "
1235 "Failed to read RAID Disk Page(%d)\n",
1236 ioc_disk->PhysDiskNum);
1237 mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
/*
 * Re-read a volume's RAID VOL page 0, mark the volume active, refresh
 * each member disk's volume/member-number linkage, and — if a resync is
 * in progress — fetch the progress indicator via an INDICATOR_STRUCT
 * RAID action into mpt_vol->sync_progress.
 */
1241 mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
1242 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
1244 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1245 struct mpt_raid_action_result *ar;
1250 vol_pg = mpt_vol->config_page;
1251 mpt_vol->flags &= ~MPT_RVF_UP2DATE;
1253 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1254 ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
1256 mpt_vol_prt(mpt, mpt_vol,
1257 "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
1258 ioc_vol->VolumePageNumber);
1262 rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
1263 &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
1265 mpt_vol_prt(mpt, mpt_vol,
1266 "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
1267 ioc_vol->VolumePageNumber);
1270 mpt2host_config_page_raid_vol_0(vol_pg);
1272 mpt_vol->flags |= MPT_RVF_ACTIVE;
1274 /* Update disk entry array data. */
1275 for (i = 0; i < vol_pg->NumPhysDisks; i++) {
1276 struct mpt_raid_disk *mpt_disk;
1277 mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
1278 mpt_disk->volume = mpt_vol;
1279 mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
/* Mirror maps are 1-based in the page; normalize to 0-based. */
1280 if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
1281 mpt_disk->member_number--;
/* Only fetch sync progress while a resync is actually running. */
1285 if ((vol_pg->VolumeStatus.Flags
1286 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1289 req = mpt_get_request(mpt, TRUE);
1291 mpt_vol_prt(mpt, mpt_vol,
1292 "mpt_refresh_raid_vol: Get request failed!\n");
1295 rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
1296 MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
1297 if (rv == ETIMEDOUT) {
1298 mpt_vol_prt(mpt, mpt_vol,
1299 "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
1300 mpt_free_request(mpt, req);
1304 ar = REQ_TO_RAID_ACTION_RESULT(req);
1306 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
1307 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
1308 memcpy(&mpt_vol->sync_progress,
1309 &ar->action_data.indicator_struct,
1310 sizeof(mpt_vol->sync_progress));
1311 mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
1313 mpt_vol_prt(mpt, mpt_vol,
1314 "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
1316 mpt_free_request(mpt, req);
1320 * Update in-core information about RAID support. We update any entries
1321 * that didn't previously exists or have been marked as needing to
1322 * be updated by our event handler. Interesting changes are displayed
1326 mpt_refresh_raid_data(struct mpt_softc *mpt)
1328 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1329 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1330 IOC_3_PHYS_DISK *ioc_disk;
1331 IOC_3_PHYS_DISK *ioc_last_disk;
1332 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1336 u_int nonopt_volumes;
1338 if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1343 * Mark all items as unreferenced by the configuration.
1344 * This allows us to find, report, and discard stale
1347 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1348 mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1350 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1351 mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1355 * Get Physical Disk information.
1357 len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1358 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1359 &mpt->ioc_page3->Header, len,
1360 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1363 "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
1366 mpt2host_config_page_ioc3(mpt->ioc_page3);
1368 ioc_disk = mpt->ioc_page3->PhysDisk;
1369 ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1370 for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1371 struct mpt_raid_disk *mpt_disk;
1373 mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1374 mpt_disk->flags |= MPT_RDF_REFERENCED;
1375 if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1376 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1378 mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1381 mpt_disk->flags |= MPT_RDF_ACTIVE;
1386 * Refresh volume data.
1388 len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1389 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1390 &mpt->ioc_page2->Header, len,
1391 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1393 mpt_prt(mpt, "mpt_refresh_raid_data: "
1394 "Failed to read IOC Page 2\n");
1397 mpt2host_config_page_ioc2(mpt->ioc_page2);
1399 ioc_vol = mpt->ioc_page2->RaidVolume;
1400 ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1401 for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1402 struct mpt_raid_volume *mpt_vol;
1404 mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1405 mpt_vol->flags |= MPT_RVF_REFERENCED;
1406 vol_pg = mpt_vol->config_page;
1409 if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1410 != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1411 || (vol_pg->VolumeStatus.Flags
1412 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1414 mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1416 mpt_vol->flags |= MPT_RVF_ACTIVE;
1420 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1421 struct mpt_raid_volume *mpt_vol;
1427 mpt_vol = &mpt->raid_volumes[i];
1429 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1433 vol_pg = mpt_vol->config_page;
1434 if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1435 == MPT_RVF_ANNOUNCED) {
1436 mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1441 if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1442 mpt_announce_vol(mpt, mpt_vol);
1443 mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1446 if (vol_pg->VolumeStatus.State !=
1447 MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1450 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1453 mpt_vol->flags |= MPT_RVF_UP2DATE;
1454 mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1455 mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1456 mpt_verify_mwce(mpt, mpt_vol);
1458 if (vol_pg->VolumeStatus.Flags == 0) {
1462 mpt_vol_prt(mpt, mpt_vol, "Status (");
1463 for (m = 1; m <= 0x80; m <<= 1) {
1464 switch (vol_pg->VolumeStatus.Flags & m) {
1465 case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1466 mpt_prtc(mpt, " Enabled");
1468 case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1469 mpt_prtc(mpt, " Quiesced");
1471 case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1472 mpt_prtc(mpt, " Re-Syncing");
1474 case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1475 mpt_prtc(mpt, " Inactive");
1481 mpt_prtc(mpt, " )\n");
1483 if ((vol_pg->VolumeStatus.Flags
1484 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1487 mpt_verify_resync_rate(mpt, mpt_vol);
1489 left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1490 total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1491 if (vol_pg->ResyncRate != 0) {
1493 prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1494 mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1495 prio / 1000, prio % 1000);
1497 prio = vol_pg->VolumeSettings.Settings
1498 & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1499 mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1500 prio ? "High" : "Low");
1502 mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1503 "blocks remaining\n", (uintmax_t)left,
1506 /* Periodically report on sync progress. */
1507 mpt_schedule_raid_refresh(mpt);
1510 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1511 struct mpt_raid_disk *mpt_disk;
1512 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1515 mpt_disk = &mpt->raid_disks[i];
1516 disk_pg = &mpt_disk->config_page;
1518 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1521 if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1522 == MPT_RDF_ANNOUNCED) {
1523 mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1524 mpt_disk->flags = 0;
1529 if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1531 mpt_announce_disk(mpt, mpt_disk);
/*
 * Fix: mark the *disk* as announced with the disk flag namespace
 * (MPT_RDF_ANNOUNCED); the original code OR'd in the volume flag
 * MPT_RVF_ANNOUNCED here, which only worked by accident of the two
 * flags sharing a bit value.  The matching test at the top of this
 * loop uses MPT_RDF_ANNOUNCED.
 */
1532 mpt_disk->flags |= MPT_RDF_ANNOUNCED;
1535 if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1538 mpt_disk->flags |= MPT_RDF_UP2DATE;
1539 mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1540 if (disk_pg->PhysDiskStatus.Flags == 0)
1543 mpt_disk_prt(mpt, mpt_disk, "Status (");
1544 for (m = 1; m <= 0x80; m <<= 1) {
1545 switch (disk_pg->PhysDiskStatus.Flags & m) {
1546 case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1547 mpt_prtc(mpt, " Out-Of-Sync");
1549 case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1550 mpt_prtc(mpt, " Quiesced");
1556 mpt_prtc(mpt, " )\n");
/* Export the non-optimal volume count gathered above via sysctl. */
1559 mpt->raid_nonopt_volumes = nonopt_volumes;
/*
 * Callout handler armed by mpt_schedule_raid_refresh(): simply wakes
 * the RAID monitoring thread so it can re-poll controller RAID state.
 */
1564 mpt_raid_timer(void *arg)
1566 struct mpt_softc *mpt;
1568 mpt = (struct mpt_softc *)arg;
1570 mpt_raid_wakeup(mpt);
/*
 * (Re)arm the periodic RAID refresh callout; mpt_raid_timer() fires
 * after MPT_RAID_SYNC_REPORT_INTERVAL ticks to report sync progress.
 */
1575 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1577 callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1578 mpt_raid_timer, mpt);
/*
 * Free all dynamically allocated RAID bookkeeping: each volume's cached
 * config page, the volume and disk arrays, and the cached IOC pages 2/3.
 * Pointers are NULLed after free so a repeat call is harmless, and the
 * max counters are reset to reflect the empty state.
 */
1582 mpt_raid_free_mem(struct mpt_softc *mpt)
1585 if (mpt->raid_volumes) {
1586 struct mpt_raid_volume *mpt_raid;
1588 for (i = 0; i < mpt->raid_max_volumes; i++) {
1589 mpt_raid = &mpt->raid_volumes[i];
1590 if (mpt_raid->config_page) {
1591 kfree(mpt_raid->config_page, M_DEVBUF);
1592 mpt_raid->config_page = NULL;
1595 kfree(mpt->raid_volumes, M_DEVBUF);
1596 mpt->raid_volumes = NULL;
1598 if (mpt->raid_disks) {
1599 kfree(mpt->raid_disks, M_DEVBUF);
1600 mpt->raid_disks = NULL;
1602 if (mpt->ioc_page2) {
1603 kfree(mpt->ioc_page2, M_DEVBUF);
1604 mpt->ioc_page2 = NULL;
1606 if (mpt->ioc_page3) {
1607 kfree(mpt->ioc_page3, M_DEVBUF);
1608 mpt->ioc_page3 = NULL;
1610 mpt->raid_max_volumes = 0;
1611 mpt->raid_max_disks = 0;
1614 #if __FreeBSD_version >= 500000
/*
 * Set the global resync rate policy and push it to every active volume.
 * Rejects rates outside [MPT_RAID_RESYNC_RATE_MIN, MPT_RAID_RESYNC_RATE_MAX]
 * unless the caller asked for "no change" (MPT_RAID_RESYNC_RATE_NC).
 * Inactive volumes are skipped; mpt_verify_resync_rate() applies the
 * setting to the hardware for each active one.
 */
1616 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1618 struct mpt_raid_volume *mpt_vol;
1620 if ((rate > MPT_RAID_RESYNC_RATE_MAX
1621 || rate < MPT_RAID_RESYNC_RATE_MIN)
1622 && rate != MPT_RAID_RESYNC_RATE_NC)
1626 mpt->raid_resync_rate = rate;
1627 RAID_VOL_FOREACH(mpt, mpt_vol) {
1628 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1631 mpt_verify_resync_rate(mpt, mpt_vol);
/*
 * Set the default queue depth for RAID volumes (valid range 1-255) and
 * apply it to each currently active volume by building a CAM path to the
 * volume's target and calling mpt_adjust_queue_depth().  The MPT lock is
 * dropped around the CAM calls (MPTLOCK_2_CAMLOCK/CAMLOCK_2_MPTLOCK) per
 * the driver's locking convention.
 */
1638 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1640 struct mpt_raid_volume *mpt_vol;
1642 if (vol_queue_depth > 255 || vol_queue_depth < 1)
1646 mpt->raid_queue_depth = vol_queue_depth;
1647 RAID_VOL_FOREACH(mpt, mpt_vol) {
1648 struct cam_path *path;
1651 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1654 mpt->raid_rescan = 0;
1656 MPTLOCK_2_CAMLOCK(mpt);
1657 error = xpt_create_path(&path, xpt_periph,
1658 cam_sim_path(mpt->sim),
1659 mpt_vol->config_page->VolumeID,
1661 if (error != CAM_REQ_CMP) {
1662 CAMLOCK_2_MPTLOCK(mpt);
1663 mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1666 mpt_adjust_queue_depth(mpt, mpt_vol, path);
1667 xpt_free_path(path);
1668 CAMLOCK_2_MPTLOCK(mpt);
/*
 * Set the Member Write Cache Enable (MWCE) policy for all volumes.
 * No-op if the setting is unchanged.  Each active volume's hardware
 * setting is brought in line via mpt_verify_mwce(); raid_mwce_set is
 * latched so the unsafe-shutdown heuristic below fires only on the
 * first change after attach.
 */
1675 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1677 struct mpt_raid_volume *mpt_vol;
1678 int force_full_resync;
1681 if (mwce == mpt->raid_mwce_setting) {
1687 * Catch MWCE being left on due to a failed shutdown. Since
1688 * sysctls cannot be set by the loader, we treat the first
1689 * setting of this variable specially and force a full volume
1690 * resync if MWCE is enabled and a resync is in progress.
1692 force_full_resync = 0;
1693 if (mpt->raid_mwce_set == 0
1694 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1695 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1696 force_full_resync = 1;
1698 mpt->raid_mwce_setting = mwce;
1699 RAID_VOL_FOREACH(mpt, mpt_vol) {
1700 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1704 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1707 vol_pg = mpt_vol->config_page;
1708 resyncing = vol_pg->VolumeStatus.Flags
1709 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1710 mwce = vol_pg->VolumeSettings.Settings
1711 & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1712 if (force_full_resync && resyncing && mwce) {
1715 * XXX disable/enable volume should force a resync,
1716 * but we'll need to quiesce, drain, and restart
1719 mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1720 "detected. Suggest full resync.\n");
1722 mpt_verify_mwce(mpt, mpt_vol);
1724 mpt->raid_mwce_set = 1;
/*
 * Human-readable names for the mpt_raid_mwce_t values, indexed by the
 * enum; consumed by the vol_member_wce sysctl handler for both output
 * and string-to-enum parsing.
 */
1728 const char *mpt_vol_mwce_strs[] =
1732 "On-During-Rebuild",
/*
 * Sysctl handler for "vol_member_wce".  Reports the current MWCE policy
 * as a string; on write, matches the user string against
 * mpt_vol_mwce_strs[] (rejecting oversized input) and applies the
 * corresponding enum via mpt_raid_set_vol_mwce().
 */
1737 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1740 struct mpt_softc *mpt;
1748 mpt = (struct mpt_softc *)arg1;
1749 str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
1750 error = SYSCTL_OUT(req, str, strlen(str) + 1);
/* Read-only access or output error: done. */
1751 if (error || !req->newptr) {
1755 size = req->newlen - req->newidx;
/* Reject strings that cannot fit (with NUL) in the local buffer. */
1756 if (size >= sizeof(inbuf)) {
1760 error = SYSCTL_IN(req, inbuf, size);
1765 for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1766 if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1767 return (mpt_raid_set_vol_mwce(mpt, i));
/*
 * Sysctl handler for "vol_resync_rate": standard read-modify-write int
 * sysctl.  Validation and propagation to active volumes is delegated to
 * mpt_raid_set_vol_resync_rate().
 */
1774 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1776 struct mpt_softc *mpt;
1777 u_int raid_resync_rate;
1782 mpt = (struct mpt_softc *)arg1;
1783 raid_resync_rate = mpt->raid_resync_rate;
1785 error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
/* Read-only access or handler error: nothing to apply. */
1786 if (error || !req->newptr) {
1790 return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
/*
 * Sysctl handler for "vol_queue_depth": standard read-modify-write int
 * sysctl.  Range checking and per-volume application is delegated to
 * mpt_raid_set_vol_queue_depth().
 */
1794 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1796 struct mpt_softc *mpt;
1797 u_int raid_queue_depth;
1802 mpt = (struct mpt_softc *)arg1;
1803 raid_queue_depth = mpt->raid_queue_depth;
1805 error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
/* Read-only access or handler error: nothing to apply. */
1806 if (error || !req->newptr) {
1810 return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
/*
 * Register the driver's RAID sysctl nodes under the device's sysctl
 * tree: three read/write policy knobs (vol_member_wce, vol_queue_depth,
 * vol_resync_rate) backed by the handlers above, and one read-only
 * counter (nonoptimal_volumes) exported directly from the softc.
 */
1814 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1816 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1817 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1819 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1820 "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1821 mpt_raid_sysctl_vol_member_wce, "A",
1822 "volume member WCE(On,Off,On-During-Rebuild,NC)");
1824 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1825 "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1826 mpt_raid_sysctl_vol_queue_depth, "I",
1827 "default volume queue depth");
1829 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1830 "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1831 mpt_raid_sysctl_vol_resync_rate, "I",
1832 "volume resync priority (0 == NC, 1 - 255)");
1833 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1834 "nonoptimal_volumes", CTLFLAG_RD,
1835 &mpt->raid_nonopt_volumes, 0,
1836 "number of nonoptimal volumes");