2 * Routines for handling the integrated RAID features LSI MPT Fusion adapters.
4 * Copyright (c) 2005, WHEEL Sp. z o.o.
5 * Copyright (c) 2005 Justin T. Gibbs.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon including
16 * a substantially similar Disclaimer requirement for further binary
18 * 3. Neither the names of the above listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
32 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 * Some Breakage and Bug Fixing added later.
36 * Copyright (c) 2006, by Matthew Jacob
39 * Support from LSI-Logic has also gone a great deal toward making this a
40 * workable subsystem and is gratefully acknowledged.
42 * $FreeBSD: src/sys/dev/mpt/mpt_raid.c,v 1.28 2011/01/12 19:53:56 mdf Exp $
45 #include <dev/disk/mpt/mpt.h>
46 #include <dev/disk/mpt/mpt_raid.h>
48 #include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
49 #include "dev/disk/mpt/mpilib/mpi_raid.h"
51 #include <bus/cam/cam.h>
52 #include <bus/cam/cam_ccb.h>
53 #include <bus/cam/cam_sim.h>
54 #include <bus/cam/cam_xpt_sim.h>
56 #include <sys/devicestat.h>
57 #include <bus/cam/cam_periph.h>
59 #include <sys/callout.h>
60 #include <sys/kthread.h>
61 #include <sys/sysctl.h>
63 #include <machine/stdarg.h>
/*
 * Decoded result of a RAID_ACTION reply, stored just past the request
 * message in the request buffer (see REQ_TO_RAID_ACTION_RESULT()).
 * NOTE(review): this extraction appears lossy -- the braces and the
 * union that normally wraps the action-data members are not visible.
 */
struct mpt_raid_action_result
	/* Sync-progress data returned by an INDICATOR_STRUCT action. */
	MPI_RAID_VOL_INDICATOR indicator_struct;
	uint32_t new_settings;
	uint8_t phys_disk_num;
	/* ActionStatus from the reply frame, byte-swapped via le16toh(). */
	uint16_t action_status;
/*
 * Locate the mpt_raid_action_result area placed immediately after the
 * RAID action request message in the request's virtual buffer.
 */
#define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \
	(((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1))

/* Request IOCStatus with the non-status flag bits masked off. */
#define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK)
81 static mpt_probe_handler_t mpt_raid_probe;
82 static mpt_attach_handler_t mpt_raid_attach;
83 static mpt_enable_handler_t mpt_raid_enable;
84 static mpt_event_handler_t mpt_raid_event;
85 static mpt_shutdown_handler_t mpt_raid_shutdown;
86 static mpt_reset_handler_t mpt_raid_ioc_reset;
87 static mpt_detach_handler_t mpt_raid_detach;
/*
 * RAID personality registration: hooks this module's probe/attach/
 * event/reset/shutdown/detach handlers into the core mpt(4) driver.
 * NOTE(review): the surrounding braces and any .name initializer are
 * missing from this extraction.
 */
static struct mpt_personality mpt_raid_personality =
	.probe = mpt_raid_probe,
	.attach = mpt_raid_attach,
	.enable = mpt_raid_enable,
	.event = mpt_raid_event,
	.reset = mpt_raid_ioc_reset,
	.shutdown = mpt_raid_shutdown,
	.detach = mpt_raid_detach,
101 DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD);
102 MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1);
104 static mpt_reply_handler_t mpt_raid_reply_handler;
105 static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
106 MSG_DEFAULT_REPLY *reply_frame);
107 static int mpt_spawn_raid_thread(struct mpt_softc *mpt);
108 static void mpt_terminate_raid_thread(struct mpt_softc *mpt);
109 static void mpt_raid_thread(void *arg);
110 static timeout_t mpt_raid_timer;
112 static void mpt_enable_vol(struct mpt_softc *mpt,
113 struct mpt_raid_volume *mpt_vol, int enable);
115 static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *);
116 static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *,
118 static void mpt_raid_sysctl_attach(struct mpt_softc *);
120 static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;
/*
 * Return a human-readable name for a volume's RAID level, keyed on the
 * MPI VolumeType of its cached config page.
 * NOTE(review): the per-case return statements, default case, and the
 * function's return type/braces are missing from this extraction.
 */
mpt_vol_type(struct mpt_raid_volume *vol)
	switch (vol->config_page->VolumeType) {
	case MPI_RAID_VOL_TYPE_IS:
	case MPI_RAID_VOL_TYPE_IME:
	case MPI_RAID_VOL_TYPE_IM:
/*
 * Return a human-readable name for a volume's current state
 * (Optimal/Degraded/Failed) from its cached config page.
 * NOTE(review): per-case return statements and default case are
 * missing from this extraction.
 */
mpt_vol_state(struct mpt_raid_volume *vol)
	switch (vol->config_page->VolumeStatus.State) {
	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
/*
 * Return a human-readable name for a physical disk's current state.
 * NOTE(review): several case bodies (Online, Missing, Failed, the
 * OTHER_OFFLINE/default returns) are missing from this extraction.
 */
mpt_disk_state(struct mpt_raid_disk *disk)
	switch (disk->config_page.PhysDiskStatus.State) {
	case MPI_PHYSDISK0_STATUS_ONLINE:
	case MPI_PHYSDISK0_STATUS_MISSING:
	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
		return ("Incompatible");
	case MPI_PHYSDISK0_STATUS_FAILED:
	case MPI_PHYSDISK0_STATUS_INITIALIZING:
		return ("Initializing");
	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
		return ("Offline Requested");
	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
		return ("Failed per Host Request");
	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
/*
 * printf-style diagnostic prefixed with the unit and volume identity
 * ("mptN:volM(mptN:bus:id): ...").
 * NOTE(review): the va_list handling that forwards 'fmt' and the
 * variadic args is missing from this extraction.
 */
mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
	    const char *fmt, ...)
	kprintf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
		(u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
		vol->config_page->VolumeBus, vol->config_page->VolumeID);
/*
 * printf-style diagnostic prefixed with the disk's identity: by volume
 * membership when the disk belongs to a volume, otherwise by its
 * physical bus/ID.
 * NOTE(review): the va_list forwarding and the else keyword between the
 * two kprintf branches are missing from this extraction.
 */
mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
	     const char *fmt, ...)
	if (disk->volume != NULL) {
		kprintf("(%s:vol%d:%d): ",
			device_get_nameunit(mpt->dev),
			disk->volume->config_page->VolumeID,
			disk->member_number);
		kprintf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
			disk->config_page.PhysDiskBus,
			disk->config_page.PhysDiskID);
/*
 * CAM async event callback.  On AC_FOUND_DEVICE, if the new device's
 * target ID matches an active RAID volume, adjust that path's queue
 * depth to the driver's RAID queue depth.
 * NOTE(review): the switch(code) statement, braces, and 'continue'
 * bodies are missing from this extraction.
 */
mpt_raid_async(void *callback_arg, u_int32_t code,
	       struct cam_path *path, void *arg)
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc*)callback_arg;
	case AC_FOUND_DEVICE:
		struct ccb_getdev *cgd;
		struct mpt_raid_volume *mpt_vol;

		cgd = (struct ccb_getdev *)arg;
		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
			 cgd->ccb_h.target_id);
		RAID_VOL_FOREACH(mpt, mpt_vol) {
			/* Skip volumes not currently configured. */
			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
			if (mpt_vol->config_page->VolumeID
			 == cgd->ccb_h.target_id) {
				mpt_adjust_queue_depth(mpt, mpt_vol, path);
/*
 * Personality probe: decline when the IOC reports no integrated-RAID
 * capability (no IOC Page 2 or zero MaxPhysDisks).
 * NOTE(review): the return statements are missing from this extraction.
 */
mpt_raid_probe(struct mpt_softc *mpt)
	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
/*
 * Personality attach: start the RAID monitor timer and thread, register
 * the RAID reply handler, hook the CAM async callback for new devices,
 * and attach the sysctl knobs.  On failure paths it detaches itself.
 * NOTE(review): error declarations, error-check conditionals, and the
 * cleanup/return structure are missing from this extraction.  The
 * runtime string "RAID haandler" contains a typo ("handler") -- left
 * untouched here since changing it alters emitted output.
 */
mpt_raid_attach(struct mpt_softc *mpt)
	struct ccb_setasync csa;
	mpt_handler_t handler;

	mpt_callout_init(mpt, &mpt->raid_timer);
	error = mpt_spawn_raid_thread(mpt);
		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
	handler.reply_handler = mpt_raid_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		mpt_prt(mpt, "Unable to register RAID haandler!\n");
	xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE;
	csa.callback = mpt_raid_async;
	csa.callback_arg = mpt;
	xpt_action((union ccb *)&csa);
	if (csa.ccb_h.status != CAM_REQ_CMP) {
		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
			"CAM async handler.\n");
	mpt_raid_sysctl_attach(mpt);
	/* Failure path: undo partial attach. */
	mpt_raid_detach(mpt);
/* Personality "enable" hook; body not visible in this extraction. */
mpt_raid_enable(struct mpt_softc *mpt)
/*
 * Personality detach: stop the refresh timer, terminate the RAID
 * thread, deregister the reply handler, and disable the CAM async
 * callback (event_enable = 0 with the same callback/arg pair).
 * NOTE(review): locking calls and the trailing return are missing
 * from this extraction.
 */
mpt_raid_detach(struct mpt_softc *mpt)
	struct ccb_setasync csa;
	mpt_handler_t handler;

	callout_stop(&mpt->raid_timer);
	mpt_terminate_raid_thread(mpt);
	handler.reply_handler = mpt_raid_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = mpt_raid_async;
	csa.callback_arg = mpt;
	xpt_action((union ccb *)&csa);
/* IOC reset hook for the RAID personality; currently a no-op. */
mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
	/* Nothing to do yet. */
/*
 * Text for integrated-RAID event ReasonCodes; indexed directly by
 * raid_event->ReasonCode in mpt_raid_event() (bounds-checked against
 * NUM_ELEMENTS there).
 * NOTE(review): the leading entries (e.g. "Volume Created"/"Volume
 * Deleted") appear to be missing from this extraction; as shown, the
 * array would be misaligned with the ReasonCode values.  Confirm
 * against the MPI event definitions.
 */
static const char *raid_event_txt[] =
	"Volume Settings Changed",
	"Volume Status Changed",
	"Volume Physical Disk Membership Changed",
	"Physical Disk Created",
	"Physical Disk Deleted",
	"Physical Disk Settings Changed",
	"Physical Disk Status Changed",
	"Domain Validation Required",
	"SMART Data Received",
	"Replace Action Started",
/*
 * Handle an MPI_EVENT_INTEGRATED_RAID notification: locate the affected
 * volume and/or physical disk, clear their UP2DATE (and, for settings/
 * membership changes, ANNOUNCED) flags so the RAID thread re-reads and
 * re-reports them, log the event text, and wake the RAID thread.
 * NOTE(review): many structural lines (break statements, brace closes,
 * else branches, the 'i' declaration) are missing from this extraction.
 */
mpt_raid_event(struct mpt_softc *mpt, request_t *req,
	       MSG_EVENT_NOTIFY_REPLY *msg)
	EVENT_DATA_RAID *raid_event;
	struct mpt_raid_volume *mpt_vol;
	struct mpt_raid_disk *mpt_disk;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;

	/* Ignore events that are not integrated-RAID events. */
	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
	raid_event = (EVENT_DATA_RAID *)&msg->Data;
	/* Find the active volume matching the event's bus/ID, if any. */
	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
			mpt_vol = &mpt->raid_volumes[i];
			vol_pg = mpt_vol->config_page;
			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
			if (vol_pg->VolumeID == raid_event->VolumeID
			 && vol_pg->VolumeBus == raid_event->VolumeBus)
		if (i >= mpt->ioc_page2->MaxVolumes) {
	/* PhysDiskNum 0xFF means "no physical disk involved". */
	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
	switch(raid_event->ReasonCode) {
	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
		if (mpt_vol != NULL) {
			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
				/*
				 * Coalesce status messages into one
				 * per background run of our RAID thread.
				 * This removes "spurious" status messages
				 */
	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
		if (mpt_vol != NULL) {
			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
		if (mpt_disk != NULL) {
			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
	case MPI_EVENT_RAID_RC_SMART_DATA:
	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
	/* Emit an identifying prefix, then the event description. */
	if (mpt_disk != NULL) {
		mpt_disk_prt(mpt, mpt_disk, "%s", "");
	} else if (mpt_vol != NULL) {
		mpt_vol_prt(mpt, mpt_vol, "%s", "");
		mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
			raid_event->VolumeID);
		if (raid_event->PhysDiskNum != 0xFF)
			mpt_prtc(mpt, ":%d): ",
				 raid_event->PhysDiskNum);
			mpt_prtc(mpt, "): ");
	if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
		mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
			 raid_event->ReasonCode);
		mpt_prtc(mpt, "%s\n",
			 raid_event_txt[raid_event->ReasonCode]);
	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
		/* XXX Use CAM's print sense for this... */
		if (mpt_disk != NULL)
			mpt_disk_prt(mpt, mpt_disk, "%s", "");
		mpt_prt(mpt, "Volume(%d:%d:%d: ",
			raid_event->VolumeBus, raid_event->VolumeID,
			raid_event->PhysDiskNum);
		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
			 raid_event->ASC, raid_event->ASCQ);
	/* Kick the RAID thread to refresh its view of the hardware. */
	mpt_raid_wakeup(mpt);
/*
 * Shutdown hook: unless configured rebuild-only, force member write
 * caching off on every volume so no dirty data is cached at power-off.
 * NOTE(review): the early-return body of the first conditional is
 * missing from this extraction.
 */
mpt_raid_shutdown(struct mpt_softc *mpt)
	struct mpt_raid_volume *mpt_vol;

	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
	RAID_VOL_FOREACH(mpt, mpt_vol) {
		mpt_verify_mwce(mpt, mpt_vol);
/*
 * Reply-frame dispatcher for RAID action requests: decode the frame (if
 * present), mark the request done, remove it from the pending list, and
 * either wake a waiter or free the request.
 * NOTE(review): free_req declaration, wakeup call, and the final return
 * are missing from this extraction.
 */
mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
		       uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
	if (reply_frame != NULL)
		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
	else if (req->ccb != NULL) {
		/* Complete Quiesce CCB with error... */
	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
	} else if (free_req) {
		mpt_free_request(mpt, req);
530 * Parse additional completion information in the reply
531 * frame for RAID I/O requests.
534 mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
535 MSG_DEFAULT_REPLY *reply_frame)
537 MSG_RAID_ACTION_REPLY *reply;
538 struct mpt_raid_action_result *action_result;
539 MSG_RAID_ACTION_REQUEST *rap;
541 reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
542 req->IOCStatus = le16toh(reply->IOCStatus);
543 rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
545 switch (rap->Action) {
546 case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
547 mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
549 case MPI_RAID_ACTION_ENABLE_PHYS_IO:
550 mpt_prt(mpt, "ENABLY PHYSIO DONE\n");
555 action_result = REQ_TO_RAID_ACTION_RESULT(req);
556 memcpy(&action_result->action_data, &reply->ActionData,
557 sizeof(action_result->action_data));
558 action_result->action_status = le16toh(reply->ActionStatus);
563 * Utiltity routine to perform a RAID action command;
566 mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
567 struct mpt_raid_disk *disk, request_t *req, u_int Action,
568 uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
571 MSG_RAID_ACTION_REQUEST *rap;
575 memset(rap, 0, sizeof *rap);
576 rap->Action = Action;
577 rap->ActionDataWord = htole32(ActionDataWord);
578 rap->Function = MPI_FUNCTION_RAID_ACTION;
579 rap->VolumeID = vol->config_page->VolumeID;
580 rap->VolumeBus = vol->config_page->VolumeBus;
582 rap->PhysDiskNum = disk->config_page.PhysDiskNum;
584 rap->PhysDiskNum = 0xFF;
585 se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
586 se->Address = htole32(addr);
587 MPI_pSGE_SET_LENGTH(se, len);
588 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
589 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
590 MPI_SGE_FLAGS_END_OF_LIST |
591 write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST));
592 se->FlagsLength = htole32(se->FlagsLength);
593 rap->MsgContext = htole32(req->index | raid_handler_id);
595 mpt_check_doorbell(mpt);
596 mpt_send_cmd(mpt, req);
599 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
600 /*sleep_ok*/FALSE, /*time_ms*/2000));
606 /*************************** RAID Status Monitoring ***************************/
608 mpt_spawn_raid_thread(struct mpt_softc *mpt)
613 * Freeze out any CAM transactions until our thread
614 * is able to run at least once. We need to update
615 * our RAID pages before acception I/O or we may
616 * reject I/O to an ID we later determine is for a
620 xpt_freeze_simq(mpt->phydisk_sim, 1);
622 error = mpt_kthread_create(mpt_raid_thread, mpt,
623 &mpt->raid_thread, /*flags*/0, /*altstack*/0,
624 "mpt_raid%d", mpt->unit);
627 xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
/*
 * Ask the RAID thread to exit and wait for it: set shutdwn_raid, wake
 * the thread (it sleeps on &mpt->raid_volumes), then sleep on
 * &mpt->raid_thread until the thread clears the pointer and wakes us.
 */
mpt_terminate_raid_thread(struct mpt_softc *mpt)
	if (mpt->raid_thread == NULL) {
	mpt->shutdwn_raid = 1;
	wakeup(&mpt->raid_volumes);
	/*
	 * Sleep on a slightly different location
	 * for this interlock just for added safety.
	 */
	mpt_sleep(mpt, &mpt->raid_thread, 0, "thtrm", 0);
/*
 * Completion callback for the bus-rescan CCB issued by the RAID thread:
 * release the path created for the scan.
 * NOTE(review): the kfree of the CCB itself is not visible in this
 * extraction -- confirm the rescan CCB is not leaked.
 */
mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
	xpt_free_path(ccb->ccb_h.path);
/*
 * Background RAID monitor thread.  Loops until shutdwn_raid is set:
 * sleeps until woken, refreshes RAID data (rescheduling a refresh on
 * failure), releases the phydisk SIM queue after the first successful
 * snapshot, and performs a wildcard bus rescan when raid_rescan is set.
 * On exit it clears mpt->raid_thread and wakes the terminator.
 * NOTE(review): locking, the firstrun/ccb declarations, kmalloc flags,
 * and several brace closes are missing from this extraction.
 */
mpt_raid_thread(void *arg)
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)arg;
	while (mpt->shutdwn_raid == 0) {
		if (mpt->raid_wakeup == 0) {
			/* Wait for mpt_raid_wakeup()/event activity. */
			mpt_sleep(mpt, &mpt->raid_volumes, 0, "idle", 0);
		mpt->raid_wakeup = 0;
		if (mpt_refresh_raid_data(mpt)) {
			mpt_schedule_raid_refresh(mpt);	/* XX NOT QUITE RIGHT */
		/*
		 * Now that we have our first snapshot of RAID data,
		 * allow CAM to access our physical disk bus.
		 */
		MPTLOCK_2_CAMLOCK(mpt);
		xpt_release_simq(mpt->phydisk_sim, TRUE);
		CAMLOCK_2_MPTLOCK(mpt);
		if (mpt->raid_rescan != 0) {
			mpt->raid_rescan = 0;
			ccb = kmalloc(sizeof(union ccb), M_TEMP,
			error = xpt_create_path(&ccb->ccb_h.path, xpt_periph,
			    cam_sim_path(mpt->phydisk_sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
			if (error != CAM_REQ_CMP) {
				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
				xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				    5/*priority (low)*/);
				ccb->ccb_h.func_code = XPT_SCAN_BUS;
				ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
				ccb->crcn.flags = CAM_FLAG_NONE;
				/* scan is now in progress */
	/* Signal mpt_terminate_raid_thread() that we are gone. */
	mpt->raid_thread = NULL;
	wakeup(&mpt->raid_thread);
/*
 * Timeout handler armed by mpt_raid_quiesce_disk(); intended to fail
 * the quiesce CCB when the IOC does not respond in time.
 * NOTE(review): the body is missing from this extraction, and the
 * forward declaration below appears displaced from its usual position
 * before the definition.
 */
mpt_raid_quiesce_timeout(void *arg)
	/* Complete the CCB with error */
static timeout_t mpt_raid_quiesce_timeout;
/*
 * Quiesce physical I/O to a member disk: freeze its device queue, issue
 * MPI_RAID_ACTION_QUIESCE_PHYS_IO, arm a 5-second timeout, and map the
 * outcome to a CAM status (CAM_REQ_CMP if already quiesced,
 * CAM_REQ_INPROG while pending, CAM_REQUEUE_REQ/CAM_REQ_CMP_ERR
 * otherwise).
 * NOTE(review): parameter list tail (req/ccb), rv/ar declarations, and
 * parts of the error conditionals are missing from this extraction.
 * The runtime strings spell "Quiece" (sic) -- left untouched.
 */
mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
		return (CAM_REQ_CMP);

	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
		mpt_disk->flags |= MPT_RDF_QUIESCING;
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
					MPI_RAID_ACTION_QUIESCE_PHYS_IO,
					/*ActionData*/0, /*addr*/0,
					/*len*/0, /*write*/FALSE,
			return (CAM_REQ_CMP_ERR);
		mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
		if (rv == ETIMEDOUT) {
			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
				     "Quiece Timed-out\n");
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		ar = REQ_TO_RAID_ACTION_RESULT(req);
		    || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		    || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_disk_prt(mpt, mpt_disk, "Quiece Failed"
				     "%d:%x:%x\n", rv, req->IOCStatus,
			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
			return (CAM_REQ_CMP_ERR);
		return (CAM_REQ_INPROG);
	return (CAM_REQUEUE_REQ);
/* XXX Ignores that there may be multiple busses/IOCs involved. */
/*
 * Translate a CCB's target ID into the physical-disk ID of an active
 * RAID member disk, storing it in *tgt.  Logs and (per the missing
 * return paths) fails when the target is out of range or inactive.
 * NOTE(review): the pointer mpt_disk is computed from target_id before
 * the range check; the dereference is guarded by the && but the
 * out-of-range pointer arithmetic itself is technically UB.
 */
mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
	struct mpt_raid_disk *mpt_disk;

	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
	if (ccb->ccb_h.target_id < mpt->raid_max_disks
	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
		*tgt = mpt_disk->config_page.PhysDiskID;
	mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
		 ccb->ccb_h.target_id);
/* XXX Ignores that there may be multiple busses/IOCs involved. */
/*
 * Report whether target ID 'tgt' corresponds to one of the IOC's
 * active RAID volumes (scans IOC Page 2's RaidVolume array).
 * NOTE(review): the return statements are missing from this extraction.
 */
mpt_is_raid_volume(struct mpt_softc *mpt, int tgt)
	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;

	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
	ioc_vol = mpt->ioc_page2->RaidVolume;
	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
		if (ioc_vol->VolumeID == tgt) {
/*
 * Enable or disable a RAID volume via ENABLE/DISABLE_VOLUME RAID
 * actions, skipping the command when the volume is already in the
 * requested state.
 * NOTE(review): the 'enable' parameter line, req/rv/enabled
 * declarations, and several control-flow lines are missing from this
 * extraction.
 */
mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;

	vol_pg = mpt_vol->config_page;
	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	if ((enabled && enable)
	 || (!enabled && !enable))
	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_enable_vol: Get request failed!\n");
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				enable ? MPI_RAID_ACTION_ENABLE_VOLUME
				       : MPI_RAID_ACTION_DISABLE_VOLUME,
				/*data*/0, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
			    "%s Volume Timed-out\n",
			    enable ? "Enable" : "Disable");
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	    || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	    || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
			    enable ? "Enable" : "Disable",
			    rv, req->IOCStatus, ar->action_status);
	mpt_free_request(mpt, req);
/*
 * Reconcile a volume's member write-cache-enable (MWCE) setting with
 * the driver policy (ON/OFF/REBUILD_ONLY/no-change).  When a change is
 * needed, the WCE bit is toggled in the cached settings, the toggled
 * value is sent via CHANGE_VOLUME_SETTINGS, and the cached copy is
 * toggled back on failure so it keeps reflecting hardware state.
 * NOTE(review): req/rv/data/resyncing/mwce declarations, break/return
 * statements, and several brace closes are missing from this
 * extraction.
 */
mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;

	vol_pg = mpt_vol->config_page;
	resyncing = vol_pg->VolumeStatus.Flags
		  & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
	mwce = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
	 */
	switch (mpt->raid_mwce_setting) {
	case MPT_RAID_MWCE_REBUILD_ONLY:
		if ((resyncing && mwce) || (!resyncing && !mwce)) {
		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
			/*
			 * Wait one more status update to see if
			 * resyncing gets enabled.  It gets disabled
			 * temporarily when WCE is changed.
			 */
	case MPT_RAID_MWCE_ON:
	case MPT_RAID_MWCE_OFF:
	case MPT_RAID_MWCE_NC:
	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
		mpt_vol_prt(mpt, mpt_vol,
			    "mpt_verify_mwce: Get request failed!\n");
	/* Toggle, snapshot the desired settings word, toggle back. */
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
				MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
				data, /*addr*/0, /*len*/0,
				/*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
			    "Write Cache Enable Timed-out\n");
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	    || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	    || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
			    "%d:%x:%x\n", rv, req->IOCStatus,
	/* Success: commit the new WCE bit to the cached page. */
	vol_pg->VolumeSettings.Settings ^=
	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
	mpt_free_request(mpt, req);
/*
 * Reconcile a volume's resync rate with the configured driver rate.
 * Two paths: when the reported ResyncRate differs, issue
 * SET_RESYNC_RATE; otherwise, when the priority-resync settings bit
 * disagrees with the configured rate (>= 128 means high priority),
 * toggle the bit via CHANGE_VOLUME_SETTINGS.
 * NOTE(review): req/rv/data/prio declarations, error-check
 * conditionals, and several returns/brace closes are missing from this
 * extraction.
 */
mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;

	vol_pg = mpt_vol->config_page;
	/* "No change" policy: leave whatever the firmware has. */
	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
	/*
	 * If the current RAID resync rate does not
	 * match our configured rate, update it.
	 */
	prio = vol_pg->VolumeSettings.Settings
	     & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
	if (vol_pg->ResyncRate != 0
	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {
		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_SET_RESYNC_RATE,
					mpt->raid_resync_rate, /*addr*/0,
					/*len*/0, /*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
		ar = REQ_TO_RAID_ACTION_RESULT(req);
		    || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		    || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
		vol_pg->ResyncRate = mpt->raid_resync_rate;
		mpt_free_request(mpt, req);
	} else if ((prio && mpt->raid_resync_rate < 128)
		|| (!prio && mpt->raid_resync_rate >= 128)) {
		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
				    "Get request failed!\n");
		/* Toggle, snapshot the desired settings word, toggle back. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
					MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
					data, /*addr*/0, /*len*/0,
					/*write*/FALSE, /*wait*/TRUE);
		if (rv == ETIMEDOUT) {
			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
				    "Resync Rate Setting Timed-out\n");
		ar = REQ_TO_RAID_ACTION_RESULT(req);
		    || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
		    || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
				    "%d:%x:%x\n", rv, req->IOCStatus,
		/* Success: commit the new priority bit to the cached page. */
		vol_pg->VolumeSettings.Settings ^=
		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
		mpt_free_request(mpt, req);
/*
 * Set the number of openings on a RAID volume's path to the driver's
 * configured raid_queue_depth via an XPT_REL_SIMQ/ADJUST_OPENINGS CCB.
 * Failure is only logged.
 */
mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
		       struct cam_path *path)
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = CAM_DEV_QFREEZE;
	crs.release_flags = RELSIM_ADJUST_OPENINGS;
	crs.openings = mpt->raid_queue_depth;
	xpt_action((union ccb *)&crs);
	if (crs.ccb_h.status != CAM_REQ_CMP)
		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
			    "with CAM status %#x\n", crs.ccb_h.status);
/*
 * Print a full description of a RAID volume: its settings bits, hot
 * spare pool membership, and each member disk's position and status
 * flags/state.
 * NOTE(review): the 'i' declaration, break statements, mask
 * computation, the state switch header, default case, and several
 * brace closes are missing from this extraction.
 */
mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;

	vol_pg = mpt_vol->config_page;
	mpt_vol_prt(mpt, mpt_vol, "Settings (");
	/* Walk each settings bit and name the ones that are set. */
	for (i = 1; i <= 0x8000; i <<= 1) {
		switch (vol_pg->VolumeSettings.Settings & i) {
		case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE:
			mpt_prtc(mpt, " Member-WCE");
		case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART:
			mpt_prtc(mpt, " Offline-On-SMART-Err");
		case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE:
			mpt_prtc(mpt, " Hot-Plug-Spares");
		case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC:
			mpt_prtc(mpt, " High-Priority-ReSync");
	mpt_prtc(mpt, " )\n");
	if (vol_pg->VolumeSettings.HotSparePool != 0) {
		mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s",
			    powerof2(vol_pg->VolumeSettings.HotSparePool)
		/* HotSparePool is a bitmask of up to 8 pools. */
		for (i = 0; i < 8; i++) {
			if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0)
			mpt_prtc(mpt, " %d", i);
		mpt_prtc(mpt, "\n");
	mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks);
	for (i = 0; i < vol_pg->NumPhysDisks; i++){
		struct mpt_raid_disk *mpt_disk;
		CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
		int pt_bus = cam_sim_bus(mpt->phydisk_sim);

		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		disk_pg = &mpt_disk->config_page;
		mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev),
			 pt_bus, disk_pg->PhysDiskID);
		/* IM (mirror) members are Primary/Secondary; others striped. */
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			mpt_prtc(mpt, "%s", mpt_disk->member_number == 0?
				 "Primary" : "Secondary");
			mpt_prtc(mpt, "Stripe Position %d",
				 mpt_disk->member_number);
		f = disk_pg->PhysDiskStatus.Flags;
		s = disk_pg->PhysDiskStatus.State;
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) {
			mpt_prtc(mpt, " Out of Sync");
		if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) {
			mpt_prtc(mpt, " Quiesced");
		if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) {
			mpt_prtc(mpt, " Inactive");
		if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Optimal");
		if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) {
			mpt_prtc(mpt, " Was Non-Optimal");
		case MPI_PHYSDISK0_STATUS_ONLINE:
			mpt_prtc(mpt, " Online");
		case MPI_PHYSDISK0_STATUS_MISSING:
			mpt_prtc(mpt, " Missing");
		case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
			mpt_prtc(mpt, " Incompatible");
		case MPI_PHYSDISK0_STATUS_FAILED:
			mpt_prtc(mpt, " Failed");
		case MPI_PHYSDISK0_STATUS_INITIALIZING:
			mpt_prtc(mpt, " Initializing");
		case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
			mpt_prtc(mpt, " Requested Offline");
		case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
			mpt_prtc(mpt, " Requested Failed");
		case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
			mpt_prtc(mpt, " Offline Other (%x)", s);
		mpt_prtc(mpt, "\n");
/*
 * Print a physical disk's identity on both the RAID bus and the
 * pass-thru bus, plus its hot-spare pool membership (if any).
 * NOTE(review): the 'i'/'mask' declarations, early return, and
 * continue statements are missing from this extraction.
 */
mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk)
	CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
	int rd_bus = cam_sim_bus(mpt->sim);
	int pt_bus = cam_sim_bus(mpt->phydisk_sim);

	disk_pg = &mpt_disk->config_page;
	mpt_disk_prt(mpt, mpt_disk,
		     "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n",
		     device_get_nameunit(mpt->dev), rd_bus,
		     disk_pg->PhysDiskID, device_get_nameunit(mpt->dev),
		     pt_bus, (int)(mpt_disk - mpt->raid_disks));
	if (disk_pg->PhysDiskSettings.HotSparePool == 0)
	mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s",
		     powerof2(disk_pg->PhysDiskSettings.HotSparePool)
	/* HotSparePool is a bitmask of up to 8 pools. */
	for (i = 0; i < 8; i++) {
		if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0)
		mpt_prtc(mpt, " %d", i);
	mpt_prtc(mpt, "\n");
/*
 * Re-read a physical disk's RAID_PHYSDISK page 0 (header, then current
 * page contents) into the cached config_page and byte-swap it to host
 * order.  Failures are logged.
 * NOTE(review): the rv declaration, the error-check conditionals, and
 * the return paths are missing from this extraction.
 */
mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
		      IOC_3_PHYS_DISK *ioc_disk)
	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK,
				 /*PageNumber*/0, ioc_disk->PhysDiskNum,
				 &mpt_disk->config_page.Header,
				 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
		mpt_prt(mpt, "mpt_refresh_raid_disk: "
			"Failed to read RAID Disk Hdr(%d)\n",
			ioc_disk->PhysDiskNum);
	rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum,
				   &mpt_disk->config_page.Header,
				   sizeof(mpt_disk->config_page),
				   /*sleep_ok*/TRUE, /*timeout_ms*/5000);
		mpt_prt(mpt, "mpt_refresh_raid_disk: "
			"Failed to read RAID Disk Page(%d)\n",
			ioc_disk->PhysDiskNum);
	mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page);
/*
 * Re-read a volume's RAID_VOLUME page 0 into the cached config page,
 * mark the volume active, update each member disk's back-pointer and
 * member number (IM volumes are 1-based in PhysDiskMap, hence the
 * decrement), and, when a resync is in progress, fetch the sync
 * progress via an INDICATOR_STRUCT RAID action.
 * NOTE(review): rv/i/req declarations, error-check conditionals,
 * returns, and else keywords are missing from this extraction.
 */
mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
		     CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol)
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	struct mpt_raid_action_result *ar;

	vol_pg = mpt_vol->config_page;
	mpt_vol->flags &= ~MPT_RVF_UP2DATE;
	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
	    ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000);
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n",
		    ioc_vol->VolumePageNumber);
	rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber,
	    &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000);
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n",
		    ioc_vol->VolumePageNumber);
	mpt2host_config_page_raid_vol_0(vol_pg);

	mpt_vol->flags |= MPT_RVF_ACTIVE;

	/* Update disk entry array data. */
	for (i = 0; i < vol_pg->NumPhysDisks; i++) {
		struct mpt_raid_disk *mpt_disk;
		mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum;
		mpt_disk->volume = mpt_vol;
		mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap;
		if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) {
			/* IM maps are 1-based; normalize to 0-based. */
			mpt_disk->member_number--;
	/* No resync running: nothing more to fetch. */
	if ((vol_pg->VolumeStatus.Flags
	   & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
	req = mpt_get_request(mpt, TRUE);
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Get request failed!\n");
	rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req,
	    MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE);
	if (rv == ETIMEDOUT) {
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n");
		mpt_free_request(mpt, req);
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS
	 && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) {
		memcpy(&mpt_vol->sync_progress,
		       &ar->action_data.indicator_struct,
		       sizeof(mpt_vol->sync_progress));
		mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress);
		mpt_vol_prt(mpt, mpt_vol,
		    "mpt_refresh_raid_vol: Progress indicator fetch failed!\n");
	mpt_free_request(mpt, req);
1310 * Update in-core information about RAID support. We update any entries
1311 * that didn't previously exists or have been marked as needing to
1312 * be updated by our event handler. Interesting changes are displayed
/*
 * mpt_refresh_raid_data:
 *
 * Re-read IOC Page 3 (physical disks) and IOC Page 2 (volumes) from the
 * controller and reconcile the driver's in-core RAID state with them.
 * Entries not referenced by the freshly read pages are reported as
 * "No longer configured" and dropped; new or stale entries are refreshed
 * and announced, and the count of non-optimal volumes is recomputed.
 * While any volume is resyncing, the periodic refresh callout is
 * rescheduled so sync progress keeps being reported.
 */
1316 mpt_refresh_raid_data(struct mpt_softc *mpt)
1318 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
1319 CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
1320 IOC_3_PHYS_DISK *ioc_disk;
1321 IOC_3_PHYS_DISK *ioc_last_disk;
1322 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1326 u_int nonopt_volumes;
/* Without both IOC pages there is no RAID configuration to refresh. */
1328 if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) {
1333 * Mark all items as unreferenced by the configuration.
1334 * This allows us to find, report, and discard stale
1337 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1338 mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED;
1340 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1341 mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED;
1345 * Get Physical Disk information.
/* Page length is reported in 32-bit words; convert to bytes. */
1347 len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t);
1348 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1349 &mpt->ioc_page3->Header, len,
1350 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1353 "mpt_refresh_raid_data: Failed to read IOC Page 3\n");
/* Byte-swap the page from MPI (little-endian) to host order. */
1356 mpt2host_config_page_ioc3(mpt->ioc_page3);
/*
 * Walk the disk list just read from the controller; mark each entry
 * referenced and refresh any disk that is not both active and up to
 * date.
 */
1358 ioc_disk = mpt->ioc_page3->PhysDisk;
1359 ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks;
1360 for (; ioc_disk != ioc_last_disk; ioc_disk++) {
1361 struct mpt_raid_disk *mpt_disk;
1363 mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum;
1364 mpt_disk->flags |= MPT_RDF_REFERENCED;
1365 if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE))
1366 != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) {
1368 mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk);
1371 mpt_disk->flags |= MPT_RDF_ACTIVE;
1376 * Refresh volume data.
1378 len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);
1379 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1380 &mpt->ioc_page2->Header, len,
1381 /*sleep_ok*/TRUE, /*timeout_ms*/5000);
1383 mpt_prt(mpt, "mpt_refresh_raid_data: "
1384 "Failed to read IOC Page 2\n");
1387 mpt2host_config_page_ioc2(mpt->ioc_page2);
/*
 * Mark active volumes referenced, and re-read the RAID Volume page
 * for any volume that is stale or currently resyncing.
 */
1389 ioc_vol = mpt->ioc_page2->RaidVolume;
1390 ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
1391 for (;ioc_vol != ioc_last_vol; ioc_vol++) {
1392 struct mpt_raid_volume *mpt_vol;
1394 mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
1395 mpt_vol->flags |= MPT_RVF_REFERENCED;
1396 vol_pg = mpt_vol->config_page;
1399 if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1400 != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE))
1401 || (vol_pg->VolumeStatus.Flags
1402 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) {
1404 mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol);
1406 mpt_vol->flags |= MPT_RVF_ACTIVE;
/*
 * Second pass over every volume slot: report volumes that have gone
 * away, announce newly seen ones, tally non-optimal volumes, and log
 * status/resync progress for volumes whose state changed.
 */
1410 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1411 struct mpt_raid_volume *mpt_vol;
1417 mpt_vol = &mpt->raid_volumes[i];
1419 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1423 vol_pg = mpt_vol->config_page;
/* Announced previously but no longer referenced: it was removed. */
1424 if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED))
1425 == MPT_RVF_ANNOUNCED) {
1426 mpt_vol_prt(mpt, mpt_vol, "No longer configured\n");
1431 if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) {
1432 mpt_announce_vol(mpt, mpt_vol);
1433 mpt_vol->flags |= MPT_RVF_ANNOUNCED;
1436 if (vol_pg->VolumeStatus.State !=
1437 MPI_RAIDVOL0_STATUS_STATE_OPTIMAL)
1440 if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0)
1443 mpt_vol->flags |= MPT_RVF_UP2DATE;
1444 mpt_vol_prt(mpt, mpt_vol, "%s - %s\n",
1445 mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol));
1446 mpt_verify_mwce(mpt, mpt_vol);
1448 if (vol_pg->VolumeStatus.Flags == 0) {
/* Decode the individual status flag bits into a readable line. */
1452 mpt_vol_prt(mpt, mpt_vol, "Status (");
1453 for (m = 1; m <= 0x80; m <<= 1) {
1454 switch (vol_pg->VolumeStatus.Flags & m) {
1455 case MPI_RAIDVOL0_STATUS_FLAG_ENABLED:
1456 mpt_prtc(mpt, " Enabled");
1458 case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED:
1459 mpt_prtc(mpt, " Quiesced");
1461 case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS:
1462 mpt_prtc(mpt, " Re-Syncing");
1464 case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE:
1465 mpt_prtc(mpt, " Inactive");
1471 mpt_prtc(mpt, " )\n");
1473 if ((vol_pg->VolumeStatus.Flags
1474 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0)
1477 mpt_verify_resync_rate(mpt, mpt_vol);
1479 left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining);
1480 total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks);
1481 if (vol_pg->ResyncRate != 0) {
/*
 * Scale the 0-255 ResyncRate so prio/1000 is the whole percent
 * and prio%1000 the fractional part (thousandths of a percent).
 */
1483 prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF;
1484 mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n",
1485 prio / 1000, prio % 1000);
1487 prio = vol_pg->VolumeSettings.Settings
1488 & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
1489 mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n",
1490 prio ? "High" : "Low");
1492 mpt_vol_prt(mpt, mpt_vol, "%ju of %ju "
1493 "blocks remaining\n", (uintmax_t)left,
1496 /* Periodically report on sync progress. */
1497 mpt_schedule_raid_refresh(mpt);
/*
 * Final pass over every physical disk slot: report departed disks,
 * announce new ones, and log status for disks whose state changed.
 */
1500 for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
1501 struct mpt_raid_disk *mpt_disk;
1502 CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg;
1505 mpt_disk = &mpt->raid_disks[i];
1506 disk_pg = &mpt_disk->config_page;
1508 if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0)
1511 if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED))
1512 == MPT_RDF_ANNOUNCED) {
1513 mpt_disk_prt(mpt, mpt_disk, "No longer configured\n");
1514 mpt_disk->flags = 0;
1519 if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) {
1521 mpt_announce_disk(mpt, mpt_disk);
/*
 * NOTE(review): MPT_RVF_ANNOUNCED is a *volume* flag being OR'd into
 * a disk's flags field; MPT_RDF_ANNOUNCED was almost certainly
 * intended here — confirm against the MPT_RVF_/MPT_RDF_ definitions
 * (the bit values may or may not coincide).
 */
1522 mpt_disk->flags |= MPT_RVF_ANNOUNCED;
1525 if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0)
1528 mpt_disk->flags |= MPT_RDF_UP2DATE;
1529 mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk));
1530 if (disk_pg->PhysDiskStatus.Flags == 0)
1533 mpt_disk_prt(mpt, mpt_disk, "Status (");
1534 for (m = 1; m <= 0x80; m <<= 1) {
1535 switch (disk_pg->PhysDiskStatus.Flags & m) {
1536 case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC:
1537 mpt_prtc(mpt, " Out-Of-Sync");
1539 case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED:
1540 mpt_prtc(mpt, " Quiesced");
1546 mpt_prtc(mpt, " )\n");
/* Publish the non-optimal volume count (exported via sysctl). */
1549 mpt->raid_nonopt_volumes = nonopt_volumes;
/*
 * mpt_raid_timer:
 *
 * Callout handler for the periodic RAID refresh timer.  Wakes the RAID
 * support thread so it re-reads RAID state.  The softc lock must be
 * held when this fires (asserted below).
 */
1554 mpt_raid_timer(void *arg)
1556 struct mpt_softc *mpt;
1558 mpt = (struct mpt_softc *)arg;
1559 MPT_LOCK_ASSERT(mpt);
1560 mpt_raid_wakeup(mpt);
/*
 * mpt_schedule_raid_refresh:
 *
 * (Re)arm the RAID refresh callout to fire mpt_raid_timer() after
 * MPT_RAID_SYNC_REPORT_INTERVAL ticks.
 */
1564 mpt_schedule_raid_refresh(struct mpt_softc *mpt)
1566 callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL,
1567 mpt_raid_timer, mpt);
/*
 * mpt_raid_free_mem:
 *
 * Release all memory associated with RAID support: each volume's cached
 * config page, the volume and disk arrays, and the cached IOC pages 2
 * and 3.  Every freed pointer is NULLed so the teardown is idempotent,
 * and the max counts are reset to zero.
 */
1571 mpt_raid_free_mem(struct mpt_softc *mpt)
1574 if (mpt->raid_volumes) {
1575 struct mpt_raid_volume *mpt_raid;
/* Free each volume's cached RAID Volume Page 0 before the array. */
1577 for (i = 0; i < mpt->raid_max_volumes; i++) {
1578 mpt_raid = &mpt->raid_volumes[i];
1579 if (mpt_raid->config_page) {
1580 kfree(mpt_raid->config_page, M_DEVBUF);
1581 mpt_raid->config_page = NULL;
1584 kfree(mpt->raid_volumes, M_DEVBUF);
1585 mpt->raid_volumes = NULL;
1587 if (mpt->raid_disks) {
1588 kfree(mpt->raid_disks, M_DEVBUF);
1589 mpt->raid_disks = NULL;
1591 if (mpt->ioc_page2) {
1592 kfree(mpt->ioc_page2, M_DEVBUF);
1593 mpt->ioc_page2 = NULL;
1595 if (mpt->ioc_page3) {
1596 kfree(mpt->ioc_page3, M_DEVBUF);
1597 mpt->ioc_page3 = NULL;
1599 mpt->raid_max_volumes = 0;
1600 mpt->raid_max_disks = 0;
/*
 * mpt_raid_set_vol_resync_rate:
 *
 * Set the global resync rate and push it to every active volume.
 * The rate must lie in [MPT_RAID_RESYNC_RATE_MIN,
 * MPT_RAID_RESYNC_RATE_MAX] or be the MPT_RAID_RESYNC_RATE_NC ("no
 * change") sentinel; anything else is rejected.
 */
1604 mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate)
1606 struct mpt_raid_volume *mpt_vol;
1608 if ((rate > MPT_RAID_RESYNC_RATE_MAX
1609 || rate < MPT_RAID_RESYNC_RATE_MIN)
1610 && rate != MPT_RAID_RESYNC_RATE_NC)
1614 mpt->raid_resync_rate = rate;
/* Apply the new rate to each active volume; inactive ones are skipped. */
1615 RAID_VOL_FOREACH(mpt, mpt_vol) {
1616 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) {
1619 mpt_verify_resync_rate(mpt, mpt_vol);
/*
 * mpt_raid_set_vol_queue_depth:
 *
 * Set the default volume queue depth (valid range 1-255) and apply it
 * to every active volume by building a CAM path to the volume and
 * calling mpt_adjust_queue_depth().  The softc lock is dropped in
 * favor of the CAM lock around the xpt calls and re-taken afterwards.
 */
1626 mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth)
1628 struct mpt_raid_volume *mpt_vol;
1630 if (vol_queue_depth > 255 || vol_queue_depth < 1)
1634 mpt->raid_queue_depth = vol_queue_depth;
1635 RAID_VOL_FOREACH(mpt, mpt_vol) {
1636 struct cam_path *path;
1639 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1642 mpt->raid_rescan = 0;
/* Swap locks: CAM path/queue-depth calls require the CAM lock. */
1644 MPTLOCK_2_CAMLOCK(mpt);
1645 error = xpt_create_path(&path, xpt_periph,
1646 cam_sim_path(mpt->sim),
1647 mpt_vol->config_page->VolumeID,
1649 if (error != CAM_REQ_CMP) {
1650 CAMLOCK_2_MPTLOCK(mpt);
1651 mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n");
1654 mpt_adjust_queue_depth(mpt, mpt_vol, path);
1655 xpt_free_path(path);
1656 CAMLOCK_2_MPTLOCK(mpt);
/*
 * mpt_raid_set_vol_mwce:
 *
 * Change the Member Write Cache Enable (MWCE) policy and apply it to
 * every active volume.  A no-op if the setting is unchanged.  On the
 * very first explicit setting, warn about volumes that appear to have
 * suffered an unsafe shutdown with write caching on (see below).
 */
1663 mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce)
1665 struct mpt_raid_volume *mpt_vol;
1666 int force_full_resync;
1669 if (mwce == mpt->raid_mwce_setting) {
1675 * Catch MWCE being left on due to a failed shutdown. Since
1676 * sysctls cannot be set by the loader, we treat the first
1677 * setting of this variable specially and force a full volume
1678 * resync if MWCE is enabled and a resync is in progress.
1680 force_full_resync = 0;
1681 if (mpt->raid_mwce_set == 0
1682 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC
1683 && mwce == MPT_RAID_MWCE_REBUILD_ONLY)
1684 force_full_resync = 1;
1686 mpt->raid_mwce_setting = mwce;
1687 RAID_VOL_FOREACH(mpt, mpt_vol) {
1688 CONFIG_PAGE_RAID_VOL_0 *vol_pg;
1692 if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
1695 vol_pg = mpt_vol->config_page;
/* Note: 'mwce' is reused here to hold the volume's current setting. */
1696 resyncing = vol_pg->VolumeStatus.Flags
1697 & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
1698 mwce = vol_pg->VolumeSettings.Settings
1699 & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
1700 if (force_full_resync && resyncing && mwce) {
1703 * XXX disable/enable volume should force a resync,
1704 * but we'll need to quiesce, drain, and restart
1707 mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown "
1708 "detected. Suggest full resync.\n");
1710 mpt_verify_mwce(mpt, mpt_vol);
/* Remember that the setting has been made explicitly at least once. */
1712 mpt->raid_mwce_set = 1;
/*
 * Human-readable names for the mpt_raid_mwce_t settings, indexed by the
 * enum value; used by the vol_member_wce sysctl handler for both output
 * and input matching.
 */
1716 const char *mpt_vol_mwce_strs[] =
1720 "On-During-Rebuild",
/*
 * mpt_raid_sysctl_vol_member_wce:
 *
 * Sysctl handler for the string-valued "vol_member_wce" knob.  Emits
 * the current setting's name; if the caller supplied a new value, it is
 * matched (exactly) against mpt_vol_mwce_strs[] and, on a hit, applied
 * via mpt_raid_set_vol_mwce() using the table index as the enum value.
 */
1725 mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS)
1728 struct mpt_softc *mpt;
1734 mpt = (struct mpt_softc *)arg1;
1735 str = mpt_vol_mwce_strs[mpt->raid_mwce_setting];
/* Include the NUL terminator in the output (+ 1). */
1736 error = SYSCTL_OUT(req, str, strlen(str) + 1);
1737 if (error || !req->newptr) {
/* A new value was written; reject anything too large for inbuf. */
1741 size = req->newlen - req->newidx;
1742 if (size >= sizeof(inbuf)) {
1746 error = SYSCTL_IN(req, inbuf, size);
1751 for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) {
1752 if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) {
1753 return (mpt_raid_set_vol_mwce(mpt, i));
/*
 * mpt_raid_sysctl_vol_resync_rate:
 *
 * Sysctl handler for the integer "vol_resync_rate" knob.  Reports the
 * current rate; on write, validation and application are delegated to
 * mpt_raid_set_vol_resync_rate().
 */
1760 mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS)
1762 struct mpt_softc *mpt;
1763 u_int raid_resync_rate;
1766 mpt = (struct mpt_softc *)arg1;
1767 raid_resync_rate = mpt->raid_resync_rate;
1769 error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req);
1770 if (error || !req->newptr) {
1774 return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate));
/*
 * mpt_raid_sysctl_vol_queue_depth:
 *
 * Sysctl handler for the integer "vol_queue_depth" knob.  Reports the
 * current depth; on write, validation and application are delegated to
 * mpt_raid_set_vol_queue_depth().
 */
1778 mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS)
1780 struct mpt_softc *mpt;
1781 u_int raid_queue_depth;
1784 mpt = (struct mpt_softc *)arg1;
1785 raid_queue_depth = mpt->raid_queue_depth;
1787 error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req);
1788 if (error || !req->newptr) {
1792 return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth));
1796 mpt_raid_sysctl_attach(struct mpt_softc *mpt)
1798 SYSCTL_ADD_PROC(&mpt->mpt_sysctl_ctx,
1799 SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
1800 "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0,
1801 mpt_raid_sysctl_vol_member_wce, "A",
1802 "volume member WCE(On,Off,On-During-Rebuild,NC)");
1804 SYSCTL_ADD_PROC(&mpt->mpt_sysctl_ctx,
1805 SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
1806 "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1807 mpt_raid_sysctl_vol_queue_depth, "I",
1808 "default volume queue depth");
1810 SYSCTL_ADD_PROC(&mpt->mpt_sysctl_ctx,
1811 SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
1812 "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0,
1813 mpt_raid_sysctl_vol_resync_rate, "I",
1814 "volume resync priority (0 == NC, 1 - 255)");
1815 SYSCTL_ADD_UINT(&mpt->mpt_sysctl_ctx,
1816 SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
1817 "nonoptimal_volumes", CTLFLAG_RD,
1818 &mpt->raid_nonopt_volumes, 0,
1819 "number of nonoptimal volumes");