2 * Copyright (c) 2003,2004,2009 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * ----------------------------------------------------------------------------
35 * "THE BEER-WARE LICENSE" (Revision 42):
36 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
37 * can do whatever you want with this stuff. If we meet some day, and you think
38 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
39 * ----------------------------------------------------------------------------
41 * Copyright (c) 1982, 1986, 1988, 1993
42 * The Regents of the University of California. All rights reserved.
43 * (c) UNIX System Laboratories, Inc.
44 * All or some portions of this file are derived from material licensed
45 * to the University of California by American Telephone and Telegraph
46 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
47 * the permission of UNIX System Laboratories, Inc.
49 * Redistribution and use in source and binary forms, with or without
50 * modification, are permitted provided that the following conditions
52 * 1. Redistributions of source code must retain the above copyright
53 * notice, this list of conditions and the following disclaimer.
54 * 2. Redistributions in binary form must reproduce the above copyright
55 * notice, this list of conditions and the following disclaimer in the
56 * documentation and/or other materials provided with the distribution.
57 * 3. All advertising materials mentioning features or use of this software
58 * must display the following acknowledgement:
59 * This product includes software developed by the University of
60 * California, Berkeley and its contributors.
61 * 4. Neither the name of the University nor the names of its contributors
62 * may be used to endorse or promote products derived from this software
63 * without specific prior written permission.
65 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
66 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
67 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
68 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
69 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
70 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
71 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
72 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
73 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
74 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
77 * @(#)ufs_disksubr.c 8.5 (Berkeley) 1/21/94
78 * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $
79 * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $
80 * $DragonFly: src/sys/kern/subr_disk.c,v 1.40 2008/06/05 18:06:32 swildner Exp $
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/kernel.h>
87 #include <sys/sysctl.h>
90 #include <sys/disklabel.h>
91 #include <sys/disklabel32.h>
92 #include <sys/disklabel64.h>
93 #include <sys/diskslice.h>
94 #include <sys/diskmbr.h>
96 #include <sys/kerneldump.h>
97 #include <sys/malloc.h>
98 #include <sys/sysctl.h>
99 #include <machine/md_var.h>
100 #include <sys/ctype.h>
101 #include <sys/syslog.h>
102 #include <sys/device.h>
103 #include <sys/msgport.h>
104 #include <sys/devfs.h>
105 #include <sys/thread.h>
106 #include <sys/dsched.h>
107 #include <sys/queue.h>
108 #include <sys/lock.h>
109 #include <sys/udev.h>
111 #include <sys/buf2.h>
112 #include <sys/mplock2.h>
113 #include <sys/msgport2.h>
114 #include <sys/thread2.h>
/* Malloc zone for all disk-layer allocations in this file. */
116 static MALLOC_DEFINE(M_DISK, "disk", "disk data");
/* Debug verbosity; settable via the kern.disk_debug tunable/sysctl below. */
117 static int disk_debug_enable = 0;
/* Forward declarations for the file-local helpers defined further down. */
119 static void disk_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
120 static void disk_msg_core(void *);
121 static int disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe);
122 static void disk_probe(struct disk *dp, int reprobe);
123 static void _setdiskinfo(struct disk *disk, struct disk_info *info);
124 static void bioqwritereorder(struct bio_queue_head *bioq);
125 static void disk_cleanserial(char *serno);
/* cdevsw-style entry points for the managed (slice/partition) disk device. */
127 static d_open_t diskopen;
128 static d_close_t diskclose;
129 static d_ioctl_t diskioctl;
130 static d_strategy_t diskstrategy;
131 static d_psize_t diskpsize;
132 static d_clone_t diskclone;
133 static d_dump_t diskdump;
/* Global list of all registered disks, protected by disklist_token. */
135 static LIST_HEAD(, disk) disklist = LIST_HEAD_INITIALIZER(&disklist);
136 static struct lwkt_token disklist_token;
/*
 * dev_ops template used for every cooked (slice/partition managed) disk
 * device node created by this layer.  Reads/writes go through the
 * phys* helpers; the rest dispatch to the disk* functions above.
 */
138 static struct dev_ops disk_ops = {
139 { "disk", 0, D_DISK | D_MPSAFE },
141 .d_close = diskclose,
143 .d_write = physwrite,
144 .d_ioctl = diskioctl,
145 .d_strategy = diskstrategy,
147 .d_psize = diskpsize,
/* Object cache backing disk_msg allocations for the message core. */
151 static struct objcache *disk_msg_cache;
153 struct objcache_malloc_args disk_msg_malloc_args = {
154 sizeof(struct disk_msg), M_DISK };
/* disk_msg_port: request port serviced by disk_msg_core.
 * disk_dispose_port: reply-only drain that frees async messages. */
156 static struct lwkt_port disk_dispose_port;
157 static struct lwkt_port disk_msg_port;
/*
 * Leveled debug printf for this file: output is produced only when the
 * requested level is at or below the kern.disk_debug setting.
 */
160 disk_debug(int level, char *fmt, ...)
165 if (level <= disk_debug_enable)
/*
 * Probe one slice of a disk for a disklabel and (re)create the devfs
 * partition devices (%s%c, e.g. da0s1a) it describes.
 *
 * Tries the 32-bit disklabel first, falling back to the 64-bit format
 * when the 32-bit probe reports "no disk label".  With reprobe set,
 * existing partition devices are tagged SI_REPROBE_TEST and kept if
 * still valid; the caller-side message core later destroys untagged
 * leftovers.  Returns 0 on success, EINVAL when no label was found.
 */
173 disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe)
175 struct disk_info *info = &dp->d_info;
176 struct diskslice *sp = &dp->d_slice->dss_slices[slice];
178 struct partinfo part;
185 "disk_probe_slice (begin): %s (%s)\n",
186 dev->si_name, dp->d_cdev->si_name);
/* Slice name number: sN in devfs is slice index - 1 (slice 0 = compat). */
188 sno = slice ? slice - 1 : 0;
190 ops = &disklabel32_ops;
191 msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
192 if (msg && !strcmp(msg, "no disk label")) {
/* 32-bit label absent; retry with the 64-bit disklabel format. */
193 ops = &disklabel64_ops;
194 msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
197 if (slice != WHOLE_DISK_SLICE)
198 ops->op_adjust_label_reserved(dp->d_slice, slice, sp);
/* Walk the label's partitions, reusing or creating a device for each. */
203 for (i = 0; i < ops->op_getnumparts(sp->ds_label); i++) {
204 ops->op_loadpartinfo(sp->ds_label, i, &part);
207 (ndev = devfs_find_device_by_name("%s%c",
208 dev->si_name, 'a' + i))
211 * Device already exists and
214 ndev->si_flags |= SI_REPROBE_TEST;
216 ndev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
217 dkmakeminor(dkunit(dp->d_cdev),
219 UID_ROOT, GID_OPERATOR, 0640,
220 "%s%c", dev->si_name, 'a'+ i);
222 udev_dict_set_cstr(ndev, "subsystem", "disk");
223 /* Inherit parent's disk type */
224 if (dp->d_disktype) {
225 udev_dict_set_cstr(ndev, "disk-type",
226 __DECONST(char *, dp->d_disktype));
/* Publish a serno/ alias for the new partition when available. */
228 if (dp->d_info.d_serialno) {
231 dp->d_info.d_serialno,
234 ndev->si_flags |= SI_REPROBE_TEST;
/* No label found: synthesize a compatibility label if requested. */
238 } else if (info->d_dsflags & DSO_COMPATLABEL) {
/* >= 2^32 sectors cannot be described by a 32-bit label. */
240 if (sp->ds_size >= 0x100000000ULL)
241 ops = &disklabel64_ops;
243 ops = &disklabel32_ops;
244 sp->ds_label = ops->op_clone_label(info, sp);
246 if (sp->ds_type == DOSPTYP_386BSD || /* XXX */
247 sp->ds_type == DOSPTYP_NETBSD ||
248 sp->ds_type == DOSPTYP_OPENBSD) {
249 log(LOG_WARNING, "%s: cannot find label (%s)\n",
/* Label is synthetic/absent; forbid writing a label back to media. */
255 sp->ds_wlabel = FALSE;
258 return (msg ? EINVAL : 0);
262 * This routine is only called for newly minted drives or to reprobe
263 * a drive with no open slices. disk_probe_slice() is called directly
264 * when reprobing partition changes within slices.
/*
 * Probe the whole disk: run the MBR parser to build the slice table,
 * then create (or, on reprobe, revalidate) a devfs device per slice
 * and descend into disk_probe_slice() for BSD-labelable slice types.
 */
267 disk_probe(struct disk *dp, int reprobe)
269 struct disk_info *info = &dp->d_info;
270 cdev_t dev = dp->d_cdev;
273 struct diskslices *osp;
274 struct diskslice *sp;
/* setdiskinfo() must have established media geometry before probing. */
276 KKASSERT (info->d_media_blksize != 0);
279 dp->d_slice = dsmakeslicestruct(BASE_SLICE, info);
280 disk_debug(1, "disk_probe (begin): %s\n", dp->d_cdev->si_name);
282 error = mbrinit(dev, info, &(dp->d_slice));
288 for (i = 0; i < dp->d_slice->dss_nslices; i++) {
290 * Ignore the whole-disk slice, it has already been created.
292 if (i == WHOLE_DISK_SLICE)
294 sp = &dp->d_slice->dss_slices[i];
297 * Handle s0. s0 is a compatibility slice if there are no
298 * other slices and it has not otherwise been set up, else
301 if (i == COMPATIBILITY_SLICE) {
303 if (sp->ds_type == 0 &&
304 dp->d_slice->dss_nslices == BASE_SLICE) {
/* Compat slice spans the entire media. */
305 sp->ds_size = info->d_media_blocks;
314 * Ignore 0-length slices
316 if (sp->ds_size == 0)
/* Reuse an existing %ssN device on reprobe when still present. */
320 (ndev = devfs_find_device_by_name("%ss%d",
321 dev->si_name, sno))) {
323 * Device already exists and is still valid
325 ndev->si_flags |= SI_REPROBE_TEST;
328 * Else create new device
330 ndev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
331 dkmakewholeslice(dkunit(dev), i),
332 UID_ROOT, GID_OPERATOR, 0640,
333 "%ss%d", dev->si_name, sno);
334 udev_dict_set_cstr(ndev, "subsystem", "disk");
335 /* Inherit parent's disk type */
336 if (dp->d_disktype) {
337 udev_dict_set_cstr(ndev, "disk-type",
338 __DECONST(char *, dp->d_disktype));
340 if (dp->d_info.d_serialno) {
341 make_dev_alias(ndev, "serno/%s.s%d",
342 dp->d_info.d_serialno, sno);
345 ndev->si_flags |= SI_REPROBE_TEST;
350 * Probe appropriate slices for a disklabel
352 * XXX slice type 1 used by our gpt probe code.
353 * XXX slice type 0 used by mbr compat slice.
/* Only BSD-ish slice types can carry a disklabel worth probing. */
355 if (sp->ds_type == DOSPTYP_386BSD ||
356 sp->ds_type == DOSPTYP_NETBSD ||
357 sp->ds_type == DOSPTYP_OPENBSD ||
/* Remember the first BSD slice for compat-slice aliasing. */
360 if (dp->d_slice->dss_first_bsd_slice == 0)
361 dp->d_slice->dss_first_bsd_slice = i;
362 disk_probe_slice(dp, ndev, i, reprobe);
366 disk_debug(1, "disk_probe (end): %s\n", dp->d_cdev->si_name);
/*
 * Dedicated kernel thread that serializes all disk topology changes.
 * Blocks on disk_msg_port and dispatches on the message command:
 * probe/reprobe disks and slices, destroy disks, and sync.  Running
 * everything in one thread avoids races between concurrent probes and
 * teardowns; each message is replied to when its work is complete.
 */
371 disk_msg_core(void *arg)
374 struct diskslice *sp;
378 lwkt_initport_thread(&disk_msg_port, curthread);
/* Main service loop: wait for the next request (presumably forever). */
383 msg = (disk_msg_t)lwkt_waitport(&disk_msg_port, 0);
385 switch (msg->hdr.u.ms_result) {
386 case DISK_DISK_PROBE:
387 dp = (struct disk *)msg->load;
389 "DISK_DISK_PROBE: %s\n",
390 dp->d_cdev->si_name);
393 case DISK_DISK_DESTROY:
394 dp = (struct disk *)msg->load;
396 "DISK_DISK_DESTROY: %s\n",
397 dp->d_cdev->si_name);
/* Tear down the cooked device tree and unlink from the global list. */
398 devfs_destroy_subnames(dp->d_cdev->si_name);
399 devfs_destroy_dev(dp->d_cdev);
400 lwkt_gettoken(&disklist_token);
401 LIST_REMOVE(dp, d_list);
402 lwkt_reltoken(&disklist_token);
403 if (dp->d_info.d_serialno) {
404 kfree(dp->d_info.d_serialno, M_TEMP);
405 dp->d_info.d_serialno = NULL;
/* DISK_DISK_UNPROBE case follows (case label lost in this excerpt). */
409 dp = (struct disk *)msg->load;
411 "DISK_DISK_UNPROBE: %s\n",
412 dp->d_cdev->si_name);
413 devfs_destroy_subnames(dp->d_cdev->si_name);
415 case DISK_SLICE_REPROBE:
416 dp = (struct disk *)msg->load;
417 sp = (struct diskslice *)msg->load2;
/* Clear reprobe tags, reprobe, then destroy devices left untagged. */
418 devfs_clr_subnames_flag(sp->ds_dev->si_name,
421 "DISK_SLICE_REPROBE: %s\n",
422 sp->ds_dev->si_name);
423 disk_probe_slice(dp, sp->ds_dev,
424 dkslice(sp->ds_dev), 1);
425 devfs_destroy_subnames_without_flag(
426 sp->ds_dev->si_name, SI_REPROBE_TEST);
428 case DISK_DISK_REPROBE:
429 dp = (struct disk *)msg->load;
430 devfs_clr_subnames_flag(dp->d_cdev->si_name, SI_REPROBE_TEST);
432 "DISK_DISK_REPROBE: %s\n",
433 dp->d_cdev->si_name);
435 devfs_destroy_subnames_without_flag(
436 dp->d_cdev->si_name, SI_REPROBE_TEST);
/* DISK_SYNC is a no-op used purely as a queue-drain barrier. */
439 disk_debug(1, "DISK_SYNC\n");
442 devfs_debug(DEVFS_DEBUG_WARNING,
443 "disk_msg_core: unknown message "
444 "received at core\n");
/* Reply unconditionally so senders (sync or autofree) make progress. */
447 lwkt_replymsg(&msg->hdr, 0);
454 * Acts as a message drain. Any message that is replied to here gets
455 * destroyed and the memory freed.
/* Reply handler for disk_dispose_port: recycle the message storage. */
458 disk_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
460 objcache_put(disk_msg_cache, msg);
/*
 * Fire-and-forget send to the disk message core.  The reply port is
 * disk_dispose_port, so the message frees itself once the core replies;
 * the caller does not wait for completion.
 */
465 disk_msg_send(uint32_t cmd, void *load, void *load2)
468 lwkt_port_t port = &disk_msg_port;
470 disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
472 lwkt_initmsg(&disk_msg->hdr, &disk_dispose_port, 0);
474 disk_msg->hdr.u.ms_result = cmd;
475 disk_msg->load = load;
476 disk_msg->load2 = load2;
478 lwkt_sendmsg(port, &disk_msg->hdr);
/*
 * Synchronous variant of disk_msg_send(): uses an on-stack reply port,
 * blocks until the message core has processed the command, then frees
 * the message itself.
 */
482 disk_msg_send_sync(uint32_t cmd, void *load, void *load2)
484 struct lwkt_port rep_port;
488 disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
489 port = &disk_msg_port;
491 /* XXX could probably use curthread's built-in msgport */
492 lwkt_initport_thread(&rep_port, curthread);
493 lwkt_initmsg(&disk_msg->hdr, &rep_port, 0);
495 disk_msg->hdr.u.ms_result = cmd;
496 disk_msg->load = load;
497 disk_msg->load2 = load2;
499 lwkt_sendmsg(port, &disk_msg->hdr);
500 lwkt_waitmsg(&disk_msg->hdr, 0);
501 objcache_put(disk_msg_cache, disk_msg);
505 * Create a raw device for the dev_ops template (which is returned). Also
506 * create a slice and unit managed disk and overload the user visible
507 * device space with it.
509 * NOTE: The returned raw device is NOT a slice and unit managed device.
510 * It is an actual raw device representing the raw disk as specified by
511 * the passed dev_ops. The disk layer not only returns such a raw device,
512 * it also uses it internally when passing (modified) commands through.
/* Convenience wrapper: register a disk using the driver's default name. */
515 disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops)
517 return disk_create_named(NULL, unit, dp, raw_ops);
/*
 * Register a new disk under an explicit name (NULL falls back to the
 * raw_ops template name).  Creates the hidden raw whole-disk device,
 * zeroes and wires up the caller's struct disk, creates the visible
 * cooked whole-disk device covering it, registers with dsched, and
 * links the disk onto the global disklist.  Returns the raw cdev.
 */
521 disk_create_named(const char *name, int unit, struct disk *dp, struct dev_ops *raw_ops)
526 name = raw_ops->head.name;
528 disk_debug(1, "disk_create (begin): %s%d\n", name, unit);
530 rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
531 UID_ROOT, GID_OPERATOR, 0640,
/* Caller's struct disk is fully initialized here; no prior state kept. */
534 bzero(dp, sizeof(*dp));
536 dp->d_rawdev = rawdev;
537 dp->d_raw_ops = raw_ops;
538 dp->d_dev_ops = &disk_ops;
539 dp->d_cdev = make_dev_covering(&disk_ops, dp->d_rawdev->si_ops,
540 dkmakewholedisk(unit),
541 UID_ROOT, GID_OPERATOR, 0640,
543 udev_dict_set_cstr(dp->d_cdev, "subsystem", "disk");
544 dp->d_cdev->si_disk = dp;
546 dsched_disk_create_callback(dp, name, unit);
548 lwkt_gettoken(&disklist_token);
549 LIST_INSERT_HEAD(&disklist, dp, d_list);
550 lwkt_reltoken(&disklist_token);
552 disk_debug(1, "disk_create (end): %s%d\n", name, unit);
554 return (dp->d_rawdev);
/*
 * Record the disk's type string (e.g. for udev consumers) and publish
 * it in the device's udev dictionary.  The string is referenced, not
 * copied, so the caller must keep it valid for the life of the disk.
 */
558 disk_setdisktype(struct disk *disk, const char *type)
560 KKASSERT(disk != NULL);
562 disk->d_disktype = type;
563 return udev_dict_set_cstr(disk->d_cdev, "disk-type", __DECONST(char *, type));
/*
 * Install new media parameters on a disk.  Copies the caller's info
 * into disk->d_info, privately duplicates and sanitizes the serial
 * number (freeing any previous copy), notifies dsched, derives
 * whichever of d_media_size/d_media_blocks the caller left zero, and
 * propagates iosize/bsize limits from the raw device to the cooked one.
 */
567 _setdiskinfo(struct disk *disk, struct disk_info *info)
571 oldserialno = disk->d_info.d_serialno;
572 bcopy(info, &disk->d_info, sizeof(disk->d_info));
/* From here on, work on our own copy, not the caller's struct. */
573 info = &disk->d_info;
576 "_setdiskinfo: %s\n",
577 disk->d_cdev->si_name);
580 * The serial number is duplicated so the caller can throw
583 if (info->d_serialno && info->d_serialno[0]) {
584 info->d_serialno = kstrdup(info->d_serialno, M_TEMP);
585 disk_cleanserial(info->d_serialno);
587 make_dev_alias(disk->d_cdev, "serno/%s",
591 info->d_serialno = NULL;
/* Release the serial number from any previous setdiskinfo call. */
594 kfree(oldserialno, M_TEMP);
596 dsched_disk_update_callback(disk, info);
599 * The caller may set d_media_size or d_media_blocks and we
600 * calculate the other.
/* Caller must not claim a size without a block size (or vice versa). */
602 KKASSERT(info->d_media_size == 0 || info->d_media_blksize == 0);
603 if (info->d_media_size == 0 && info->d_media_blocks) {
604 info->d_media_size = (u_int64_t)info->d_media_blocks *
605 info->d_media_blksize;
606 } else if (info->d_media_size && info->d_media_blocks == 0 &&
607 info->d_media_blksize) {
608 info->d_media_blocks = info->d_media_size /
609 info->d_media_blksize;
613 * The si_* fields for rawdev are not set until after the
614 * disk_create() call, so someone using the cooked version
615 * of the raw device (i.e. da0s0) will not get the right
616 * si_iosize_max unless we fix it up here.
618 if (disk->d_cdev && disk->d_rawdev &&
619 disk->d_cdev->si_iosize_max == 0) {
620 disk->d_cdev->si_iosize_max = disk->d_rawdev->si_iosize_max;
621 disk->d_cdev->si_bsize_phys = disk->d_rawdev->si_bsize_phys;
622 disk->d_cdev->si_bsize_best = disk->d_rawdev->si_bsize_best;
625 /* Add the serial number to the udev_dictionary */
626 if (info->d_serialno)
627 udev_dict_set_cstr(disk->d_cdev, "serno", info->d_serialno);
631 * Disk drivers must call this routine when media parameters are available
/*
 * Public entry: install media parameters and asynchronously queue a
 * whole-disk probe in the message core (returns before probing runs).
 */
635 disk_setdiskinfo(struct disk *disk, struct disk_info *info)
637 _setdiskinfo(disk, info);
638 disk_msg_send(DISK_DISK_PROBE, disk, NULL);
640 "disk_setdiskinfo: sent probe for %s\n",
641 disk->d_cdev->si_name);
/*
 * Synchronous variant of disk_setdiskinfo(): does not return until the
 * message core has completed the whole-disk probe.
 */
645 disk_setdiskinfo_sync(struct disk *disk, struct disk_info *info)
647 _setdiskinfo(disk, info);
648 disk_msg_send_sync(DISK_DISK_PROBE, disk, NULL);
650 "disk_setdiskinfo_sync: sent probe for %s\n",
651 disk->d_cdev->si_name);
655 * This routine is called when an adapter detaches. The higher level
656 * managed disk device is destroyed while the lower level raw device is
/* Unregister from dsched, then synchronously tear down via the core. */
660 disk_destroy(struct disk *disk)
662 dsched_disk_destroy_callback(disk);
663 disk_msg_send_sync(DISK_DISK_DESTROY, disk, NULL);
/*
 * Validate a device for crash-dump use and return its geometry: the
 * usable size, the starting block (past any reserved blocks), and the
 * sector size, all obtained via the DIOCGPART ioctl.  Returns 0 on
 * success; error paths (e.g. zero block size) are partially elided in
 * this excerpt.
 */
668 disk_dumpcheck(cdev_t dev, u_int64_t *size, u_int64_t *blkno, u_int32_t *secsize)
670 struct partinfo pinfo;
673 bzero(&pinfo, sizeof(pinfo));
674 error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0,
675 proc0.p_ucred, NULL);
679 if (pinfo.media_blksize == 0)
682 if (blkno) /* XXX: make sure this reserved stuff is right */
683 *blkno = pinfo.reserved_blocks +
684 pinfo.media_offset / pinfo.media_blksize;
686 *secsize = pinfo.media_blksize;
688 *size = (pinfo.media_blocks - pinfo.reserved_blocks);
/*
 * Configure (onoff != 0) or clear (onoff == 0) this device as the
 * kernel crash-dump target.  On enable, validates the device with
 * disk_dumpcheck() and registers a dumperinfo whose offsets/sizes are
 * expressed in DEV_BSIZE units.
 */
694 disk_dumpconf(cdev_t dev, u_int onoff)
696 struct dumperinfo di;
697 u_int64_t size, blkno;
/* onoff == 0: deregister any existing dumper. */
702 return set_dumper(NULL);
704 error = disk_dumpcheck(dev, &size, &blkno, &secsize);
709 bzero(&di, sizeof(struct dumperinfo));
710 di.dumper = diskdump;
712 di.blocksize = secsize;
713 di.mediaoffset = blkno * DEV_BSIZE;
714 di.mediasize = size * DEV_BSIZE;
716 return set_dumper(&di);
/*
 * Synchronously remove the cooked sub-devices (slices/partitions) of a
 * disk via the message core, leaving the disk itself registered.
 */
720 disk_unprobe(struct disk *disk)
725 disk_msg_send_sync(DISK_UNPROBE, disk, NULL);
/* Drop the disk's slice structure, invalidating cached label state. */
729 disk_invalidate (struct disk *disk)
731 dsgone(&disk->d_slice);
/*
 * Iterate the global disk list: NULL returns the first disk, otherwise
 * the disk following the one passed in.  The list token is held only
 * for the lookup, so callers must tolerate concurrent list changes.
 */
735 disk_enumerate(struct disk *disk)
739 lwkt_gettoken(&disklist_token);
741 dp = (LIST_FIRST(&disklist));
743 dp = (LIST_NEXT(disk, d_list));
744 lwkt_reltoken(&disklist_token);
/*
 * kern.disks sysctl handler: emits the raw device names of all
 * registered disks as a single space-separated, NUL-terminated string.
 */
751 sysctl_disks(SYSCTL_HANDLER_ARGS)
759 while ((disk = disk_enumerate(disk))) {
/* Separator before every entry but the first (guard elided here). */
761 error = SYSCTL_OUT(req, " ", 1);
767 error = SYSCTL_OUT(req, disk->d_rawdev->si_name,
768 strlen(disk->d_rawdev->si_name));
/* Terminating NUL. */
772 error = SYSCTL_OUT(req, "", 1);
776 SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
777 sysctl_disks, "A", "names of available disks");
780 * Open a disk device or partition.
/*
 * d_open entry for cooked disk devices.  Serializes against concurrent
 * opens with a DISKFLAG_LOCK/WANTED tsleep handshake, opens the
 * underlying raw device on first open, then opens the slice layer;
 * the raw device is closed again if the slice open left nothing open.
 */
784 diskopen(struct dev_open_args *ap)
786 cdev_t dev = ap->a_head.a_dev;
791 * dp can't be NULL here XXX.
793 * d_slice will be NULL if setdiskinfo() has not been called yet.
794 * setdiskinfo() is typically called whether the disk is present
795 * or not (e.g. CD), but the base disk device is created first
796 * and there may be a race.
799 if (dp == NULL || dp->d_slice == NULL)
804 * Deal with open races
807 while (dp->d_flags & DISKFLAG_LOCK) {
808 dp->d_flags |= DISKFLAG_WANTED;
809 error = tsleep(dp, PCATCH, "diskopen", hz);
815 dp->d_flags |= DISKFLAG_LOCK;
818 * Open the underlying raw device.
820 if (!dsisopen(dp->d_slice)) {
/* First opener: let the raw device inherit our iosize limit. */
822 if (!pdev->si_iosize_max)
823 pdev->si_iosize_max = dev->si_iosize_max;
825 error = dev_dopen(dp->d_rawdev, ap->a_oflags,
826 ap->a_devtype, ap->a_cred);
830 * Inherit properties from the underlying device now that it is
838 error = dsopen(dev, ap->a_devtype, dp->d_info.d_dsflags,
839 &dp->d_slice, &dp->d_info);
840 if (!dsisopen(dp->d_slice)) {
/* Slice open failed/no slices open: back out the raw open. */
841 dev_dclose(dp->d_rawdev, ap->a_oflags, ap->a_devtype);
/* Release the open lock and wake any waiters. */
844 dp->d_flags &= ~DISKFLAG_LOCK;
845 if (dp->d_flags & DISKFLAG_WANTED) {
846 dp->d_flags &= ~DISKFLAG_WANTED;
855 * Close a disk device or partition
/*
 * d_close entry: close the slice layer, and close the underlying raw
 * device once no slices on this disk remain open.
 */
859 diskclose(struct dev_close_args *ap)
861 cdev_t dev = ap->a_head.a_dev;
869 dsclose(dev, ap->a_devtype, dp->d_slice);
870 if (!dsisopen(dp->d_slice)) {
871 error = dev_dclose(dp->d_rawdev, ap->a_fflag, ap->a_devtype);
878 * First execute the ioctl on the disk device, and if it isn't supported
879 * try running it on the backing device.
/*
 * d_ioctl entry.  DIOCGKERNELDUMP is intercepted to (de)configure this
 * device as the crash-dump target; everything else goes to the slice
 * layer first, falling through to the raw device on ENOIOCTL.
 */
883 diskioctl(struct dev_ioctl_args *ap)
885 cdev_t dev = ap->a_head.a_dev;
894 devfs_debug(DEVFS_DEBUG_DEBUG,
895 "diskioctl: cmd is: %lx (name: %s)\n",
896 ap->a_cmd, dev->si_name);
897 devfs_debug(DEVFS_DEBUG_DEBUG,
898 "diskioctl: &dp->d_slice is: %p, %p\n",
899 &dp->d_slice, dp->d_slice);
901 if (ap->a_cmd == DIOCGKERNELDUMP) {
902 u = *(u_int *)ap->a_data;
903 return disk_dumpconf(dev, u);
/* NOTE(review): `&dp->d_slice == NULL` is always false; the intended
 * guard is presumably just `dp->d_slice == NULL` — kept byte-identical
 * here because surrounding lines are missing from this excerpt. */
906 if (&dp->d_slice == NULL || dp->d_slice == NULL) {
910 error = dsioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag,
911 &dp->d_slice, &dp->d_info);
915 if (error == ENOIOCTL) {
916 error = dev_dioctl(dp->d_rawdev, ap->a_cmd, ap->a_data,
917 ap->a_fflag, ap->a_cred, NULL);
923 * Execute strategy routine
/*
 * d_strategy entry: translate the slice-relative bio via dscheck() and
 * hand the result to the I/O scheduler; dscheck() returning NULL means
 * it already completed the bio (EOF or bounds error).
 */
927 diskstrategy(struct dev_strategy_args *ap)
929 cdev_t dev = ap->a_head.a_dev;
930 struct bio *bio = ap->a_bio;
/* No backing disk: fail the buffer with ENXIO. */
937 bio->bio_buf->b_error = ENXIO;
938 bio->bio_buf->b_flags |= B_ERROR;
942 KKASSERT(dev->si_disk == dp);
945 * The dscheck() function will also transform the slice relative
946 * block number i.e. bio->bio_offset into a block number that can be
947 * passed directly to the underlying raw device. If dscheck()
948 * returns NULL it will have handled the bio for us (e.g. EOF
949 * or error due to being beyond the device size).
951 if ((nbio = dscheck(dev, bio, dp->d_slice)) != NULL) {
952 dsched_queue(dp, nbio);
960 * Return the partition size in ?blocks?
/* d_psize entry: report the slice/partition size via dssize(). */
964 diskpsize(struct dev_psize_args *ap)
966 cdev_t dev = ap->a_head.a_dev;
972 ap->a_result = dssize(dev, &dp->d_slice);
977 * When new device entries are instantiated, make sure they inherit our
978 * si_disk structure and block and iosize limits from the raw device.
980 * This routine is always called synchronously in the context of the
983 * XXX The various io and block size constraints are not always initialized
984 * properly by devices.
988 diskclone(struct dev_clone_args *ap)
990 cdev_t dev = ap->a_head.a_dev;
994 KKASSERT(dp != NULL);
/* Copy size limits from the raw device onto the newly cloned node. */
996 dev->si_iosize_max = dp->d_rawdev->si_iosize_max;
997 dev->si_bsize_phys = dp->d_rawdev->si_bsize_phys;
998 dev->si_bsize_best = dp->d_rawdev->si_bsize_best;
/*
 * d_dump entry used during kernel crash dumps.  Re-validates the dump
 * geometry, bounds-checks the requested write against the dump device
 * (offsets in DEV_BSIZE units), then forwards the operation to the
 * underlying raw device.
 */
1003 diskdump(struct dev_dump_args *ap)
1005 cdev_t dev = ap->a_head.a_dev;
1006 struct disk *dp = dev->si_disk;
1007 u_int64_t size, offset;
1010 error = disk_dumpcheck(dev, &size, &ap->a_blkno, &ap->a_secsize);
1011 /* XXX: this should probably go in disk_dumpcheck somehow */
1012 if (ap->a_length != 0) {
1014 offset = ap->a_blkno * DEV_BSIZE;
1015 if ((ap->a_offset < offset) ||
1016 (ap->a_offset + ap->a_length - offset > size)) {
1017 kprintf("Attempt to write outside dump device boundaries.\n");
/* Redirect the operation to the raw device and execute it. */
1023 ap->a_head.a_dev = dp->d_rawdev;
1024 error = dev_doperate(&ap->a_head);
/* Expose structure sizes under debug.sizeof for debugging/ABI checks. */
1031 SYSCTL_INT(_debug_sizeof, OID_AUTO, diskslices, CTLFLAG_RD,
1032 0, sizeof(struct diskslices), "sizeof(struct diskslices)");
1034 SYSCTL_INT(_debug_sizeof, OID_AUTO, disk, CTLFLAG_RD,
1035 0, sizeof(struct disk), "sizeof(struct disk)");
1038 * Reorder interval for burst write allowance and minor write
1041 * We always want to trickle some writes in to make use of the
1042 * disk's zone cache. Bursting occurs on a longer interval and only
1043 * runningbufspace is well over the hirunningspace limit.
/* Read count between write bursts; see bioqwritereorder(). */
1045 int bioq_reorder_burst_interval = 60; /* should be multiple of minor */
1046 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_interval,
1047 CTLFLAG_RW, &bioq_reorder_burst_interval, 0, "");
/* Read count between minor write trickles. */
1048 int bioq_reorder_minor_interval = 5;
1049 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_interval,
1050 CTLFLAG_RW, &bioq_reorder_minor_interval, 0, "");
/* Byte budgets pulled ahead of reads per burst / per minor trickle. */
1052 int bioq_reorder_burst_bytes = 3000000;
1053 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_burst_bytes,
1054 CTLFLAG_RW, &bioq_reorder_burst_bytes, 0, "");
1055 int bioq_reorder_minor_bytes = 262144;
1056 SYSCTL_INT(_kern, OID_AUTO, bioq_reorder_minor_bytes,
1057 CTLFLAG_RW, &bioq_reorder_minor_bytes, 0, "");
1061 * Order I/Os. Generally speaking this code is designed to make better
1062 * use of drive zone caches. A drive zone cache can typically track linear
1063 * reads or writes for around 16 zones simultaniously.
1065 * Read prioritization issues: It is possible for hundreds of megabytes worth
1066 * of writes to be queued asynchronously. This creates a huge bottleneck
1067 * for reads which reduce read bandwidth to a trickle.
1069 * To solve this problem we generally reorder reads before writes.
1071 * However, a large number of random reads can also starve writes and
1072 * make poor use of the drive zone cache so we allow writes to trickle
/*
 * Queue a bio: reads are inserted ahead of the queued writes (at the
 * `transition` marker), writes are appended, B_ORDERED and all other
 * commands force strict ordering at the tail.  Periodically (per the
 * reorder interval tunables) bioqwritereorder() advances the marker so
 * queued writes cannot be starved by a read stream.
 */
1076 bioqdisksort(struct bio_queue_head *bioq, struct bio *bio)
1079 * The BIO wants to be ordered. Adding to the tail also
1080 * causes transition to be set to NULL, forcing the ordering
1081 * of all prior I/O's.
1083 if (bio->bio_buf->b_flags & B_ORDERED) {
1084 bioq_insert_tail(bioq, bio);
1088 switch(bio->bio_buf->b_cmd) {
/* Read path (case label lost in this excerpt). */
1090 if (bioq->transition) {
1092 * Insert before the first write. Bleedover writes
1093 * based on reorder intervals to prevent starvation.
1095 TAILQ_INSERT_BEFORE(bioq->transition, bio, bio_act);
1097 if (bioq->reorder % bioq_reorder_minor_interval == 0) {
1098 bioqwritereorder(bioq);
1099 if (bioq->reorder >=
1100 bioq_reorder_burst_interval) {
1106 * No writes queued (or ordering was forced),
1109 TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
1114 * Writes are always appended. If no writes were previously
1115 * queued or an ordered tail insertion occured the transition
1116 * field will be NULL.
1118 TAILQ_INSERT_TAIL(&bioq->queue, bio, bio_act);
1119 if (bioq->transition == NULL)
1120 bioq->transition = bio;
1124 * All other request types are forced to be ordered.
1126 bioq_insert_tail(bioq, bio);
1132 * Move the read-write transition point to prevent reads from
1133 * completely starving our writes. This brings a number of writes into
1134 * the fold every N reads.
1136 * We bring a few linear writes into the fold on a minor interval
1137 * and we bring a non-linear burst of writes into the fold on a major
1138 * interval. Bursting only occurs if runningbufspace is really high
1139 * (typically from syncs, fsyncs, or HAMMER flushes).
/*
 * Advance bioq->transition past up to `left` bytes of queued writes so
 * those writes are dispatched ahead of newly arriving reads.  The
 * minor budget additionally stops at the first non-contiguous offset;
 * the burst budget (under severe runningbufspace pressure) does not.
 */
1143 bioqwritereorder(struct bio_queue_head *bioq)
1151 if (bioq->reorder < bioq_reorder_burst_interval ||
1152 !buf_runningbufspace_severe()) {
1153 left = (size_t)bioq_reorder_minor_bytes;
1156 left = (size_t)bioq_reorder_burst_bytes;
1160 next_offset = bioq->transition->bio_offset;
1161 while ((bio = bioq->transition) != NULL &&
1162 (check_off == 0 || next_offset == bio->bio_offset)
1164 n = bio->bio_buf->b_bcount;
1165 next_offset = bio->bio_offset + n;
1166 bioq->transition = TAILQ_NEXT(bio, bio_act);
1174 * Bounds checking against the media size, used for the raw partition.
1175 * secsize, mediasize and b_blkno must all be the same units.
1176 * Possibly this has to be DEV_BSIZE (512).
/*
 * Clip a bio against the end of the media: exactly at EOF returns an
 * EOF-style result (b_resid = b_bcount), past EOF fails with EINVAL,
 * and a partial overlap truncates b_bcount to the remaining sectors.
 */
1179 bounds_check_with_mediasize(struct bio *bio, int secsize, uint64_t mediasize)
1181 struct buf *bp = bio->bio_buf;
1184 sz = howmany(bp->b_bcount, secsize);
1186 if (bio->bio_offset/DEV_BSIZE + sz > mediasize) {
1187 sz = mediasize - bio->bio_offset/DEV_BSIZE;
1189 /* If exactly at end of disk, return EOF. */
1190 bp->b_resid = bp->b_bcount;
1194 /* If past end of disk, return EINVAL. */
1195 bp->b_error = EINVAL;
1198 /* Otherwise, truncate request. */
1199 bp->b_bcount = sz * secsize;
1206 * Disk error is the preface to plaintive error messages
1207 * about failing disk transfers. It prints messages of the form
1209 hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)
1211 * if the offset of the error in the transfer and a disk label
1212 * are both available. blkdone should be -1 if the position of the error
1213 * is unknown; the disklabel pointer may be null from drivers that have not
1214 * been converted to use them. The message is printed with kprintf
1215 * if pri is LOG_PRINTF, otherwise it uses log at the specified priority.
1216 * The message should be completed (with at least a newline) with kprintf
1217 * or log(-1, ...), respectively. There is no trailing space.
/* Print the standard prefix for a failed disk transfer (no newline). */
1220 diskerr(struct bio *bio, cdev_t dev, const char *what, int pri, int donecnt)
1222 struct buf *bp = bio->bio_buf;
1236 kprintf("%s: %s %sing ", dev->si_name, what, term);
1237 kprintf("offset %012llx for %d",
1238 (long long)bio->bio_offset,
1242 kprintf(" (%d bytes completed)", donecnt);
1246 * Locate a disk device
/* Look up a disk device node by its devfs name. */
1249 disk_locate(const char *devname)
1251 return devfs_find_device_by_name(devname);
/*
 * Barrier: block until the disk message core has drained all prior
 * requests (DISK_SYNC is a no-op message processed in order).
 */
1255 disk_config(void *arg)
1257 disk_msg_send_sync(DISK_SYNC, NULL, NULL);
/*
 * Subsystem init (run via SYSINIT below): create the disk_msg object
 * cache, initialize the disklist token and the autofree reply port,
 * spawn the disk_msg_core service thread, and wait for it to start.
 */
1263 struct thread* td_core;
1265 disk_msg_cache = objcache_create("disk-msg-cache", 0, 0,
1267 objcache_malloc_alloc,
1268 objcache_malloc_free,
1269 &disk_msg_malloc_args);
1271 lwkt_token_init(&disklist_token, 1, "disks");
1274 * Initialize the reply-only port which acts as a message drain
1276 lwkt_initport_replyonly(&disk_dispose_port, disk_msg_autofree_reply);
1278 lwkt_create(disk_msg_core, /*args*/NULL, &td_core, NULL,
1279 0, 0, "disk_msg_core");
1281 tsleep(td_core, 0, "diskcore", 0);
/* disk_uninit (run via SYSUNINIT): release the message cache. */
1287 objcache_destroy(disk_msg_cache);
1291 * Clean out illegal characters in serial numbers.
/*
 * Sanitize a serial number in place: alphanumerics and '-', '@', '+',
 * '.' pass through; other characters are replaced (replacement
 * character is on a line missing from this excerpt — presumably '_').
 */
1294 disk_cleanserial(char *serno)
1298 while ((c = *serno) != 0) {
1299 if (c >= 'a' && c <= 'z')
1301 else if (c >= 'A' && c <= 'Z')
1303 else if (c >= '0' && c <= '9')
1305 else if (c == '-' || c == '@' || c == '+' || c == '.')
/* Boot-time tunable and runtime sysctl controlling disk_debug() output. */
1313 TUNABLE_INT("kern.disk_debug", &disk_debug_enable);
1314 SYSCTL_INT(_kern, OID_AUTO, disk_debug, CTLFLAG_RW, &disk_debug_enable,
1315 0, "Enable subr_disk debugging");
/* Hook subsystem init/teardown into the kernel boot/unload sequence. */
1317 SYSINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, disk_init, NULL);
1318 SYSUNINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, disk_uninit, NULL);