2 * Copyright (c) 2003,2004,2009 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * ----------------------------------------------------------------------------
35 * "THE BEER-WARE LICENSE" (Revision 42):
36 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
37 * can do whatever you want with this stuff. If we meet some day, and you think
38 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
39 * ----------------------------------------------------------------------------
41 * Copyright (c) 1982, 1986, 1988, 1993
42 * The Regents of the University of California. All rights reserved.
43 * (c) UNIX System Laboratories, Inc.
44 * All or some portions of this file are derived from material licensed
45 * to the University of California by American Telephone and Telegraph
46 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
47 * the permission of UNIX System Laboratories, Inc.
49 * Redistribution and use in source and binary forms, with or without
50 * modification, are permitted provided that the following conditions
52 * 1. Redistributions of source code must retain the above copyright
53 * notice, this list of conditions and the following disclaimer.
54 * 2. Redistributions in binary form must reproduce the above copyright
55 * notice, this list of conditions and the following disclaimer in the
56 * documentation and/or other materials provided with the distribution.
57 * 3. All advertising materials mentioning features or use of this software
58 * must display the following acknowledgement:
59 * This product includes software developed by the University of
60 * California, Berkeley and its contributors.
61 * 4. Neither the name of the University nor the names of its contributors
62 * may be used to endorse or promote products derived from this software
63 * without specific prior written permission.
65 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
66 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
67 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
68 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
69 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
70 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
71 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
72 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
73 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
74 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
77 * @(#)ufs_disksubr.c 8.5 (Berkeley) 1/21/94
78 * $FreeBSD: src/sys/kern/subr_disk.c,v 1.20.2.6 2001/10/05 07:14:57 peter Exp $
79 * $FreeBSD: src/sys/ufs/ufs/ufs_disksubr.c,v 1.44.2.3 2001/03/05 05:42:19 obrien Exp $
80 * $DragonFly: src/sys/kern/subr_disk.c,v 1.40 2008/06/05 18:06:32 swildner Exp $
83 #include <sys/param.h>
84 #include <sys/systm.h>
85 #include <sys/kernel.h>
87 #include <sys/sysctl.h>
90 #include <sys/disklabel.h>
91 #include <sys/disklabel32.h>
92 #include <sys/disklabel64.h>
93 #include <sys/diskslice.h>
94 #include <sys/diskmbr.h>
96 #include <sys/malloc.h>
97 #include <sys/sysctl.h>
98 #include <machine/md_var.h>
99 #include <sys/ctype.h>
100 #include <sys/syslog.h>
101 #include <sys/device.h>
102 #include <sys/msgport.h>
103 #include <sys/msgport2.h>
104 #include <sys/buf2.h>
105 #include <vfs/devfs/devfs.h>
106 #include <sys/thread.h>
107 #include <sys/thread2.h>
109 #include <sys/queue.h>
110 #include <sys/lock.h>
/*
 * File-scope state for the disk layer.
 * NOTE(review): this extraction is missing interior lines throughout the
 * file; several initializers below are visibly incomplete.
 */
112 static MALLOC_DEFINE(M_DISK, "disk", "disk data");
/* Forward declarations for the message-thread and probe machinery below. */
114 static void disk_msg_autofree_reply(lwkt_port_t, lwkt_msg_t);
115 static void disk_msg_core(void *);
116 static int disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe);
117 static void disk_probe(struct disk *dp, int reprobe);
118 static void _setdiskinfo(struct disk *disk, struct disk_info *info);
/* dev_ops entry points implemented in this file. */
120 static d_open_t diskopen;
121 static d_close_t diskclose;
122 static d_ioctl_t diskioctl;
123 static d_strategy_t diskstrategy;
124 static d_psize_t diskpsize;
125 static d_clone_t diskclone;
126 static d_dump_t diskdump;
/* Global list of all registered disks, protected by disklist_token. */
128 static LIST_HEAD(, disk) disklist = LIST_HEAD_INITIALIZER(&disklist);
129 static struct lwkt_token disklist_token;
/*
 * Cooked (slice/partition managed) device switch.  Reads/writes go through
 * phys I/O; other ops route to the disk* handlers declared above.
 * NOTE(review): some member initializers (e.g. .d_open, .d_read) are not
 * visible in this extraction.
 */
131 static struct dev_ops disk_ops = {
132 { "disk", 0, D_DISK },
134 .d_close = diskclose,
136 .d_write = physwrite,
137 .d_ioctl = diskioctl,
138 .d_strategy = diskstrategy,
140 .d_psize = diskpsize,
/* Object cache backing disk_msg allocations (malloc-backed, M_DISK). */
144 static struct objcache *disk_msg_cache;
146 struct objcache_malloc_args disk_msg_malloc_args = {
147 sizeof(struct disk_msg), M_DISK };
/* disk_msg_port: target of async/sync disk messages (serviced by
 * disk_msg_core).  disk_dispose_port: reply-only drain that frees
 * messages (see disk_msg_autofree_reply). */
149 static struct lwkt_port disk_dispose_port;
150 static struct lwkt_port disk_msg_port;
/*
 * Probe one slice for a disklabel (32-bit label first, then 64-bit) and
 * create/revalidate the per-partition devfs nodes ("%s%c", e.g. da0s1a).
 * When reprobing, surviving devices are tagged SI_REPROBE_TEST so the
 * caller can destroy the untagged leftovers.  Returns EINVAL if no label
 * could be read (and no compatibility label applies), else 0.
 * NOTE(review): local declarations (sno, msg, ops, i, ndev) and several
 * statements are missing from this extraction.
 */
154 disk_probe_slice(struct disk *dp, cdev_t dev, int slice, int reprobe)
156 struct disk_info *info = &dp->d_info;
157 struct diskslice *sp = &dp->d_slice->dss_slices[slice];
159 struct partinfo part;
/* Slice number used in device names; slice 0 maps to sno 0. */
165 sno = slice ? slice - 1 : 0;
/* Try the 32-bit disklabel first; fall back to 64-bit on "no disk label". */
167 ops = &disklabel32_ops;
168 msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
169 if (msg && !strcmp(msg, "no disk label")) {
170 ops = &disklabel64_ops;
171 msg = ops->op_readdisklabel(dev, sp, &sp->ds_label, info);
174 if (slice != WHOLE_DISK_SLICE)
175 ops->op_adjust_label_reserved(dp->d_slice, slice, sp);
/* Walk the label's partitions and materialize devfs nodes for each. */
180 for (i = 0; i < ops->op_getnumparts(sp->ds_label); i++) {
181 ops->op_loadpartinfo(sp->ds_label, i, &part);
184 (ndev = devfs_find_device_by_name("%s%c",
185 dev->si_name, 'a' + i))
188 * Device already exists and
191 ndev->si_flags |= SI_REPROBE_TEST;
/* Partition node does not exist yet -- create it. */
193 ndev = make_dev(&disk_ops,
194 dkmakeminor(dkunit(dp->d_cdev),
196 UID_ROOT, GID_OPERATOR, 0640,
197 "%s%c", dev->si_name, 'a'+ i);
/* Also publish a serial-number alias when one is known. */
199 if (dp->d_info.d_serialno) {
202 dp->d_info.d_serialno,
205 ndev->si_flags |= SI_REPROBE_TEST;
/* No readable label: synthesize a compatibility label if requested. */
209 } else if (info->d_dsflags & DSO_COMPATLABEL) {
/* >=2^32 sectors cannot be described by a 32-bit label. */
211 if (sp->ds_size >= 0x100000000ULL)
212 ops = &disklabel64_ops;
214 ops = &disklabel32_ops;
215 sp->ds_label = ops->op_clone_label(info, sp);
/* A BSD slice with no label at all is worth a warning. */
217 if (sp->ds_type == DOSPTYP_386BSD /* XXX */) {
218 log(LOG_WARNING, "%s: cannot find label (%s)\n",
/* Synthesized/absent labels must never be written back to media. */
224 sp->ds_wlabel = FALSE;
227 return (msg ? EINVAL : 0);
/*
 * (Re)probe a whole disk: build the slice table from the MBR, create or
 * revalidate the per-slice devfs nodes ("%ss%d"), and recurse into
 * disk_probe_slice() for slices that look like they carry a disklabel.
 * Runs in the disk message thread (see disk_msg_core).
 * NOTE(review): local declarations (error, i, sno, ndev) and the opening
 * brace region are missing from this extraction.
 */
232 disk_probe(struct disk *dp, int reprobe)
234 struct disk_info *info = &dp->d_info;
235 cdev_t dev = dp->d_cdev;
238 struct diskslice *sp;
/* A zero block size would make all later size math nonsense. */
240 KKASSERT (info->d_media_blksize != 0);
242 dp->d_slice = dsmakeslicestruct(BASE_SLICE, info);
/* Parse the MBR; this may grow dss_nslices. */
244 error = mbrinit(dev, info, &(dp->d_slice));
248 for (i = 0; i < dp->d_slice->dss_nslices; i++) {
250 * Ignore the whole-disk slice, it has already been created.
252 if (i == WHOLE_DISK_SLICE)
254 sp = &dp->d_slice->dss_slices[i];
257 * Handle s0. s0 is a compatibility slice if there are no
258 * other slices and it has not otherwise been set up, else
261 if (i == COMPATIBILITY_SLICE) {
263 if (sp->ds_type == 0 &&
264 dp->d_slice->dss_nslices == BASE_SLICE) {
265 sp->ds_size = info->d_media_blocks;
274 * Ignore 0-length slices
276 if (sp->ds_size == 0)
280 (ndev = devfs_find_device_by_name("%ss%d",
281 dev->si_name, sno))) {
283 * Device already exists and is still valid
285 ndev->si_flags |= SI_REPROBE_TEST;
288 * Else create new device
290 ndev = make_dev(&disk_ops,
291 dkmakewholeslice(dkunit(dev), i),
292 UID_ROOT, GID_OPERATOR, 0640,
293 "%ss%d", dev->si_name, sno);
/* Serial-number alias for the new slice device, when available. */
294 if (dp->d_info.d_serialno) {
295 make_dev_alias(ndev, "serno/%s.s%d",
296 dp->d_info.d_serialno, sno);
299 ndev->si_flags |= SI_REPROBE_TEST;
304 * Probe appropriate slices for a disklabel
306 * XXX slice type 1 used by our gpt probe code.
308 if (sp->ds_type == DOSPTYP_386BSD || sp->ds_type == 1) {
/* Remember the first BSD slice for compatibility handling. */
309 if (dp->d_slice->dss_first_bsd_slice == 0)
310 dp->d_slice->dss_first_bsd_slice = i;
311 disk_probe_slice(dp, ndev, i, reprobe);
/*
 * Dedicated kernel-thread service loop for disk management messages.
 * Serializes probe/reprobe/destroy work so devfs node creation and the
 * disklist are only mutated from one context.  Each message is replied
 * to when its work is done (replies to async messages land in
 * disk_dispose_port and are freed there).
 * NOTE(review): the enclosing loop, local declarations (dp, msg, ilock),
 * break statements and the terminator case are missing from this
 * extraction; the visible cases appear to fall through only because of
 * the missing lines.
 */
318 disk_msg_core(void *arg)
321 struct diskslice *sp;
/* Become the owner of disk_msg_port so lwkt_waitport works below. */
326 lwkt_initport_thread(&disk_msg_port, curthread);
331 msg = (disk_msg_t)lwkt_waitport(&disk_msg_port, 0);
333 switch (msg->hdr.u.ms_result) {
334 case DISK_DISK_PROBE:
335 dp = (struct disk *)msg->load;
/* Tear down the managed device and unlink the disk from disklist.
 * The raw device is left alone (the adapter owns it). */
338 case DISK_DISK_DESTROY:
339 dp = (struct disk *)msg->load;
340 devfs_destroy_subnames(dp->d_cdev->si_name);
341 devfs_destroy_dev(dp->d_cdev);
342 lwkt_gettoken(&ilock, &disklist_token);
343 LIST_REMOVE(dp, d_list);
344 lwkt_reltoken(&ilock);
/* The serial number was kstrdup'd in _setdiskinfo(); release it. */
345 if (dp->d_info.d_serialno) {
346 kfree(dp->d_info.d_serialno, M_TEMP);
347 dp->d_info.d_serialno = NULL;
/* Presumably DISK_UNPROBE: drop sub-devices only -- TODO confirm,
 * the case label is missing from this extraction. */
351 dp = (struct disk *)msg->load;
352 devfs_destroy_subnames(dp->d_cdev->si_name);
/* Reprobe a single slice: clear the tag, reprobe, then destroy any
 * sub-device that did not get re-tagged SI_REPROBE_TEST. */
354 case DISK_SLICE_REPROBE:
355 dp = (struct disk *)msg->load;
356 sp = (struct diskslice *)msg->load2;
357 devfs_clr_subnames_flag(sp->ds_dev->si_name,
359 devfs_debug(DEVFS_DEBUG_DEBUG,
360 "DISK_SLICE_REPROBE: %s\n",
361 sp->ds_dev->si_name);
362 disk_probe_slice(dp, sp->ds_dev,
363 dkslice(sp->ds_dev), 1);
364 devfs_destroy_subnames_without_flag(
365 sp->ds_dev->si_name, SI_REPROBE_TEST);
/* Same tag/probe/sweep dance for the whole disk. */
367 case DISK_DISK_REPROBE:
368 dp = (struct disk *)msg->load;
369 devfs_clr_subnames_flag(dp->d_cdev->si_name, SI_REPROBE_TEST);
370 devfs_debug(DEVFS_DEBUG_DEBUG,
371 "DISK_DISK_REPROBE: %s\n",
372 dp->d_cdev->si_name);
374 devfs_destroy_subnames_without_flag(
375 dp->d_cdev->si_name, SI_REPROBE_TEST);
/* Unknown command: warn and fall through to the reply. */
380 devfs_debug(DEVFS_DEBUG_WARNING,
381 "disk_msg_core: unknown message "
382 "received at core\n");
/* Reply wakes synchronous senders / frees async messages. */
385 lwkt_replymsg((lwkt_msg_t)msg, 0);
392 * Acts as a message drain. Any message that is replied to here gets
393 * destroyed and the memory freed.
396 disk_msg_autofree_reply(lwkt_port_t port, lwkt_msg_t msg)
398 objcache_put(disk_msg_cache, msg);
/*
 * Send an asynchronous command to the disk message thread.  The message
 * is allocated from disk_msg_cache and its reply port is the autofree
 * drain above, so the caller does not wait and never frees the message.
 * NOTE(review): the disk_msg local declaration is missing from this
 * extraction.
 */
403 disk_msg_send(uint32_t cmd, void *load, void *load2)
406 lwkt_port_t port = &disk_msg_port;
408 disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
410 lwkt_initmsg(&disk_msg->hdr, &disk_dispose_port, 0);
412 disk_msg->hdr.u.ms_result = cmd;
413 disk_msg->load = load;
414 disk_msg->load2 = load2;
416 lwkt_sendmsg(port, (lwkt_msg_t)disk_msg);
/*
 * Send a command to the disk message thread and block until it has been
 * processed.  Uses a temporary on-stack reply port owned by the calling
 * thread; the reply received in msg_incoming is the same message we sent.
 * NOTE(review): the trailing objcache_put()/cleanup lines are missing
 * from this extraction -- presumably the message is returned to
 * disk_msg_cache after the wait; confirm against the full source.
 */
420 disk_msg_send_sync(uint32_t cmd, void *load, void *load2)
422 struct lwkt_port rep_port;
423 disk_msg_t disk_msg = objcache_get(disk_msg_cache, M_WAITOK);
424 disk_msg_t msg_incoming;
425 lwkt_port_t port = &disk_msg_port;
427 lwkt_initport_thread(&rep_port, curthread);
428 lwkt_initmsg(&disk_msg->hdr, &rep_port, 0);
430 disk_msg->hdr.u.ms_result = cmd;
431 disk_msg->load = load;
432 disk_msg->load2 = load2;
435 lwkt_sendmsg(port, (lwkt_msg_t)disk_msg);
/* Block until disk_msg_core replies to our private port. */
436 msg_incoming = lwkt_waitport(&rep_port, 0);
440 * Create a raw device for the dev_ops template (which is returned). Also
441 * create a slice and unit managed disk and overload the user visible
442 * device space with it.
444 * NOTE: The returned raw device is NOT a slice and unit managed device.
445 * It is an actual raw device representing the raw disk as specified by
446 * the passed dev_ops. The disk layer not only returns such a raw device,
447 * it also uses it internally when passing (modified) commands through.
/*
 * Register a new disk: create the driver's raw device plus the cooked
 * (slice/partition managed) device sharing the same name/minor, link the
 * disk onto the global disklist, and hand the raw device back to the
 * caller (see the block comment above this function).
 * NOTE(review): local declarations (rawdev, ilock) are missing from this
 * extraction.
 */
450 disk_create(int unit, struct disk *dp, struct dev_ops *raw_ops)
/* Raw device: driver-private, not visible via normal devfs lookup. */
455 rawdev = make_only_dev(raw_ops, dkmakewholedisk(unit),
456 UID_ROOT, GID_OPERATOR, 0640,
457 "%s%d", raw_ops->head.name, unit);
/* Caller provides the struct disk storage; start from a clean slate. */
459 bzero(dp, sizeof(*dp));
461 dp->d_rawdev = rawdev;
462 dp->d_raw_ops = raw_ops;
463 dp->d_dev_ops = &disk_ops;
/* Cooked device: same whole-disk minor, routed through disk_ops. */
464 dp->d_cdev = make_dev(&disk_ops,
465 dkmakewholedisk(unit),
466 UID_ROOT, GID_OPERATOR, 0640,
467 "%s%d", raw_ops->head.name, unit);
469 dp->d_cdev->si_disk = dp;
/* Publish on the global list under the disklist token. */
471 lwkt_gettoken(&ilock, &disklist_token);
472 LIST_INSERT_HEAD(&disklist, dp, d_list);
473 lwkt_reltoken(&ilock);
474 return (dp->d_rawdev);
/*
 * Copy the driver-supplied disk_info into the disk, normalize it, and
 * propagate raw-device I/O limits to the cooked device.  Common helper
 * for disk_setdiskinfo() and disk_setdiskinfo_sync().
 * NOTE(review): the oldserialno declaration and some surrounding lines
 * are missing from this extraction.
 */
479 _setdiskinfo(struct disk *disk, struct disk_info *info)
483 oldserialno = disk->d_info.d_serialno;
484 bcopy(info, &disk->d_info, sizeof(disk->d_info));
/* From here on, operate on our own copy, not the caller's struct. */
485 info = &disk->d_info;
488 * The serial number is duplicated so the caller can throw
/* Non-empty serial number: keep a private copy and expose a
 * serno/<serial> alias for the cooked device. */
491 if (info->d_serialno && info->d_serialno[0]) {
492 info->d_serialno = kstrdup(info->d_serialno, M_TEMP);
494 make_dev_alias(disk->d_cdev, "serno/%s",
498 info->d_serialno = NULL;
/* Release the serial number from any previous setdiskinfo call. */
501 kfree(oldserialno, M_TEMP);
504 * The caller may set d_media_size or d_media_blocks and we
505 * calculate the other.
/* Caller must not claim a media size without a block size. */
507 KKASSERT(info->d_media_size == 0 || info->d_media_blksize == 0);
508 if (info->d_media_size == 0 && info->d_media_blocks) {
509 info->d_media_size = (u_int64_t)info->d_media_blocks *
510 info->d_media_blksize;
511 } else if (info->d_media_size && info->d_media_blocks == 0 &&
512 info->d_media_blksize) {
513 info->d_media_blocks = info->d_media_size /
514 info->d_media_blksize;
518 * The si_* fields for rawdev are not set until after the
519 * disk_create() call, so someone using the cooked version
520 * of the raw device (i.e. da0s0) will not get the right
521 * si_iosize_max unless we fix it up here.
523 if (disk->d_cdev && disk->d_rawdev &&
524 disk->d_cdev->si_iosize_max == 0) {
525 disk->d_cdev->si_iosize_max = disk->d_rawdev->si_iosize_max;
526 disk->d_cdev->si_bsize_phys = disk->d_rawdev->si_bsize_phys;
527 disk->d_cdev->si_bsize_best = disk->d_rawdev->si_bsize_best;
532 * Disk drivers must call this routine when media parameters are available
/*
 * Install media parameters and kick off an asynchronous probe of the
 * disk in the disk message thread.
 */
536 disk_setdiskinfo(struct disk *disk, struct disk_info *info)
538 _setdiskinfo(disk, info);
539 disk_msg_send(DISK_DISK_PROBE, disk, NULL);
/*
 * Same as disk_setdiskinfo() but waits for the probe to complete
 * before returning.
 */
543 disk_setdiskinfo_sync(struct disk *disk, struct disk_info *info)
545 _setdiskinfo(disk, info);
546 disk_msg_send_sync(DISK_DISK_PROBE, disk, NULL);
550 * This routine is called when an adapter detaches. The higher level
551 * managed disk device is destroyed while the lower level raw device is
/*
 * Synchronously tear down the managed disk (devfs nodes, disklist entry,
 * serial number) via the message thread; see DISK_DISK_DESTROY handling
 * in disk_msg_core().
 */
555 disk_destroy(struct disk *disk)
557 disk_msg_send_sync(DISK_DISK_DESTROY, disk, NULL);
/*
 * Validate and compute crash-dump parameters for a dump target device:
 * number of blocks to dump (sized by physical memory), the starting
 * block (dumplo64 offset within the partition), and the sector size.
 * Partition geometry is obtained via the DIOCGPART ioctl.
 * NOTE(review): error-return paths and the final return are missing from
 * this extraction.
 */
562 disk_dumpcheck(cdev_t dev, u_int64_t *count, u_int64_t *blkno, u_int *secsize)
564 struct partinfo pinfo;
567 bzero(&pinfo, sizeof(pinfo));
568 error = dev_dioctl(dev, DIOCGPART, (void *)&pinfo, 0, proc0.p_ucred);
/* No block size means no usable partition info. */
571 if (pinfo.media_blksize == 0)
/* Dump size: all of physical memory, expressed in media blocks. */
573 *count = (u_int64_t)Maxmem * PAGE_SIZE / pinfo.media_blksize;
/* The dump must fit after the reserved blocks and inside the partition. */
574 if (dumplo64 < pinfo.reserved_blocks ||
575 dumplo64 + *count > pinfo.media_blocks) {
578 *blkno = dumplo64 + pinfo.media_offset / pinfo.media_blksize;
579 *secsize = pinfo.media_blksize;
/*
 * Synchronously remove the disk's probed sub-devices (slices and
 * partitions) without destroying the disk itself.
 */
584 disk_unprobe(struct disk *disk)
589 disk_msg_send_sync(DISK_UNPROBE, disk, NULL);
/* Throw away the in-memory slice/label state for the disk. */
593 disk_invalidate (struct disk *disk)
596 dsgone(&disk->d_slice);
/*
 * Iterate the global disk list: NULL starts the walk, a previous result
 * continues it.  The disklist token is held only across the list access,
 * so the walk is not atomic as a whole.
 */
600 disk_enumerate(struct disk *disk)
605 lwkt_gettoken(&ilock, &disklist_token);
607 dp = (LIST_FIRST(&disklist));
609 dp = (LIST_NEXT(disk, d_list));
610 lwkt_reltoken(&ilock);
/*
 * kern.disks sysctl handler: emit the raw-device names of all registered
 * disks as a single space-separated, NUL-terminated string.
 * NOTE(review): local declarations and error-bailout lines are missing
 * from this extraction.
 */
617 sysctl_disks(SYSCTL_HANDLER_ARGS)
625 while ((disk = disk_enumerate(disk))) {
/* Separator before every entry after the first. */
627 error = SYSCTL_OUT(req, " ", 1);
633 error = SYSCTL_OUT(req, disk->d_rawdev->si_name,
634 strlen(disk->d_rawdev->si_name));
/* Terminating NUL. */
638 error = SYSCTL_OUT(req, "", 1);
642 SYSCTL_PROC(_kern, OID_AUTO, disks, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
643 sysctl_disks, "A", "names of available disks");
646 * Open a disk device or partition.
/*
 * d_open for the cooked disk device: serialize concurrent opens with a
 * DISKFLAG_LOCK/WANTED handshake, open the underlying raw device the
 * first time any slice is opened, then open the requested slice via
 * dsopen().  On dsopen failure with no slices left open, the raw device
 * is closed again.
 * NOTE(review): several lines (dp initialization, error checks, pdev
 * setup, wakeup, return) are missing from this extraction.
 */
650 diskopen(struct dev_open_args *ap)
652 cdev_t dev = ap->a_head.a_dev;
657 * dp can't be NULL here XXX.
665 * Deal with open races
/* Sleep until the current opener drops DISKFLAG_LOCK. */
667 while (dp->d_flags & DISKFLAG_LOCK) {
668 dp->d_flags |= DISKFLAG_WANTED;
669 error = tsleep(dp, PCATCH, "diskopen", hz);
673 dp->d_flags |= DISKFLAG_LOCK;
676 * Open the underlying raw device.
/* Only the first open (no slice currently open) touches the raw dev. */
678 if (!dsisopen(dp->d_slice)) {
/* Presumably pdev is the raw device here -- its declaration is not
 * visible in this extraction. */
680 if (!pdev->si_iosize_max)
681 pdev->si_iosize_max = dev->si_iosize_max;
683 error = dev_dopen(dp->d_rawdev, ap->a_oflags,
684 ap->a_devtype, ap->a_cred);
688 * Inherit properties from the underlying device now that it is
696 error = dsopen(dev, ap->a_devtype, dp->d_info.d_dsflags,
697 &dp->d_slice, &dp->d_info);
/* dsopen failed and nothing is open: release the raw device. */
698 if (!dsisopen(dp->d_slice)) {
699 dev_dclose(dp->d_rawdev, ap->a_oflags, ap->a_devtype);
/* Drop the open lock and wake any waiters. */
702 dp->d_flags &= ~DISKFLAG_LOCK;
703 if (dp->d_flags & DISKFLAG_WANTED) {
704 dp->d_flags &= ~DISKFLAG_WANTED;
712 * Close a disk device or partition
/*
 * d_close for the cooked device: close the slice, and when that was the
 * last open slice, close the underlying raw device too.
 * NOTE(review): dp initialization and the return are missing from this
 * extraction.
 */
716 diskclose(struct dev_close_args *ap)
718 cdev_t dev = ap->a_head.a_dev;
725 dsclose(dev, ap->a_devtype, dp->d_slice);
726 if (!dsisopen(dp->d_slice)) {
727 error = dev_dclose(dp->d_rawdev, ap->a_fflag, ap->a_devtype);
733 * First execute the ioctl on the disk device, and if it isn't supported
734 * try running it on the backing device.
738 diskioctl(struct dev_ioctl_args *ap)
740 cdev_t dev = ap->a_head.a_dev;
748 devfs_debug(DEVFS_DEBUG_DEBUG,
749 "diskioctl: cmd is: %x (name: %s)\n",
750 ap->a_cmd, dev->si_name);
751 devfs_debug(DEVFS_DEBUG_DEBUG,
752 "diskioctl: &dp->d_slice is: %x, %x\n",
753 &dp->d_slice, dp->d_slice);
/* Let the slice layer try first... */
755 error = dsioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag,
756 &dp->d_slice, &dp->d_info);
/* ...and forward unsupported ioctls to the raw device. */
758 if (error == ENOIOCTL) {
759 error = dev_dioctl(dp->d_rawdev, ap->a_cmd, ap->a_data,
760 ap->a_fflag, ap->a_cred);
766 * Execute strategy routine
/*
 * d_strategy for the cooked device: validate the bio against the slice
 * via dscheck() (which also translates the slice-relative offset) and
 * forward it to the raw device.  If dscheck() handles or rejects the
 * bio, nothing further is issued here.
 * NOTE(review): dp/nbio declarations and the NULL-disk error path's
 * biodone are missing from this extraction.
 */
770 diskstrategy(struct dev_strategy_args *ap)
772 cdev_t dev = ap->a_head.a_dev;
773 struct bio *bio = ap->a_bio;
/* No disk behind this device: fail the buffer with ENXIO. */
780 bio->bio_buf->b_error = ENXIO;
781 bio->bio_buf->b_flags |= B_ERROR;
785 KKASSERT(dev->si_disk == dp);
788 * The dscheck() function will also transform the slice relative
789 * block number i.e. bio->bio_offset into a block number that can be
790 * passed directly to the underlying raw device. If dscheck()
791 * returns NULL it will have handled the bio for us (e.g. EOF
792 * or error due to being beyond the device size).
794 if ((nbio = dscheck(dev, bio, dp->d_slice)) != NULL) {
795 dev_dstrategy(dp->d_rawdev, nbio);
803 * Return the partition size in ?blocks?
807 diskpsize(struct dev_psize_args *ap)
809 cdev_t dev = ap->a_head.a_dev;
815 ap->a_result = dssize(dev, &dp->d_slice);
820 * When new device entries are instantiated, make sure they inherit our
821 * si_disk structure and block and iosize limits from the raw device.
823 * This routine is always called synchronously in the context of the
826 * XXX The various io and block size constraints are not always initialized
827 * properly by devices.
/*
 * d_clone: when a new cooked device node is instantiated, inherit the
 * raw device's I/O size and block size limits (see the block comment
 * above this function).
 * NOTE(review): the si_disk assignment presumably precedes these lines;
 * it is missing from this extraction.
 */
831 diskclone(struct dev_clone_args *ap)
833 cdev_t dev = ap->a_head.a_dev;
837 KKASSERT(dp != NULL);
839 dev->si_iosize_max = dp->d_rawdev->si_iosize_max;
840 dev->si_bsize_phys = dp->d_rawdev->si_bsize_phys;
841 dev->si_bsize_best = dp->d_rawdev->si_bsize_best;
/*
 * d_dump: validate dump parameters against the partition, then redirect
 * the dump operation to the underlying raw device.
 */
846 diskdump(struct dev_dump_args *ap)
848 cdev_t dev = ap->a_head.a_dev;
849 struct disk *dp = dev->si_disk;
852 error = disk_dumpcheck(dev, &ap->a_count, &ap->a_blkno, &ap->a_secsize);
854 ap->a_head.a_dev = dp->d_rawdev;
855 error = dev_doperate(&ap->a_head);
/* Expose structure sizes for debugging (debug.sizeof.*). */
862 SYSCTL_INT(_debug_sizeof, OID_AUTO, diskslices, CTLFLAG_RD,
863 0, sizeof(struct diskslices), "sizeof(struct diskslices)");
865 SYSCTL_INT(_debug_sizeof, OID_AUTO, disk, CTLFLAG_RD,
866 0, sizeof(struct disk), "sizeof(struct disk)");
869 * How sorted do we want to be? The higher the number the harder we try
870 * to sort, but also the higher the risk of bio's getting starved do
871 * to insertions in front of them.
/* Every bioq_barrier-th insertion is forced ordered in bioqdisksort()
 * to bound request starvation; tunable via kern.bioq_barrier. */
873 static int bioq_barrier = 16;
874 SYSCTL_INT(_kern, OID_AUTO, bioq_barrier, CTLFLAG_RW, &bioq_barrier, 0, "");
878 * Seek sort for disks.
880 * The bio_queue keep two queues, sorted in ascending block order. The first
881 * queue holds those requests which are positioned after the current block
882 * (in the first request); the second, which starts at queue->switch_point,
883 * holds requests which came in after their block number was passed. Thus
884 * we implement a one way scan, retracting after reaching the end of the drive
885 * to the first request on the second queue, at which time it becomes the
888 * A one-way scan is natural because of the way UNIX read-ahead blocks are
/*
 * One-way elevator (C-SCAN style) insertion sort for the bio queue; see
 * the algorithm description in the block comment above this function.
 * Two logical sublists live in one TAILQ: requests ahead of the current
 * head position, then (from switch_point on) requests that fell behind.
 * NOTE(review): local declarations (bq, bn) and several closing
 * braces/returns are missing from this extraction; insertion-order
 * details in the gaps cannot be verified here.
 */
892 bioqdisksort(struct bio_queue_head *bioq, struct bio *bio)
/* be: current tail, candidate for the fast sequential-append path. */
898 be = TAILQ_LAST(&bioq->queue, bio_queue);
901 * If the queue is empty or we are an
902 * ordered transaction, then it's easy.
904 if ((bq = bioq_first(bioq)) == NULL ||
905 (bio->bio_buf->b_flags & B_ORDERED) != 0) {
906 bioq_insert_tail(bioq, bio);
911 * Avoid permanent request starvation by forcing the request to
912 * be ordered every 16 requests. Without this long sequential
913 * write pipelines can prevent requests later in the queue from
914 * getting serviced for many seconds.
916 if (++bioq->order_count >= bioq_barrier) {
917 bioq_insert_tail_order(bioq, bio, 1);
921 if (bioq->insert_point != NULL) {
923 * A certain portion of the list is
924 * "locked" to preserve ordering, so
925 * we can only insert after the insert
928 bq = bioq->insert_point;
931 * If we lie before the last removed (currently active)
932 * request, and are not inserting ourselves into the
933 * "locked" portion of the list, then we must add ourselves
934 * to the second request list.
936 if (bio->bio_offset < bioq->last_offset) {
937 bq = bioq->switch_point;
940 * If we are starting a new secondary list,
944 bioq->switch_point = bio;
945 bioq_insert_tail(bioq, bio);
950 * If we lie ahead of the current switch point,
951 * insert us before the switch point and move
954 if (bio->bio_offset < bq->bio_offset) {
955 bioq->switch_point = bio;
956 TAILQ_INSERT_BEFORE(bq, bio, bio_act);
/* Primary list: never scan past the start of the secondary list. */
960 if (bioq->switch_point != NULL)
961 be = TAILQ_PREV(bioq->switch_point,
964 * If we lie between last_offset and bq,
967 if (bio->bio_offset < bq->bio_offset) {
968 TAILQ_INSERT_BEFORE(bq, bio, bio_act);
975 * Request is at/after our current position in the list.
976 * Optimize for sequential I/O by seeing if we go at the tail.
978 if (bio->bio_offset > be->bio_offset) {
979 TAILQ_INSERT_AFTER(&bioq->queue, be, bio, bio_act);
983 /* Otherwise, insertion sort */
984 while ((bn = TAILQ_NEXT(bq, bio_act)) != NULL) {
986 * We want to go after the current request if it is the end
987 * of the first request list, or if the next request is a
988 * larger cylinder than our request.
990 if (bn == bioq->switch_point ||
991 bio->bio_offset < bn->bio_offset) {
996 TAILQ_INSERT_AFTER(&bioq->queue, bq, bio, bio_act);
1000 * Disk error is the preface to plaintive error messages
1001 * about failing disk transfers. It prints messages of the form
1003 hp0g: hard error reading fsbn 12345 of 12344-12347 (hp0 bn %d cn %d tn %d sn %d)
1005 * if the offset of the error in the transfer and a disk label
1006 * are both available. blkdone should be -1 if the position of the error
1007 * is unknown; the disklabel pointer may be null from drivers that have not
1008 * been converted to use them. The message is printed with kprintf
1009 * if pri is LOG_PRINTF, otherwise it uses log at the specified priority.
1010 * The message should be completed (with at least a newline) with kprintf
1011 * or log(-1, ...), respectively. There is no trailing space.
/*
 * Print the preface of a disk-transfer error message (see the format
 * description in the block comment above); the caller completes the
 * line.  donecnt > 0 reports how many bytes completed before the error.
 * NOTE(review): the 'term' setup and the LOG_PRINTF/log() branching
 * described above are missing from this extraction.
 */
1014 diskerr(struct bio *bio, cdev_t dev, const char *what, int pri, int donecnt)
1016 struct buf *bp = bio->bio_buf;
1030 kprintf("%s: %s %sing ", dev->si_name, what, term);
1031 kprintf("offset %012llx for %d",
1032 (long long)bio->bio_offset,
1036 kprintf(" (%d bytes completed)", donecnt);
1040 * Locate a disk device
/* Look up a disk device node by name via devfs. */
1043 disk_locate(const char *devname)
1045 return devfs_find_device_by_name(devname);
/* Synchronously flush/sync the disk subsystem via the message thread. */
1049 disk_config(void *arg)
1051 disk_msg_send_sync(DISK_SYNC, NULL, NULL);
/*
 * Subsystem initialization (SYSINIT below): set up the message object
 * cache, the disklist token, the autofree reply port, and spawn the
 * disk_msg_core service thread.
 * NOTE(review): the function signature lines for disk_init/disk_uninit
 * are missing from this extraction.
 */
1057 struct thread* td_core;
1059 disk_msg_cache = objcache_create("disk-msg-cache", 0, 0,
1061 objcache_malloc_alloc,
1062 objcache_malloc_free,
1063 &disk_msg_malloc_args);
1065 lwkt_token_init(&disklist_token);
1068 * Initialize the reply-only port which acts as a message drain
1070 lwkt_initport_replyonly(&disk_dispose_port, disk_msg_autofree_reply);
1072 lwkt_create(disk_msg_core, /*args*/NULL, &td_core, NULL,
1073 0, 0, "disk_msg_core");
/* Wait for the service thread to come up before drivers register. */
1075 tsleep(td_core, 0, "diskcore", 0);
/* Teardown counterpart (SYSUNINIT below). */
1081 objcache_destroy(disk_msg_cache);
1084 SYSINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_FIRST, disk_init, NULL);
1085 SYSUNINIT(disk_register, SI_SUB_PRE_DRIVERS, SI_ORDER_ANY, disk_uninit, NULL);