/*
 * Internal header file for device mapper
 *
 * Copyright (C) 2001, 2002 Sistina Software
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_INTERNAL_H
#define DM_INTERNAL_H
#include <linux/device-mapper.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/refcount.h>
#include <linux/log2.h>
/*
 * Suspend feature flags
 */
#define DM_SUSPEND_LOCKFS_FLAG		(1 << 0)
#define DM_SUSPEND_NOFLUSH_FLAG		(1 << 1)
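/*
 * Note: callers OR the flags above together and pass them as the
 * suspend_flags argument to dm_suspend() (declared in
 * <linux/device-mapper.h>).
 */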
/*
 * Status feature flags
 */
#define DM_STATUS_NOFLUSH_FLAG		(1 << 0)
/*
 * List of devices that a metadevice uses and should open/close.
 */
struct dm_dev_internal {
	struct list_head list;
	refcount_t count;
	struct dm_dev *dm_dev;
};

struct dm_table;
struct dm_md_mempools;
struct dm_target_io;
struct dm_io;
/*
 *---------------------------------------------------------------
 * Internal table functions.
 *---------------------------------------------------------------
 */
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
bool dm_table_has_no_data_devices(struct dm_table *table);
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits);
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			      struct queue_limits *limits);
struct list_head *dm_table_get_devices(struct dm_table *t);
void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_presuspend_undo_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
enum dm_queue_mode dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
bool dm_table_bio_based(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type);
enum dm_queue_mode dm_get_md_type(struct mapped_device *md);
struct target_type *dm_get_immutable_target_type(struct mapped_device *md);

int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
/*
 * To check whether the target type is bio-based or not (request-based).
 */
#define dm_target_bio_based(t) ((t)->type->map != NULL)

/*
 * To check whether the target type is request-based or not (bio-based).
 */
#define dm_target_request_based(t) ((t)->type->clone_and_map_rq != NULL)

/*
 * To check whether the target type is a hybrid (capable of being
 * either request-based or bio-based).
 */
#define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t))
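/*
 * Illustrative example (not a real target; names are hypothetical):
 * a target_type that fills in both hooks satisfies dm_target_hybrid()
 * and can service either queue mode:
 *
 *	static struct target_type example_target = {
 *		.name		  = "example",
 *		.map		  = example_map,		// bio-based path
 *		.clone_and_map_rq = example_clone_and_map,	// request-based path
 *	};
 */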
/*
 * Zoned targets related functions.
 */
int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
			      struct queue_limits *lim);
int dm_revalidate_zones(struct dm_table *t, struct request_queue *q);
void dm_zone_endio(struct dm_io *io, struct bio *clone);
#ifdef CONFIG_BLK_DEV_ZONED
int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
bool dm_is_zone_write(struct mapped_device *md, struct bio *bio);
int dm_zone_map_bio(struct dm_target_io *io);
int dm_zone_get_reset_bitmap(struct mapped_device *md, struct dm_table *t,
			     sector_t sector, unsigned int nr_zones,
			     unsigned long *need_reset);
#else
#define dm_blk_report_zones	NULL
static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
{
	return false;
}
static inline int dm_zone_map_bio(struct dm_target_io *tio)
{
	return DM_MAPIO_KILL;
}
#endif
/*
 *---------------------------------------------------------------
 * A registry of target types.
 *---------------------------------------------------------------
 */
int dm_target_init(void);
void dm_target_exit(void);
struct target_type *dm_get_target_type(const char *name);
void dm_put_target_type(struct target_type *tt);
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param);
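/*
 * dm_split_args() below splits a string into an argv-style vector.
 * Sketch of the expected behaviour on whitespace-separated words
 * (assuming no quoting or escaping is involved):
 *
 *	input:  "0 1024 linear /dev/sda 0"
 *	result: argc = 5, argv = { "0", "1024", "linear", "/dev/sda", "0" }
 *
 * The caller is expected to kfree() *argvp when done.
 */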
int dm_split_args(int *argc, char ***argvp, char *input);

/*
 * Is this mapped_device being deleted?
 */
int dm_deleting_md(struct mapped_device *md);

/*
 * Is this mapped_device suspended?
 */
int dm_suspended_md(struct mapped_device *md);

/*
 * Internal suspend and resume methods.
 */
int dm_suspended_internally_md(struct mapped_device *md);
void dm_internal_suspend_fast(struct mapped_device *md);
void dm_internal_resume_fast(struct mapped_device *md);
void dm_internal_suspend_noflush(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);

/*
 * Test if the device is scheduled for deferred remove.
 */
int dm_test_deferred_remove_flag(struct mapped_device *md);

/*
 * Try to remove devices marked for deferred removal.
 */
void dm_deferred_remove(void);
/*
 * The device-mapper can be driven through one of two interfaces:
 * ioctl or filesystem, depending on which patch you have applied.
 */
int dm_interface_init(void);
void dm_interface_exit(void);
int dm_sysfs_init(struct mapped_device *md);
void dm_sysfs_exit(struct mapped_device *md);
struct kobject *dm_kobject(struct mapped_device *md);
struct mapped_device *dm_get_from_kobject(struct kobject *kobj);

void dm_kobject_release(struct kobject *kobj);
/*
 * Targets for linear and striped mappings
 */
int linear_map(struct dm_target *ti, struct bio *bio);
int dm_linear_init(void);
void dm_linear_exit(void);

int stripe_map(struct dm_target *ti, struct bio *bio);
int dm_stripe_init(void);
void dm_stripe_exit(void);
/*
 * mapped_device operations
 */
void dm_destroy(struct mapped_device *md);
void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md);
int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode,
			struct dm_dev **result);
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);

int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned int cookie, bool need_resize_uevent);
int dm_io_init(void);
void dm_io_exit(void);

int dm_kcopyd_init(void);
void dm_kcopyd_exit(void);

void dm_free_md_mempools(struct dm_md_mempools *pools);

unsigned int dm_get_reserved_bio_based_ios(void);

#define DM_HASH_LOCKS_MAX 64
/*
 * Size a striped-lock array: twice the number of online CPUs rounded
 * up to a power of two, capped at DM_HASH_LOCKS_MAX (so the result is
 * always itself a power of two).
 */
static inline unsigned int dm_num_hash_locks(void)
{
	unsigned int num_locks = roundup_pow_of_two(num_online_cpus()) << 1;

	return min_t(unsigned int, num_locks, DM_HASH_LOCKS_MAX);
}
/*
 * DM_HASH_LOCKS_MULT is a large prime (2^32 - 5) used as a
 * multiplicative hash constant by dm_hash_locks_index() below.
 */
#define DM_HASH_LOCKS_MULT  4294967291ULL
#define DM_HASH_LOCKS_SHIFT 6
static inline unsigned int dm_hash_locks_index(sector_t block,
					       unsigned int num_locks)
{
	sector_t h1 = (block * DM_HASH_LOCKS_MULT) >> DM_HASH_LOCKS_SHIFT;
	sector_t h2 = h1 >> DM_HASH_LOCKS_SHIFT;

	/* Mix two shifted copies of the hash; num_locks must be a power of two. */
	return (h1 ^ h2) & (num_locks - 1);
}
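/*
 * Usage sketch (illustrative only; locks[] is a hypothetical
 * caller-owned array): the two helpers above are meant to be paired
 * to stripe a lock array by block number, e.g.:
 *
 *	unsigned int num_locks = dm_num_hash_locks();
 *	...
 *	spin_lock(&locks[dm_hash_locks_index(block, num_locks)]);
 */

#endif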