/*
* Copyright (C) 2001 Sistina Software (UK) Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*
* This file is released under the LGPL.
*/
#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>

struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
union map_info {
        void *ptr;
        unsigned long long ll;
};
/*
* In the constructor the target parameter will already have the
* table, type, begin and len fields filled in.
*/
typedef int (*dm_ctr_fn) (struct dm_target *target,
                          unsigned int argc, char **argv);
/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);
/*
 * The map function must return:
 * < 0: error
 * = 0: the target will handle the io by resubmitting it later
 *      (DM_MAPIO_SUBMITTED)
 * = 1: simple remap complete (DM_MAPIO_REMAPPED)
 * = 2: the target wants to push back the io (DM_MAPIO_REQUEUE)
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio,
                          union map_info *map_context);
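/*
 * Example -- an illustrative sketch only, closely modelled on a simple
 * linear remap.  The names example_map and example_c (a context holding
 * the underlying dm_dev and a start offset, set up by the target's
 * constructor) are hypothetical and not part of this interface.  The bio
 * is redirected to the underlying device and offset, then handed back to
 * the core with DM_MAPIO_REMAPPED (defined below):
 *
 *	struct example_c {
 *		struct dm_dev *dev;
 *		sector_t start;
 *	};
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio,
 *	                       union map_info *map_context)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		bio->bi_bdev = ec->dev->bdev;
 *		bio->bi_sector = ec->start + (bio->bi_sector - ti->begin);
 *
 *		return DM_MAPIO_REMAPPED;
 *	}
 */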
/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : the io has for some reason not yet completed, eg. a
 *       multipath target might want to requeue a failed io
 *       (DM_ENDIO_INCOMPLETE)
 * 2   : the target wants to push back the io (DM_ENDIO_REQUEUE)
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
                            struct bio *bio, int error,
                            union map_info *map_context);
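/*
 * Example -- an illustrative sketch only; example_end_io is a hypothetical
 * name.  A multipath-style target might push a failed io back to the core
 * (to be retried once the device is resumed) while a noflush suspend is in
 * progress, and otherwise complete it normally:
 *
 *	static int example_end_io(struct dm_target *ti, struct bio *bio,
 *	                          int error, union map_info *map_context)
 *	{
 *		if (error && dm_noflush_suspending(ti))
 *			return DM_ENDIO_REQUEUE;
 *
 *		return 0;
 *	}
 */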
typedef void (*dm_flush_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);
typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
                             char *result, unsigned int maxlen);
typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
                            unsigned long arg);
typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
                            struct bio_vec *biovec, int max_size);
void dm_error(const char *message);
/*
* Combine device limits.
*/
void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev);
struct dm_dev {
        struct block_device *bdev;
        fmode_t mode;
        char name[16];
};
/*
* Constructors should call these functions to ensure destination devices
* are opened/closed correctly.
* FIXME: too many arguments.
*/
int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
                  sector_t len, fmode_t mode, struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
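/*
 * Example -- an illustrative sketch only (error codes, argument layout and
 * all example_* names are hypothetical; the example_c context is the one
 * shown above).  A constructor for a pass-through target taking
 * "<device path> <offset>" would typically allocate a context, parse its
 * arguments, grab the destination device with dm_get_device() and stash the
 * context in ti->private; the destructor undoes this with dm_put_device().
 * On failure the constructor sets ti->error to a short message for the
 * caller.
 *
 *	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		struct example_c *ec;
 *		unsigned long long start;
 *
 *		if (argc != 2) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *
 *		ec = kmalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *
 *		if (sscanf(argv[1], "%llu", &start) != 1) {
 *			ti->error = "Invalid device offset";
 *			goto bad;
 *		}
 *		ec->start = (sector_t) start;
 *
 *		if (dm_get_device(ti, argv[0], ec->start, ti->len,
 *				  dm_table_get_mode(ti->table), &ec->dev)) {
 *			ti->error = "Device lookup failed";
 *			goto bad;
 *		}
 *
 *		ti->private = ec;
 *		return 0;
 *
 *	bad:
 *		kfree(ec);
 *		return -EINVAL;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		dm_put_device(ti, ec->dev);
 *		kfree(ec);
 *	}
 */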
/*
* Information about a target type
*/
struct target_type {
        const char *name;
        struct module *module;
        unsigned version[3];
        dm_ctr_fn ctr;
        dm_dtr_fn dtr;
        dm_map_fn map;
        dm_endio_fn end_io;
        dm_flush_fn flush;
        dm_presuspend_fn presuspend;
        dm_postsuspend_fn postsuspend;
        dm_preresume_fn preresume;
        dm_resume_fn resume;
        dm_status_fn status;
        dm_message_fn message;
        dm_ioctl_fn ioctl;
        dm_merge_fn merge;
};
struct io_restrictions {
        unsigned long bounce_pfn;
        unsigned long seg_boundary_mask;
        unsigned max_hw_sectors;
        unsigned max_sectors;
        unsigned max_segment_size;
        unsigned short hardsect_size;
        unsigned short max_hw_segments;
        unsigned short max_phys_segments;
        unsigned char no_cluster; /* inverted so that 0 is default */
};
struct dm_target {
        struct dm_table *table;
        struct target_type *type;

        /* target limits */
        sector_t begin;
        sector_t len;

        /* FIXME: turn this into a mask, and merge with io_restrictions */
        /* Always a power of 2 */
        sector_t split_io;

        /*
         * These are automatically filled in by
         * dm_table_get_device.
         */
        struct io_restrictions limits;

        /* target specific data */
        void *private;

        /* Used to provide an error string from the ctr */
        char *error;
};
int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);
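/*
 * Example -- an illustrative sketch only; the example_* names are
 * hypothetical and DM_MSG_PREFIX is assumed to be defined by the
 * translation unit (see the logging macros below).  A target type is
 * normally declared statically and registered/unregistered from module
 * init/exit:
 *
 *	static struct target_type example_target = {
 *		.name    = "example",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = example_ctr,
 *		.dtr     = example_dtr,
 *		.map     = example_map,
 *	};
 *
 *	static int __init dm_example_init(void)
 *	{
 *		int r = dm_register_target(&example_target);
 *
 *		if (r < 0)
 *			DMERR("register failed %d", r);
 *
 *		return r;
 *	}
 *
 *	static void __exit dm_example_exit(void)
 *	{
 *		dm_unregister_target(&example_target);
 *	}
 *
 *	module_init(dm_example_init);
 *	module_exit(dm_example_exit);
 */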
/*-----------------------------------------------------------------
* Functions for creating and manipulating mapped devices.
* Drop the reference with dm_put when you finish with the object.
*---------------------------------------------------------------*/
/*
* DM_ANY_MINOR chooses the next available minor number.
*/
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);
/*
* Reference counting for md.
*/
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
void dm_put(struct mapped_device *md);
/*
* An arbitrary pointer may be stored alongside a mapped device.
*/
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);
/*
* A device can still be used while suspended, but I/O is deferred.
*/
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);
/*
* Event functions.
*/
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);
/*
* Info functions.
*/
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct mapped_device *md);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);
/*
* Geometry functions.
*/
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
/*-----------------------------------------------------------------
* Functions for manipulating device-mapper tables.
*---------------------------------------------------------------*/
/*
* First create an empty table.
*/
int dm_table_create(struct dm_table **result, fmode_t mode,
                    unsigned num_targets, struct mapped_device *md);
/*
* Then call this once for each target.
*/
int dm_table_add_target(struct dm_table *t, const char *type,
                        sector_t start, sector_t len, char *params);
/*
* Finally call this to make the table ready for use.
*/
int dm_table_complete(struct dm_table *t);
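/*
 * Example -- an illustrative sketch only (the sizes, mode and target line
 * are made up); this sequence is normally driven by the device-mapper ioctl
 * layer rather than open-coded.  Building a one-target, 1024-sector table
 * for an existing mapped device "md":
 *
 *	struct dm_table *t;
 *	char params[] = "/dev/sdb1 0";
 *	int r;
 *
 *	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *	if (!r)
 *		r = dm_table_add_target(t, "linear", 0, 1024, params);
 *	if (!r)
 *		r = dm_table_complete(t);
 */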
/*
* Unplug all devices in a table.
*/
void dm_table_unplug_all(struct dm_table *t);
/*
* Table reference counting.
*/
struct dm_table *dm_get_table(struct mapped_device *md);
void dm_table_get(struct dm_table *t);
void dm_table_put(struct dm_table *t);
/*
* Queries
*/
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
/*
* Trigger an event.
*/
void dm_table_event(struct dm_table *t);
/*
* The device must be suspended before calling this method.
*/
int dm_swap_table(struct mapped_device *md, struct dm_table *t);
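/*
 * Example -- a sketch of the sequence used when loading a new table into a
 * live device (error handling and the suspend flags are elided):
 *
 *	if (!dm_suspended(md))
 *		dm_suspend(md, suspend_flags);
 *	dm_swap_table(md, new_table);
 *	dm_resume(md);
 */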
/*
* A wrapper around vmalloc.
*/
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
/*-----------------------------------------------------------------
* Macros.
*---------------------------------------------------------------*/
#define DM_NAME "device-mapper"
#define DMCRIT(f, arg...) \
        printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)

#define DMERR(f, arg...) \
        printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMERR_LIMIT(f, arg...) \
        do { \
                if (printk_ratelimit()) \
                        printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
                               f "\n", ## arg); \
        } while (0)

#define DMWARN(f, arg...) \
        printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMWARN_LIMIT(f, arg...) \
        do { \
                if (printk_ratelimit()) \
                        printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
                               f "\n", ## arg); \
        } while (0)

#define DMINFO(f, arg...) \
        printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMINFO_LIMIT(f, arg...) \
        do { \
                if (printk_ratelimit()) \
                        printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
                               "\n", ## arg); \
        } while (0)
#ifdef CONFIG_DM_DEBUG
# define DMDEBUG(f, arg...) \
        printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
# define DMDEBUG_LIMIT(f, arg...) \
        do { \
                if (printk_ratelimit()) \
                        printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f \
                               "\n", ## arg); \
        } while (0)
#else
# define DMDEBUG(f, arg...) do {} while (0)
# define DMDEBUG_LIMIT(f, arg...) do {} while (0)
#endif
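/*
 * Example -- each target's .c file defines DM_MSG_PREFIX before using the
 * macros above, e.g.
 *
 *	#define DM_MSG_PREFIX "example"
 *
 *	DMWARN("%s: %d retries left", dm_device_name(md), retries);
 *
 * which prints "device-mapper: example: <device name>: <n> retries left"
 * at KERN_WARNING level ("md" and "retries" are illustrative only).
 */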
/*
 * DMEMIT appends formatted output to the status buffer; it expects the
 * 'result' and 'maxlen' arguments of a dm_status_fn plus a local byte
 * counter named 'sz' to be in scope.
 */
#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
                          0 : scnprintf(result + sz, maxlen - sz, x))
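/*
 * Example -- an illustrative sketch only, reusing the hypothetical
 * example_c context from above.  A typical status function emits nothing
 * for STATUSTYPE_INFO and reconstructs its constructor arguments for
 * STATUSTYPE_TABLE:
 *
 *	static int example_status(struct dm_target *ti, status_type_t type,
 *	                          char *result, unsigned int maxlen)
 *	{
 *		struct example_c *ec = ti->private;
 *		unsigned int sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s %llu", ec->dev->name,
 *			       (unsigned long long) ec->start);
 *			break;
 *		}
 *		return 0;
 *	}
 */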
#define SECTOR_SHIFT 9
/*
* Definitions of return values from target end_io function.
*/
#define DM_ENDIO_INCOMPLETE 1
#define DM_ENDIO_REQUEUE 2
/*
* Definitions of return values from target map function.
*/
#define DM_MAPIO_SUBMITTED 0
#define DM_MAPIO_REMAPPED 1
#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
/*
* Ceiling(n / sz)
*/
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))
#define dm_sector_div_up(n, sz) ( \
{ \
        sector_t _r = ((n) + (sz) - 1); \
        sector_div(_r, (sz)); \
        _r; \
} \
)
/*
 * Ceiling(n / sz) * sz
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
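/*
 * For example, dm_div_up(10, 4) == 3 and dm_round_up(10, 4) == 12.
 */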
#define dm_array_too_big(fixed, obj, num) \
((num) > (UINT_MAX - (fixed)) / (obj))
static inline sector_t to_sector(unsigned long n)
{
        return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
        return (n << SECTOR_SHIFT);
}
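/*
 * For example, with 512-byte sectors to_sector(4096) == 8 and
 * to_bytes(8) == 4096.
 */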
#endif /* _LINUX_DEVICE_MAPPER_H */