ab4c142488
Implement barrier support for single device DM devices

This patch implements barrier support in DM for the common case of dm
linear just remapping a single underlying device. In this case we can
safely pass the barrier through because there can be no reordering
between devices.

NB. Any DM device might cease to support barriers if it gets
reconfigured, so code must continue to allow for a possible -EOPNOTSUPP
on every barrier bio submitted. - agk

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
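As an illustrative sketch (not part of this patch), a bio-based target that
maps to a single underlying device could advertise barrier support through
the new target_type 'features' field defined in the header below; the
"example" name and the example_* callbacks are hypothetical:

static struct target_type example_target = {
	.name     = "example",
	.version  = {1, 0, 0},
	.module   = THIS_MODULE,
	.features = DM_TARGET_SUPPORTS_BARRIERS,
	.ctr      = example_ctr,
	.dtr      = example_dtr,
	.map      = example_map,
};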
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>

struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
	unsigned long long ll;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio,
			  union map_info *map_context);
typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
				  union map_info *map_context);
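/*
 * Illustrative sketch (not part of this header): a simple bio-based map
 * function, in the style of dm-linear, remaps the bio to the underlying
 * device and returns DM_MAPIO_REMAPPED (defined below).  The example_c
 * context and its fields are hypothetical.
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio,
 *			       union map_info *map_context)
 *	{
 *		struct example_c *ec = ti->private;
 *
 *		bio->bi_bdev = ec->dev->bdev;
 *		bio->bi_sector = ec->start + (bio->bi_sector - ti->begin);
 *
 *		return DM_MAPIO_REMAPPED;
 *	}
 */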
/*
 * Returns:
 * < 0 : error (currently ignored)
 *   0 : ended successfully
 *   1 : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 *   2 : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, int error,
			    union map_info *map_context);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, int error,
				    union map_info *map_context);
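/*
 * Illustrative sketch (not part of this header): an end_io hook that asks
 * the core to requeue a failed io, as a multipath-style target might.
 * DM_ENDIO_REQUEUE is defined below; the example function is hypothetical.
 *
 *	static int example_end_io(struct dm_target *ti, struct bio *bio,
 *				  int error, union map_info *map_context)
 *	{
 *		if (error)
 *			return DM_ENDIO_REQUEUE;
 *		return 0;
 *	}
 */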
typedef void (*dm_flush_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			     char *result, unsigned int maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
			    unsigned long arg);

typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
			    struct bio_vec *biovec, int max_size);

/*
 * Returns:
 * 0: The target can handle the next I/O immediately.
 * 1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

void dm_error(const char *message);

/*
 * Combine device limits.
 */
void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev);

struct dm_dev {
	struct block_device *bdev;
	fmode_t mode;
	char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 * FIXME: too many arguments.
 */
int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
		  sector_t len, fmode_t mode, struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
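/*
 * Illustrative sketch (not part of this header): a constructor, in the
 * style of dm-linear, parses its arguments and takes a reference on the
 * destination device with dm_get_device(); the matching destructor drops
 * it with dm_put_device().  The example_c context is hypothetical.
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc,
 *			       char **argv)
 *	{
 *		struct example_c *ec;
 *		unsigned long long tmp;
 *
 *		if (argc != 2) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *		ec = kmalloc(sizeof(*ec), GFP_KERNEL);
 *		if (!ec) {
 *			ti->error = "Cannot allocate context";
 *			return -ENOMEM;
 *		}
 *		if (sscanf(argv[1], "%llu", &tmp) != 1)
 *			goto bad;
 *		ec->start = tmp;
 *		if (dm_get_device(ti, argv[0], ec->start, ti->len,
 *				  dm_table_get_mode(ti->table), &ec->dev))
 *			goto bad;
 *		ti->private = ec;
 *		return 0;
 *	bad:
 *		ti->error = "Invalid argument";
 *		kfree(ec);
 *		return -EINVAL;
 *	}
 */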
/*
 * Information about a target type
 */

/*
 * Target features
 */
#define DM_TARGET_SUPPORTS_BARRIERS 0x00000001

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_map_request_fn map_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_flush_fn flush;
	dm_presuspend_fn presuspend;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_ioctl_fn ioctl;
	dm_merge_fn merge;
	dm_busy_fn busy;
};

struct io_restrictions {
	unsigned long bounce_pfn;
	unsigned long seg_boundary_mask;
	unsigned max_hw_sectors;
	unsigned max_sectors;
	unsigned max_segment_size;
	unsigned short hardsect_size;
	unsigned short max_hw_segments;
	unsigned short max_phys_segments;
	unsigned char no_cluster; /* inverted so that 0 is default */
};

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* FIXME: turn this into a mask, and merge with io_restrictions */
	/* Always a power of 2 */
	sector_t split_io;

	/*
	 * These are automatically filled in by
	 * dm_table_get_device.
	 */
	struct io_restrictions limits;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;
};

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct mapped_device *md);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);


/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);
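/*
 * Illustrative sketch (not part of this header): the three calls above
 * are used in that order to build a table; error handling is omitted and
 * the device path, length 'len' and mapped_device 'md' are hypothetical.
 *
 *	struct dm_table *t;
 *	char params[] = "/dev/sda 0";
 *
 *	dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *	dm_table_add_target(t, "linear", 0, len, params);
 *	dm_table_complete(t);
 */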
/*
 * Unplug all devices in a table.
 */
void dm_table_unplug_all(struct dm_table *t);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_table(struct mapped_device *md);
void dm_table_get(struct dm_table *t);
void dm_table_put(struct dm_table *t);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#define DMCRIT(f, arg...) \
	printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)

#define DMERR(f, arg...) \
	printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMERR_LIMIT(f, arg...) \
	do { \
		if (printk_ratelimit()) \
			printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMWARN(f, arg...) \
	printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMWARN_LIMIT(f, arg...) \
	do { \
		if (printk_ratelimit()) \
			printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMINFO(f, arg...) \
	printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMINFO_LIMIT(f, arg...) \
	do { \
		if (printk_ratelimit()) \
			printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)

#ifdef CONFIG_DM_DEBUG
# define DMDEBUG(f, arg...) \
	printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
# define DMDEBUG_LIMIT(f, arg...) \
	do { \
		if (printk_ratelimit()) \
			printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)
#else
# define DMDEBUG(f, arg...) do {} while (0)
# define DMDEBUG_LIMIT(f, arg...) do {} while (0)
#endif

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))
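/*
 * Illustrative sketch (not part of this header): DMEMIT expects 'result',
 * 'maxlen' and a local 'sz' to be in scope, matching the dm_status_fn
 * signature above.  The example function and its fields are hypothetical.
 *
 *	static int example_status(struct dm_target *ti, status_type_t type,
 *				  char *result, unsigned int maxlen)
 *	{
 *		struct example_c *ec = ti->private;
 *		int sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			result[0] = '\0';
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%s %llu", ec->dev->name,
 *			       (unsigned long long)ec->start);
 *			break;
 *		}
 *		return 0;
 *	}
 */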
#define SECTOR_SHIFT 9

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_INCOMPLETE 1
#define DM_ENDIO_REQUEUE 2

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED 0
#define DM_MAPIO_REMAPPED 1
#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
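/*
 * For example, dm_div_up(10, 4) == 3 and dm_round_up(10, 4) == 12.
 */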
#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))

static inline sector_t to_sector(unsigned long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

#endif /* _LINUX_DEVICE_MAPPER_H */