kernel-ark/drivers/md/dm-bio-prison.c
Sasha Levin b67bfe0d42 hlist: drop the node parameter from iterators
I'm not sure why, but while the list for-each-entry iterator was conceived as

        list_for_each_entry(pos, head, member)

the hlist ones were greedy and wanted an extra parameter:

        hlist_for_each_entry(tpos, pos, head, member)

Why did they need an extra pos parameter? I'm not quite sure. Not only
do they not really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.

Besides the semantic patch, there was some manual work required:

 - Fix up the actual hlist iterators in linux/list.h.
 - Fix up the declarations of other iterators based on the hlist ones.
 - A very small number of places were using the 'node' parameter; these
   were modified to use 'obj->member' instead (see the sketch below).
 - Coccinelle didn't handle the hlist_for_each_entry_safe iterator
   properly, so those had to be fixed up manually.
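
To illustrate the shape of the conversion, here is a hypothetical call
site (not taken from the patch itself).  Before:

        struct obj *o;
        struct hlist_node *n;

        hlist_for_each_entry(o, n, &bucket, member)
                use(o);

After:

        struct obj *o;

        hlist_for_each_entry(o, &bucket, member)
                use(o);

Any leftover use of 'n' in the loop body becomes '&o->member'.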

The semantic patch, which is mostly the work of Peter Senna Tschudin, is here:

@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue,
hlist_for_each_entry_from, hlist_for_each_entry_rcu,
hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh,
for_each_busy_worker, ax25_uid_for_each, ax25_for_each,
inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each,
sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound,
hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu,
nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each,
nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp,
for_each_host, for_each_host_safe, for_each_mesh_entry;

type T;
expression a,c,d,e;
identifier b;
statement S;
@@

-T b;
    <+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
    ...+>
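
For reference, a semantic patch like this is applied tree-wide with
Coccinelle's spatch tool, along the lines of the following (illustrative
invocation, with 'hlist.cocci' standing in for wherever the patch above
is saved):

        spatch --sp-file hlist.cocci --in-place --dir .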

[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foundation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2013-02-27 19:10:24 -08:00

/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>

/*----------------------------------------------------------------*/

struct dm_bio_prison_cell {
        struct hlist_node list;
        struct dm_bio_prison *prison;
        struct dm_cell_key key;
        struct bio *holder;
        struct bio_list bios;
};

struct dm_bio_prison {
        spinlock_t lock;
        mempool_t *cell_pool;

        unsigned nr_buckets;
        unsigned hash_mask;
        struct hlist_head *cells;
};

/*----------------------------------------------------------------*/

static uint32_t calc_nr_buckets(unsigned nr_cells)
{
        uint32_t n = 128;

        nr_cells /= 4;
        nr_cells = min(nr_cells, 8192u);

        while (n < nr_cells)
                n <<= 1;

        return n;
}

static struct kmem_cache *_cell_cache;

/*
 * @nr_cells should be the number of cells you want in use _concurrently_.
 * Don't confuse it with the number of distinct keys.
 */
struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells)
{
        unsigned i;
        uint32_t nr_buckets = calc_nr_buckets(nr_cells);
        size_t len = sizeof(struct dm_bio_prison) +
                (sizeof(struct hlist_head) * nr_buckets);
        struct dm_bio_prison *prison = kmalloc(len, GFP_KERNEL);

        if (!prison)
                return NULL;

        spin_lock_init(&prison->lock);
        prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
        if (!prison->cell_pool) {
                kfree(prison);
                return NULL;
        }

        prison->nr_buckets = nr_buckets;
        prison->hash_mask = nr_buckets - 1;
        prison->cells = (struct hlist_head *) (prison + 1);
        for (i = 0; i < nr_buckets; i++)
                INIT_HLIST_HEAD(prison->cells + i);

        return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create);

void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
        mempool_destroy(prison->cell_pool);
        kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);
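
/*
 * Illustrative usage sketch (not part of the original source): a target
 * would typically create one prison at construction time, sized for the
 * number of cells it expects to hold locked at once, and tear it down
 * in its destructor:
 *
 *        prison = dm_bio_prison_create(1024);
 *        if (!prison)
 *                return -ENOMEM;
 *        ...
 *        dm_bio_prison_destroy(prison);
 */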

static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
{
        const unsigned long BIG_PRIME = 4294967291UL;
        uint64_t hash = key->block * BIG_PRIME;

        return (uint32_t) (hash & prison->hash_mask);
}

static int keys_equal(struct dm_cell_key *lhs, struct dm_cell_key *rhs)
{
        return (lhs->virtual == rhs->virtual) &&
                (lhs->dev == rhs->dev) &&
                (lhs->block == rhs->block);
}

static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
                                                  struct dm_cell_key *key)
{
        struct dm_bio_prison_cell *cell;

        hlist_for_each_entry(cell, bucket, list)
                if (keys_equal(&cell->key, key))
                        return cell;

        return NULL;
}

/*
 * This may block if a new cell needs allocating.  You must ensure that
 * cells will be unlocked even if the calling thread is blocked.
 *
 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 */
int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
                  struct bio *inmate, struct dm_bio_prison_cell **ref)
{
        int r = 1;
        unsigned long flags;
        uint32_t hash = hash_key(prison, key);
        struct dm_bio_prison_cell *cell, *cell2;

        BUG_ON(hash > prison->nr_buckets);

        spin_lock_irqsave(&prison->lock, flags);

        cell = __search_bucket(prison->cells + hash, key);
        if (cell) {
                bio_list_add(&cell->bios, inmate);
                goto out;
        }

        /*
         * Allocate a new cell
         */
        spin_unlock_irqrestore(&prison->lock, flags);
        cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
        spin_lock_irqsave(&prison->lock, flags);

        /*
         * We've been unlocked, so we have to double check that
         * nobody else has inserted this cell in the meantime.
         */
        cell = __search_bucket(prison->cells + hash, key);
        if (cell) {
                mempool_free(cell2, prison->cell_pool);
                bio_list_add(&cell->bios, inmate);
                goto out;
        }

        /*
         * Use new cell.
         */
        cell = cell2;

        cell->prison = prison;
        memcpy(&cell->key, key, sizeof(cell->key));
        cell->holder = inmate;
        bio_list_init(&cell->bios);
        hlist_add_head(&cell->list, prison->cells + hash);

        r = 0;

out:
        spin_unlock_irqrestore(&prison->lock, flags);

        *ref = cell;

        return r;
}
EXPORT_SYMBOL_GPL(dm_bio_detain);
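
/*
 * Illustrative usage sketch (not part of the original source): the
 * typical detain/release pattern from a target's bio mapping path,
 * assuming a key built for the bio's data block:
 *
 *        struct dm_bio_prison_cell *cell;
 *        struct bio_list bios;
 *
 *        if (dm_bio_detain(prison, &key, bio, &cell))
 *                return;                // someone else holds the cell;
 *                                       // the bio is queued inside it
 *
 *        do_the_io(bio);                // we are the holder
 *
 *        bio_list_init(&bios);
 *        dm_cell_release(cell, &bios);  // frees the cell, hands back
 *                                       // the holder plus any waiters
 */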

/*
 * @inmates must have been initialised prior to this call
 */
static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
{
        struct dm_bio_prison *prison = cell->prison;

        hlist_del(&cell->list);

        if (inmates) {
                bio_list_add(inmates, cell->holder);
                bio_list_merge(inmates, &cell->bios);
        }

        mempool_free(cell, prison->cell_pool);
}

void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
{
        unsigned long flags;
        struct dm_bio_prison *prison = cell->prison;

        spin_lock_irqsave(&prison->lock, flags);
        __cell_release(cell, bios);
        spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release);

/*
 * Sometimes we don't want the holder, just the additional bios.
 */
static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
{
        struct dm_bio_prison *prison = cell->prison;

        hlist_del(&cell->list);
        bio_list_merge(inmates, &cell->bios);

        mempool_free(cell, prison->cell_pool);
}

void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
{
        unsigned long flags;
        struct dm_bio_prison *prison = cell->prison;

        spin_lock_irqsave(&prison->lock, flags);
        __cell_release_no_holder(cell, inmates);
        spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);

void dm_cell_error(struct dm_bio_prison_cell *cell)
{
        struct dm_bio_prison *prison = cell->prison;
        struct bio_list bios;
        struct bio *bio;
        unsigned long flags;

        bio_list_init(&bios);

        spin_lock_irqsave(&prison->lock, flags);
        __cell_release(cell, &bios);
        spin_unlock_irqrestore(&prison->lock, flags);

        while ((bio = bio_list_pop(&bios)))
                bio_io_error(bio);
}
EXPORT_SYMBOL_GPL(dm_cell_error);

/*----------------------------------------------------------------*/

#define DEFERRED_SET_SIZE 64

struct dm_deferred_entry {
        struct dm_deferred_set *ds;
        unsigned count;
        struct list_head work_items;
};

struct dm_deferred_set {
        spinlock_t lock;
        unsigned current_entry;
        unsigned sweeper;
        struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};

struct dm_deferred_set *dm_deferred_set_create(void)
{
        int i;
        struct dm_deferred_set *ds;

        ds = kmalloc(sizeof(*ds), GFP_KERNEL);
        if (!ds)
                return NULL;

        spin_lock_init(&ds->lock);
        ds->current_entry = 0;
        ds->sweeper = 0;
        for (i = 0; i < DEFERRED_SET_SIZE; i++) {
                ds->entries[i].ds = ds;
                ds->entries[i].count = 0;
                INIT_LIST_HEAD(&ds->entries[i].work_items);
        }

        return ds;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_create);

void dm_deferred_set_destroy(struct dm_deferred_set *ds)
{
        kfree(ds);
}
EXPORT_SYMBOL_GPL(dm_deferred_set_destroy);

struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
{
        unsigned long flags;
        struct dm_deferred_entry *entry;

        spin_lock_irqsave(&ds->lock, flags);
        entry = ds->entries + ds->current_entry;
        entry->count++;
        spin_unlock_irqrestore(&ds->lock, flags);

        return entry;
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);

static unsigned ds_next(unsigned index)
{
        return (index + 1) % DEFERRED_SET_SIZE;
}
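
/*
 * Walk the sweeper forward over entries whose count has dropped to
 * zero, splicing their deferred work onto @head.  The sweep stops at
 * the current entry, which is only drained once its own count reaches
 * zero as well.
 */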
static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
{
        while ((ds->sweeper != ds->current_entry) &&
               !ds->entries[ds->sweeper].count) {
                list_splice_init(&ds->entries[ds->sweeper].work_items, head);
                ds->sweeper = ds_next(ds->sweeper);
        }

        if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
                list_splice_init(&ds->entries[ds->sweeper].work_items, head);
}

void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
{
        unsigned long flags;

        spin_lock_irqsave(&entry->ds->lock, flags);
        BUG_ON(!entry->count);
        --entry->count;
        __sweep(entry->ds, head);
        spin_unlock_irqrestore(&entry->ds->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);

/*
 * Returns 1 if the work was deferred, or 0 if there are no pending
 * items to delay the job.
 */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
        int r = 1;
        unsigned long flags;
        unsigned next_entry;

        spin_lock_irqsave(&ds->lock, flags);
        if ((ds->sweeper == ds->current_entry) &&
            !ds->entries[ds->current_entry].count)
                r = 0;
        else {
                list_add(work, &ds->entries[ds->current_entry].work_items);
                next_entry = ds_next(ds->current_entry);
                if (!ds->entries[next_entry].count)
                        ds->current_entry = next_entry;
        }
        spin_unlock_irqrestore(&ds->lock, flags);

        return r;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_add_work);
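
/*
 * Illustrative usage sketch (not part of the original source): callers
 * bracket in-flight I/O with inc/dec and queue dependent work with
 * dm_deferred_set_add_work(), which tells them whether it may run now:
 *
 *        entry = dm_deferred_entry_inc(ds);        // I/O enters the set
 *        ...
 *        dm_deferred_entry_dec(entry, &completed); // I/O leaves; any
 *                                                  // drained work lands
 *                                                  // on 'completed'
 *
 *        if (!dm_deferred_set_add_work(ds, &job->list))
 *                run_job(job);                     // nothing in flight,
 *                                                  // safe to run now
 */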

/*----------------------------------------------------------------*/

static int __init dm_bio_prison_init(void)
{
        _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
        if (!_cell_cache)
                return -ENOMEM;

        return 0;
}

static void __exit dm_bio_prison_exit(void)
{
        kmem_cache_destroy(_cell_cache);
        _cell_cache = NULL;
}

/*
 * module hooks
 */
module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);

MODULE_DESCRIPTION(DM_NAME " bio prison");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");