Diffstat (limited to 'drivers/md/dm-cache-target.c')
 drivers/md/dm-cache-target.c | 141
 1 file changed, 86 insertions(+), 55 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 5e92fac90b67..dbbcfa580078 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat. All rights reserved.
*
@@ -180,15 +181,15 @@ static void continue_after_commit(struct batcher *b, struct continuation *k)
*/
static void issue_after_commit(struct batcher *b, struct bio *bio)
{
-       bool commit_scheduled;
+	bool commit_scheduled;

-       spin_lock_irq(&b->lock);
-       commit_scheduled = b->commit_scheduled;
-       bio_list_add(&b->bios, bio);
-       spin_unlock_irq(&b->lock);
+	spin_lock_irq(&b->lock);
+	commit_scheduled = b->commit_scheduled;
+	bio_list_add(&b->bios, bio);
+	spin_unlock_irq(&b->lock);

-       if (commit_scheduled)
-               async_commit(b);
+	if (commit_scheduled)
+		async_commit(b);
}
/*
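
The batcher handoff that this hunk reindents deserves a short gloss. Below is a minimal annotated sketch, assuming a simplified struct (batcher_sketch) and hypothetical sketch_* names; only spin_lock_irq(), bio_list_add() and the behaviour of the real async_commit() are taken from the code above.

/* Simplified view of the batcher fields used by issue_after_commit(). */
struct batcher_sketch {
	spinlock_t lock;
	bool commit_scheduled;		/* a commit work item is already queued */
	struct bio_list bios;		/* bios waiting for that commit */
};

static void sketch_async_commit(struct batcher_sketch *b);

static void sketch_issue_after_commit(struct batcher_sketch *b, struct bio *bio)
{
	bool commit_scheduled;

	/*
	 * Snapshot the flag and queue the bio in one critical section.
	 * If a commit was already scheduled, it may already be running
	 * and could miss the bio we just added, so kick another one.
	 */
	spin_lock_irq(&b->lock);
	commit_scheduled = b->commit_scheduled;
	bio_list_add(&b->bios, bio);
	spin_unlock_irq(&b->lock);

	if (commit_scheduled)
		sketch_async_commit(b);	/* stand-in for async_commit() */
}
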
@@ -275,7 +276,7 @@ enum cache_io_mode {
struct cache_features {
enum cache_metadata_mode mode;
enum cache_io_mode io_mode;
- unsigned metadata_version;
+ unsigned int metadata_version;
bool discard_passdown:1;
};
@@ -362,7 +363,7 @@ struct cache {
* Rather than reconstructing the table line for the status we just
* save it and regurgitate.
*/
- unsigned nr_ctr_args;
+ unsigned int nr_ctr_args;
const char **ctr_args;
struct dm_kcopyd_client *copier;
@@ -378,7 +379,7 @@ struct cache {
unsigned long *dirty_bitset;
atomic_t nr_dirty;
- unsigned policy_nr_args;
+ unsigned int policy_nr_args;
struct dm_cache_policy *policy;
/*
@@ -409,7 +410,7 @@ struct cache {
struct per_bio_data {
bool tick:1;
- unsigned req_nr:2;
+ unsigned int req_nr:2;
struct dm_bio_prison_cell_v2 *cell;
struct dm_hook_info hook_info;
sector_t len;
@@ -517,20 +518,23 @@ static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key_v2
#define WRITE_LOCK_LEVEL 0
#define READ_WRITE_LOCK_LEVEL 1
-static unsigned lock_level(struct bio *bio)
+static unsigned int lock_level(struct bio *bio)
{
return bio_data_dir(bio) == WRITE ?
WRITE_LOCK_LEVEL :
READ_WRITE_LOCK_LEVEL;
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Per bio data
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static struct per_bio_data *get_per_bio_data(struct bio *bio)
{
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+
BUG_ON(!pb);
return pb;
}
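
For readers new to device-mapper targets: dm_per_bio_data() hands back memory the target reserved per bio at construction time through ti->per_io_data_size. A minimal sketch of both halves follows; sketch_ctr() and sketch_map() are hypothetical, while dm_per_bio_data(), per_io_data_size and DM_MAPIO_REMAPPED are real DM interfaces.

static int sketch_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	/* Reserve room so dm_per_bio_data() has something to return. */
	ti->per_io_data_size = sizeof(struct per_bio_data);
	return 0;
}

static int sketch_map(struct dm_target *ti, struct bio *bio)
{
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	pb->tick = false;	/* DM does not zero this area; initialise it */
	return DM_MAPIO_REMAPPED;
}
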
@@ -687,6 +691,7 @@ static void clear_discard(struct cache *cache, dm_dblock_t b)
static bool is_discarded(struct cache *cache, dm_dblock_t b)
{
int r;
+
spin_lock_irq(&cache->lock);
r = test_bit(from_dblock(b), cache->discard_bitset);
spin_unlock_irq(&cache->lock);
@@ -697,6 +702,7 @@ static bool is_discarded(struct cache *cache, dm_dblock_t b)
static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{
int r;
+
spin_lock_irq(&cache->lock);
r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
cache->discard_bitset);
@@ -705,9 +711,11 @@ static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
return r;
}
-/*----------------------------------------------------------------
+/*
+ * -------------------------------------------------------------
* Remapping
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void remap_to_origin(struct cache *cache, struct bio *bio)
{
bio_set_dev(bio, cache->origin_dev->bdev);
@@ -809,6 +817,7 @@ static void accounted_request(struct cache *cache, struct bio *bio)
static void issue_op(struct bio *bio, void *context)
{
struct cache *cache = context;
+
accounted_request(cache, bio);
}
@@ -833,9 +842,11 @@ static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
remap_to_cache(cache, bio, cblock);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Failure modes
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static enum cache_metadata_mode get_cache_mode(struct cache *cache)
{
return cache->features.mode;
@@ -848,7 +859,7 @@ static const char *cache_device_name(struct cache *cache)
static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
{
- const char *descs[] = {
+ static const char *descs[] = {
"write",
"read-only",
"fail"
@@ -972,13 +983,14 @@ static void update_stats(struct cache_stats *stats, enum policy_operation op)
}
}
-/*----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------------
* Migration processing
*
* Migration covers moving data from the origin device to the cache, or
* vice versa.
- *--------------------------------------------------------------*/
-
+ *---------------------------------------------------------------------
+ */
static void inc_io_migrations(struct cache *cache)
{
atomic_inc(&cache->nr_io_migrations);
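
As the reworked banner says, a migration is a block copy between the origin and the cache, and the heavy lifting goes through kcopyd. A promotion reduces to roughly the sketch below (condensed from this file's real copy() helper; names and error handling simplified):

static void sketch_promote(struct cache *cache, dm_oblock_t oblock,
			   dm_cblock_t cblock, dm_kcopyd_notify_fn done,
			   void *context)
{
	struct dm_io_region from, to;

	from.bdev = cache->origin_dev->bdev;
	from.sector = from_oblock(oblock) * cache->sectors_per_block;
	from.count = cache->sectors_per_block;

	to.bdev = cache->cache_dev->bdev;
	to.sector = from_cblock(cblock) * cache->sectors_per_block;
	to.count = cache->sectors_per_block;

	/* A demotion is the same call with 'from' and 'to' swapped. */
	dm_kcopyd_copy(cache->copier, &from, 1, &to, 0, done, context);
}
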
@@ -1066,6 +1078,7 @@ static void quiesce(struct dm_cache_migration *mg,
static struct dm_cache_migration *ws_to_mg(struct work_struct *ws)
{
struct continuation *k = container_of(ws, struct continuation, ws);
+
return container_of(k, struct dm_cache_migration, k);
}
@@ -1217,6 +1230,7 @@ static void mg_complete(struct dm_cache_migration *mg, bool success)
static void mg_success(struct work_struct *ws)
{
struct dm_cache_migration *mg = ws_to_mg(ws);
+
mg_complete(mg, mg->k.input == 0);
}
@@ -1355,6 +1369,7 @@ static void mg_copy(struct work_struct *ws)
* Fallback to a real full copy after doing some tidying up.
*/
bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
+
BUG_ON(rb); /* An exclusive lock must _not_ be held for this block */
mg->overwrite_bio = NULL;
inc_io_migrations(mg->cache);
@@ -1430,9 +1445,11 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
return mg_lock_writes(mg);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* invalidation processing
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void invalidate_complete(struct dm_cache_migration *mg, bool success)
{
@@ -1455,12 +1472,15 @@ static void invalidate_complete(struct dm_cache_migration *mg, bool success)
static void invalidate_completed(struct work_struct *ws)
{
struct dm_cache_migration *mg = ws_to_mg(ws);
+
invalidate_complete(mg, !mg->k.input);
}
static int invalidate_cblock(struct cache *cache, dm_cblock_t cblock)
{
- int r = policy_invalidate_mapping(cache->policy, cblock);
+ int r;
+
+ r = policy_invalidate_mapping(cache->policy, cblock);
if (!r) {
r = dm_cache_remove_mapping(cache->cmd, cblock);
if (r) {
@@ -1553,9 +1573,11 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
return invalidate_lock(mg);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* bio processing
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
enum busy {
IDLE,
@@ -1763,9 +1785,11 @@ static bool process_discard_bio(struct cache *cache, struct bio *bio)
{
dm_dblock_t b, e;
- // FIXME: do we need to lock the region? Or can we just assume the
- // user wont be so foolish as to issue discard concurrently with
- // other IO?
+ /*
+ * FIXME: do we need to lock the region? Or can we just assume the
* user won't be so foolish as to issue discard concurrently with
+ * other IO?
+ */
calc_discard_block_range(cache, bio, &b, &e);
while (b != e) {
set_discard(cache, b);
@@ -1805,16 +1829,18 @@ static void process_deferred_bios(struct work_struct *ws)
else
commit_needed = process_bio(cache, bio) || commit_needed;
+ cond_resched();
}
if (commit_needed)
schedule_commit(&cache->committer);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Main worker loop
- *--------------------------------------------------------------*/
-
+ *--------------------------------------------------------------
+ */
static void requeue_deferred_bios(struct cache *cache)
{
struct bio *bio;
@@ -1827,6 +1853,7 @@ static void requeue_deferred_bios(struct cache *cache)
while ((bio = bio_list_pop(&bios))) {
bio->bi_status = BLK_STS_DM_REQUEUE;
bio_endio(bio);
+ cond_resched();
}
}
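
The cond_resched() calls added in this and the surrounding loops share one rationale: a workqueue item draining a long bio list can monopolise a CPU on non-preemptible kernels, so the loop yields between bios. The pattern in isolation (drain_and_requeue() is illustrative, not a helper in this file):

static void drain_and_requeue(struct bio_list *bios)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bios))) {
		bio->bi_status = BLK_STS_DM_REQUEUE;
		bio_endio(bio);
		cond_resched();	/* let other runnable tasks in */
	}
}
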
@@ -1867,12 +1894,16 @@ static void check_migrations(struct work_struct *ws)
r = mg_start(cache, op, NULL);
if (r)
break;
+
+ cond_resched();
}
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Target methods
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
/*
* This function gets called on the error paths of the constructor, so we
@@ -1880,7 +1911,7 @@ static void check_migrations(struct work_struct *ws)
*/
static void destroy(struct cache *cache)
{
- unsigned i;
+ unsigned int i;
mempool_exit(&cache->migration_pool);
@@ -2120,7 +2151,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
};
int r, mode_ctr = 0;
- unsigned argc;
+ unsigned int argc;
const char *arg;
struct cache_features *cf = &ca->features;
@@ -2540,7 +2571,7 @@ bad:
static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
{
- unsigned i;
+ unsigned int i;
const char **copy;
copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
@@ -2562,7 +2593,7 @@ static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
return 0;
}
-static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int cache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r = -EINVAL;
struct cache_args *ca;
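
This is the "save it and regurgitate" idea from the struct cache comment earlier in the diff: deep-copy the constructor's argv so cache_status() can later replay the exact table line. A condensed sketch (the real copy_ctr_args() also unwinds the partial copy when an allocation fails):

static int sketch_copy_ctr_args(struct cache *cache, int argc, const char **argv)
{
	int i;
	const char **copy;

	copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
	if (!copy)
		return -ENOMEM;

	for (i = 0; i < argc; i++) {
		copy[i] = kstrdup(argv[i], GFP_KERNEL);
		if (!copy[i])
			return -ENOMEM;	/* real code frees copy[0..i) first */
	}

	cache->nr_ctr_args = argc;
	cache->ctr_args = copy;
	return 0;
}
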
@@ -2665,7 +2696,7 @@ static int write_dirty_bitset(struct cache *cache)
static int write_discard_bitset(struct cache *cache)
{
- unsigned i, r;
+ unsigned int i, r;
if (get_cache_mode(cache) >= CM_READ_ONLY)
return -EINVAL;
@@ -2979,11 +3010,11 @@ static void cache_resume(struct dm_target *ti)
}
static void emit_flags(struct cache *cache, char *result,
- unsigned maxlen, ssize_t *sz_ptr)
+ unsigned int maxlen, ssize_t *sz_ptr)
{
ssize_t sz = *sz_ptr;
struct cache_features *cf = &cache->features;
- unsigned count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
+ unsigned int count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
DMEMIT("%u ", count);
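
The count computed above leans on C's boolean-to-int conversion: each condition contributes 0 or 1, and the trailing + 1 is the io-mode word that emit_flags() always prints. For example, with metadata version 2 and discard passdown enabled (illustrative values):

/* (2 == 2) + !true + 1  =>  1 + 0 + 1 = 2 feature words */
unsigned int count = (cf->metadata_version == 2)	/* "metadata2" */
		   + !cf->discard_passdown		/* passdown on: no word */
		   + 1;					/* io-mode word */
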
@@ -3023,10 +3054,10 @@ static void emit_flags(struct cache *cache, char *result,
* <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
*/
static void cache_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
int r = 0;
- unsigned i;
+ unsigned int i;
ssize_t sz = 0;
dm_block_t nr_free_blocks_metadata = 0;
dm_block_t nr_blocks_metadata = 0;
@@ -3063,18 +3094,18 @@ static void cache_status(struct dm_target *ti, status_type_t type,
residency = policy_residency(cache->policy);
DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ",
- (unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
+ (unsigned int)DM_CACHE_METADATA_BLOCK_SIZE,
(unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
(unsigned long long)nr_blocks_metadata,
(unsigned long long)cache->sectors_per_block,
(unsigned long long) from_cblock(residency),
(unsigned long long) from_cblock(cache->cache_size),
- (unsigned) atomic_read(&cache->stats.read_hit),
- (unsigned) atomic_read(&cache->stats.read_miss),
- (unsigned) atomic_read(&cache->stats.write_hit),
- (unsigned) atomic_read(&cache->stats.write_miss),
- (unsigned) atomic_read(&cache->stats.demotion),
- (unsigned) atomic_read(&cache->stats.promotion),
+ (unsigned int) atomic_read(&cache->stats.read_hit),
+ (unsigned int) atomic_read(&cache->stats.read_miss),
+ (unsigned int) atomic_read(&cache->stats.write_hit),
+ (unsigned int) atomic_read(&cache->stats.write_miss),
+ (unsigned int) atomic_read(&cache->stats.demotion),
+ (unsigned int) atomic_read(&cache->stats.promotion),
(unsigned long) atomic_read(&cache->nr_dirty));
emit_flags(cache, result, maxlen, &sz);
@@ -3253,11 +3284,11 @@ static int request_invalidation(struct cache *cache, struct cblock_range *range)
return r;
}
-static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
+static int process_invalidate_cblocks_message(struct cache *cache, unsigned int count,
const char **cblock_ranges)
{
int r = 0;
- unsigned i;
+ unsigned int i;
struct cblock_range range;
if (!passthrough_mode(cache)) {
@@ -3294,8 +3325,8 @@ static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
*
* The key migration_threshold is supported by the cache target core.
*/
-static int cache_message(struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+static int cache_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
struct cache *cache = ti->private;