dm: address indent/space issues
Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>

Committed by: Mike Snitzer
Parent: 96422281ba
Commit: 255e264649
drivers/md/dm-cache-policy.h
@@ -76,7 +76,7 @@ struct dm_cache_policy {
 * background work.
 */
 int (*get_background_work)(struct dm_cache_policy *p, bool idle,
 struct policy_work **result);

 /*
 * You must pass in the same work pointer that you were given, not
drivers/md/dm-crypt.c
@@ -2503,7 +2503,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
 type = &key_type_encrypted;
 set_key = set_key_encrypted;
 } else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) &&
 !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
 type = &key_type_trusted;
 set_key = set_key_trusted;
 } else {
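A note on the !strncmp() condition in the hunk above: key_string has the form "<type>:<description>" and key_desc points at the ':' separator, so key_desc - key_string + 1 is the length of the type prefix including the colon, and the comparison succeeds only when that prefix is exactly "trusted:". A minimal stand-alone sketch of the same prefix test (the sample strings are illustrative, not taken from dm-crypt):

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Illustrative "<type>:<description>" string of the form dm-crypt parses. */
        const char *key_string = "trusted:dmcrypt-key";
        const char *key_desc = strchr(key_string, ':');    /* points at the ':' */

        /* Length of the type prefix including the ':' -- 8 for "trusted:". */
        size_t prefix_len = (size_t)(key_desc - key_string + 1);

        /* Non-zero only when the prefix is exactly "trusted:". */
        printf("%d\n", !strncmp(key_string, "trusted:", prefix_len));
        return 0;
}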
drivers/md/dm-integrity.c
@@ -2301,7 +2301,6 @@ offload_to_thread:
 else
 skip_check:
 dec_in_flight(dio);
-
 } else {
 INIT_WORK(&dio->work, integrity_metadata);
 queue_work(ic->metadata_wq, &dio->work);
@@ -4085,7 +4084,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
 } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
 if (val < 1 << SECTOR_SHIFT ||
 val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
-(val & (val -1))) {
+(val & (val - 1))) {
 r = -EINVAL;
 ti->error = "Invalid block_size argument";
 goto bad;
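As background for the expression being reformatted above: for a nonzero unsigned value, val & (val - 1) clears the lowest set bit, so it is zero exactly when val is a power of two; the whole condition therefore rejects any block_size that is not a power-of-two byte count between one sector and MAX_SECTORS_PER_BLOCK sectors. A small stand-alone sketch of that validation (the helper name and the MAX_SECTORS_PER_BLOCK value here are illustrative, not taken from dm-integrity):

#include <stdbool.h>
#include <stdio.h>

#define SECTOR_SHIFT 9                  /* 512-byte sectors, as in the kernel */
#define MAX_SECTORS_PER_BLOCK 8         /* illustrative bound, not necessarily the driver's */

/* Hypothetical helper mirroring the block_size validation above. */
static bool block_size_is_valid(unsigned int val)
{
        return val >= (1u << SECTOR_SHIFT) &&
               val <= (MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT) &&
               (val & (val - 1)) == 0;  /* power-of-two test */
}

int main(void)
{
        /* 512 and 4096 pass; 3072 is in range but not a power of two. */
        printf("%d %d %d\n", block_size_is_valid(512),
               block_size_is_valid(4096), block_size_is_valid(3072));
        return 0;
}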
@@ -4405,7 +4404,7 @@ try_smaller_buffer:
 if (ic->internal_hash) {
 size_t recalc_tags_size;
 ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
-if (!ic->recalc_wq ) {
+if (!ic->recalc_wq) {
 ti->error = "Cannot allocate workqueue";
 r = -ENOMEM;
 goto bad;
drivers/md/dm-log.c
@@ -758,8 +758,8 @@ static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
 log_clear_bit(lc, lc->recovering_bits, region);
 if (in_sync) {
 log_set_bit(lc, lc->sync_bits, region);
 lc->sync_count++;
 } else if (log_test_bit(lc->sync_bits, region)) {
 lc->sync_count--;
 log_clear_bit(lc, lc->sync_bits, region);
 }
@@ -767,9 +767,9 @@ static void core_set_region_sync(struct dm_dirty_log *log, region_t region,

 static region_t core_get_sync_count(struct dm_dirty_log *log)
 {
 struct log_c *lc = (struct log_c *) log->context;

 return lc->sync_count;
 }

 #define DMEMIT_SYNC \
drivers/md/dm-raid.c
@@ -363,8 +363,8 @@ static struct {
 const int mode;
 const char *param;
 } _raid456_journal_mode[] = {
-{ R5C_JOURNAL_MODE_WRITE_THROUGH , "writethrough" },
-{ R5C_JOURNAL_MODE_WRITE_BACK , "writeback" }
+{ R5C_JOURNAL_MODE_WRITE_THROUGH, "writethrough" },
+{ R5C_JOURNAL_MODE_WRITE_BACK, "writeback" }
 };

 /* Return MD raid4/5/6 journal mode for dm @journal_mode one */
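The _raid456_journal_mode[] array reformatted above is a small lookup table mapping dm-raid parameter strings to MD raid4/5/6 journal modes; the function whose comment follows it scans this table. A minimal userspace sketch of that kind of table scan (the lookup helper and the enum values are illustrative stand-ins, not dm-raid's actual code):

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the MD r5c journal mode constants. */
enum { R5C_JOURNAL_MODE_WRITE_THROUGH, R5C_JOURNAL_MODE_WRITE_BACK };

static const struct {
        const int mode;
        const char *param;
} _raid456_journal_mode[] = {
        { R5C_JOURNAL_MODE_WRITE_THROUGH, "writethrough" },
        { R5C_JOURNAL_MODE_WRITE_BACK, "writeback" }
};

/* Hypothetical lookup: return the mode for a parameter string, or -1 if unknown. */
static int journal_mode_lookup(const char *param)
{
        size_t i;

        for (i = 0; i < sizeof(_raid456_journal_mode) / sizeof(_raid456_journal_mode[0]); i++)
                if (!strcmp(param, _raid456_journal_mode[i].param))
                        return _raid456_journal_mode[i].mode;
        return -1;
}

int main(void)
{
        printf("%d %d\n", journal_mode_lookup("writeback"), journal_mode_lookup("bogus"));
        return 0;
}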
@@ -1115,7 +1115,7 @@ too_many:
 * [stripe_cache <sectors>] Stripe cache size for higher RAIDs
 * [region_size <sectors>] Defines granularity of bitmap
 * [journal_dev <dev>] raid4/5/6 journaling deviice
 * (i.e. write hole closing log)
 *
 * RAID10-only options:
 * [raid10_copies <# copies>] Number of copies. (Default: 2)
@@ -4002,7 +4002,7 @@ static int raid_preresume(struct dm_target *ti)
 }

 /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) or grown device size */
 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
 (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) ||
 (rs->requested_bitmap_chunk_sectors &&
 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
drivers/md/dm-raid1.c
@@ -904,7 +904,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
 if (IS_ERR(ms->io_client)) {
 ti->error = "Error creating dm_io client";
 kfree(ms);
 return NULL;
 }

 ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
drivers/md/dm-table.c
@@ -73,7 +73,7 @@ static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
 n = get_child(n, CHILDREN_PER_NODE - 1);

 if (n >= t->counts[l])
-return (sector_t) - 1;
+return (sector_t) -1;

 return get_node(t, l, n)[KEYS_PER_NODE - 1];
 }
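For background on the line fixed above: sector_t is an unsigned integer type, so casting -1 to it yields the all-ones maximum value, which high() returns as a "no such node" sentinel. A tiny stand-alone illustration (the typedef is an assumption standing in for the kernel's sector_t):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;      /* assumption: stands in for the kernel's 64-bit sector_t */

int main(void)
{
        sector_t not_found = (sector_t) -1;     /* wraps to the maximum value, 2^64 - 1 */

        printf("%llu\n", (unsigned long long) not_found);      /* 18446744073709551615 */
        return 0;
}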
@@ -1530,7 +1530,7 @@ static bool dm_table_any_dev_attr(struct dm_table *t,
 if (ti->type->iterate_devices &&
 ti->type->iterate_devices(ti, func, data))
 return true;
 }

 return false;
 }
drivers/md/dm-thin.c
@@ -1181,9 +1181,9 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
 discard_parent = bio_alloc(NULL, 1, 0, GFP_NOIO);
 discard_parent->bi_end_io = passdown_endio;
 discard_parent->bi_private = m;
 if (m->maybe_shared)
 passdown_double_checking_shared_status(m, discard_parent);
 else {
 struct discard_op op;

 begin_discard(&op, tc, discard_parent);
drivers/md/dm-writecache.c
@@ -531,7 +531,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
 req.notify.context = &endio;

 /* writing via async dm-io (implied by notify.fn above) won't return an error */
 (void) dm_io(&req, 1, &region, NULL);
 i = j;
 }
drivers/md/persistent-data/dm-btree.c
@@ -727,7 +727,7 @@ static int shadow_child(struct dm_btree_info *info, struct dm_btree_value_type *
 * nodes, so saves metadata space.
 */
 static int split_two_into_three(struct shadow_spine *s, unsigned int parent_index,
 struct dm_btree_value_type *vt, uint64_t key)
 {
 int r;
 unsigned int middle_index;
@@ -782,7 +782,7 @@ static int split_two_into_three(struct shadow_spine *s, unsigned int parent_inde
 if (shadow_current(s) != right)
 unlock_block(s->info, right);

 return r;
 }
@@ -1217,7 +1217,7 @@ int btree_get_overwrite_leaf(struct dm_btree_info *info, dm_block_t root,
 static bool need_insert(struct btree_node *node, uint64_t *keys,
 unsigned int level, unsigned int index)
 {
 return ((index >= le32_to_cpu(node->header.nr_entries)) ||
 (le64_to_cpu(node->keys[index]) != keys[level]));
 }
drivers/md/persistent-data/dm-space-map-common.c
@@ -391,7 +391,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
 }

 int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
 dm_block_t begin, dm_block_t end, dm_block_t *b)
 {
 int r;
 uint32_t count;
drivers/md/persistent-data/dm-space-map-common.h
@@ -121,7 +121,7 @@ int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result);
 int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
 dm_block_t end, dm_block_t *result);
 int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
 dm_block_t begin, dm_block_t end, dm_block_t *result);

 /*
 * The next three functions return (via nr_allocations) the net number of