btrfs, block: move REQ_CGROUP_PUNT to btrfs
REQ_CGROUP_PUNT is a bit annoying as it is hard to follow and adds a branch to the bio submission hot path. To fix this, export blkcg_punt_bio_submit and let btrfs call it directly. Add a new REQ_FS_PRIVATE flag for btrfs to indicate to its own low-level bio submission code that a punt to the cgroup submission helper is required.

Reviewed-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 3480373ebd
parent 0a0596fbbe
committed by David Sterba
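For context on the consumer side, here is a minimal sketch of the pattern this commit enables in btrfs, assuming a filesystem-private alias for the new flag; the alias name REQ_BTRFS_CGROUP_PUNT and the helper function below are illustrative, not quoted from this page's diff. btrfs tags a bio when a punt is required, and its low-level submission code consumes the flag and calls the now-exported helper:

/*
 * Illustrative btrfs-side pattern enabled by this commit; the alias and
 * the function shown here are a sketch, not lifted verbatim from the diff.
 */
#define REQ_BTRFS_CGROUP_PUNT	REQ_FS_PRIVATE

static void btrfs_submit_dev_bio(struct btrfs_device *dev, struct bio *bio)
{
	/* ... per-device bio setup elided ... */

	if (bio->bi_opf & REQ_BTRFS_CGROUP_PUNT) {
		/* consume the fs-private flag before the block layer sees it */
		bio->bi_opf &= ~REQ_BTRFS_CGROUP_PUNT;
		blkcg_punt_bio_submit(bio);
	} else {
		submit_bio(bio);
	}
}

Moving the check here means the common submit_bio path no longer tests a punt flag for every bio; only the btrfs paths that might punt pay for the branch.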
block/blk-cgroup.c:

@@ -1688,24 +1688,27 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
 
-bool __blkcg_punt_bio_submit(struct bio *bio)
+/*
+ * When a shared kthread issues a bio for a cgroup, doing so synchronously can
+ * lead to priority inversions as the kthread can be trapped waiting for that
+ * cgroup. Use this helper instead of submit_bio to punt the actual issuing to
+ * a dedicated per-blkcg work item to avoid such priority inversions.
+ */
+void blkcg_punt_bio_submit(struct bio *bio)
 {
 	struct blkcg_gq *blkg = bio->bi_blkg;
 
-	/* consume the flag first */
-	bio->bi_opf &= ~REQ_CGROUP_PUNT;
-
-	/* never bounce for the root cgroup */
-	if (!blkg->parent)
-		return false;
-
-	spin_lock_bh(&blkg->async_bio_lock);
-	bio_list_add(&blkg->async_bios, bio);
-	spin_unlock_bh(&blkg->async_bio_lock);
-
-	queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
-	return true;
+	if (blkg->parent) {
+		spin_lock_bh(&blkg->async_bio_lock);
+		bio_list_add(&blkg->async_bios, bio);
+		spin_unlock_bh(&blkg->async_bio_lock);
+		queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
+	} else {
+		/* never bounce for the root cgroup */
+		submit_bio(bio);
+	}
 }
+EXPORT_SYMBOL_GPL(blkcg_punt_bio_submit);
 
 /*
  * Scale the accumulated delay based on how long it has been since we updated
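To follow the punt end to end: the work item queued above drains the per-blkcg async_bios list from a worker that runs in a context owned by that cgroup, so the deferred submission is charged to the right place. Below is a simplified sketch of that drain side, modeled on the in-tree worker (blkg_async_bio_workfn in block/blk-cgroup.c), which additionally sets up plugging when several bios are pending:

/*
 * Simplified sketch of the per-blkcg punt worker; modeled on
 * blkg_async_bio_workfn() in block/blk-cgroup.c, minus plugging.
 */
static void async_bio_drain(struct work_struct *work)
{
	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
					     async_bio_work);
	struct bio_list bios = BIO_EMPTY_LIST;
	struct bio *bio;

	/* detach the whole pending list under the lock, then submit lock-free */
	spin_lock_bh(&blkg->async_bio_lock);
	bio_list_merge(&bios, &blkg->async_bios);
	bio_list_init(&blkg->async_bios);
	spin_unlock_bh(&blkg->async_bio_lock);

	while ((bio = bio_list_pop(&bios)))
		submit_bio(bio);
}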
block/blk-cgroup.h:

@@ -375,16 +375,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
 	if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css), \
 				    (p_blkg)->q)))
 
-bool __blkcg_punt_bio_submit(struct bio *bio);
-
-static inline bool blkcg_punt_bio_submit(struct bio *bio)
-{
-	if (bio->bi_opf & REQ_CGROUP_PUNT)
-		return __blkcg_punt_bio_submit(bio);
-	else
-		return false;
-}
-
 static inline void blkcg_bio_issue_init(struct bio *bio)
 {
 	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
@@ -506,8 +496,6 @@ static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return
 static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
 static inline void blkg_get(struct blkcg_gq *blkg) { }
 static inline void blkg_put(struct blkcg_gq *blkg) { }
-
-static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
 static inline void blkcg_bio_issue_init(struct bio *bio) { }
 static inline void blk_cgroup_bio_start(struct bio *bio) { }
 static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }
block/blk-core.c:

@@ -830,9 +830,6 @@ EXPORT_SYMBOL(submit_bio_noacct);
  */
 void submit_bio(struct bio *bio)
 {
-	if (blkcg_punt_bio_submit(bio))
-		return;
-
 	if (bio_op(bio) == REQ_OP_READ) {
 		task_io_account_read(bio->bi_iter.bi_size);
 		count_vm_events(PGPGIN, bio_sectors(bio));