scsi: block: Remove REQ_OP_WRITE_SAME support
No more users of REQ_OP_WRITE_SAME or drivers implementing it are left,
so remove the infrastructure.

[mkp: fold in and tweak sysfs reporting fix]

Link: https://lore.kernel.org/r/20220209082828.2629273-8-hch@lst.de
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit 73bd66d9c8
parent a773187e37
committed by Martin K. Petersen
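With WRITE SAME gone, zeroing a block range from kernel code goes through the write-zeroes path instead. A minimal sketch of that replacement path, assuming a caller that already holds a block_device reference; blkdev_issue_zeroout() is the existing block-layer helper, while demo_zero_range() is a hypothetical wrapper added here for illustration:

#include <linux/blkdev.h>

/*
 * Hypothetical wrapper for illustration only: zero nr_sects sectors
 * starting at `sector` on bdev. blkdev_issue_zeroout() issues
 * REQ_OP_WRITE_ZEROES when the device supports it and otherwise falls
 * back to regular writes of zero pages (passing BLKDEV_ZERO_NOFALLBACK
 * in the flags argument forbids that fallback).
 */
static int demo_zero_range(struct block_device *bdev, sector_t sector,
			   sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL, 0);
}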
block/blk-merge.c

@@ -152,22 +152,6 @@ static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
 	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
 }
 
-static struct bio *blk_bio_write_same_split(struct request_queue *q,
-					    struct bio *bio,
-					    struct bio_set *bs,
-					    unsigned *nsegs)
-{
-	*nsegs = 1;
-
-	if (!q->limits.max_write_same_sectors)
-		return NULL;
-
-	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
-		return NULL;
-
-	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
-}
-
 /*
  * Return the maximum number of sectors from the start of a bio that may be
  * submitted as a single request to a block device. If enough sectors remain,
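The removed helper mirrored blk_bio_write_zeroes_split() directly above it. For comparison, a sketch of that surviving helper as it reads around this commit (reconstructed from mainline for context, not part of this patch):

static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	/* a write-zeroes bio carries no data payload, hence no segments */
	*nsegs = 0;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}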
@@ -351,10 +335,6 @@ void __blk_queue_split(struct request_queue *q, struct bio **bio,
 		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
 				nr_segs);
 		break;
-	case REQ_OP_WRITE_SAME:
-		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
-				nr_segs);
-		break;
 	default:
 		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
 		break;
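With the REQ_OP_WRITE_SAME case gone, the op switch in __blk_queue_split() handles only the remaining special cases. A sketch of the resulting code; the write-zeroes and default arms come from the context lines above, while the discard/secure-erase arm is reproduced from mainline for completeness:

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
		break;
	}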
@@ -416,8 +396,6 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
 		return 1;
 	case REQ_OP_WRITE_ZEROES:
 		return 0;
-	case REQ_OP_WRITE_SAME:
-		return 1;
 	}
 
 	rq_for_each_bvec(bv, rq, iter)
@@ -555,8 +533,6 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
 
 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
 		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
-	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
-		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
 	else if (rq->bio)
 		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
 
@@ -757,13 +733,6 @@ static enum elv_merge blk_try_req_merge(struct request *req,
 	return ELEVATOR_NO_MERGE;
 }
 
-static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
-{
-	if (bio_page(a) == bio_page(b) && bio_offset(a) == bio_offset(b))
-		return true;
-	return false;
-}
-
 /*
  * For non-mq, this has to be called with the request spinlock acquired.
  * For mq with scheduling, the appropriate queue wide lock should be held.
@@ -780,10 +749,6 @@ static struct request *attempt_merge(struct request_queue *q,
 	if (rq_data_dir(req) != rq_data_dir(next))
 		return NULL;
 
-	if (req_op(req) == REQ_OP_WRITE_SAME &&
-	    !blk_write_same_mergeable(req->bio, next->bio))
-		return NULL;
-
 	/*
 	 * Don't allow merge of different write hints, or for a hint with
 	 * non-hint IO.
@@ -912,11 +877,6 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	if (!bio_crypt_rq_ctx_compatible(rq, bio))
 		return false;
 
-	/* must be using the same buffer */
-	if (req_op(rq) == REQ_OP_WRITE_SAME &&
-	    !blk_write_same_mergeable(rq->bio, bio))
-		return false;
-
 	/*
	 * Don't allow merge of different write hints, or for a hint with
 	 * non-hint IO.