block: replace REQ_THROTTLED with a bio flag

It's the last bio-only REQ_* flag, and we have space for it in the bio
bi_flags field.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Shaun Tancheff <shaun.tancheff@seagate.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Author:    Christoph Hellwig <hch@lst.de>
Date:      2016-10-20 15:12:12 +02:00
Committer: Jens Axboe <axboe@fb.com>
parent 188bd2b16b
commit 8d2bbd4c82

2 changed files with 7 additions and 11 deletions
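Note on the helpers used below: BIO_THROTTLED is tested and changed through the generic bi_flags accessors rather than through bi_opf masks. The following is a minimal sketch of those accessors, modelled on the bio_flagged()/bio_set_flag()/bio_clear_flag() inlines in include/linux/bio.h of this kernel generation; it illustrates the pattern and is not the verbatim upstream source.

/*
 * Sketch of the bi_flags helpers this patch relies on; modelled on
 * include/linux/bio.h, simplified for illustration.  struct bio and
 * its bi_flags field come from include/linux/blk_types.h.
 */
static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}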

--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c

@@ -818,13 +818,13 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 	tg->io_disp[rw]++;
 
 	/*
-	 * REQ_THROTTLED is used to prevent the same bio to be throttled
+	 * BIO_THROTTLED is used to prevent the same bio to be throttled
 	 * more than once as a throttled bio will go through blk-throtl the
 	 * second time when it eventually gets issued. Set it when a bio
 	 * is being charged to a tg.
 	 */
-	if (!(bio->bi_opf & REQ_THROTTLED))
-		bio->bi_opf |= REQ_THROTTLED;
+	if (!bio_flagged(bio, BIO_THROTTLED))
+		bio_set_flag(bio, BIO_THROTTLED);
 }
 
 /**
@@ -1401,7 +1401,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
 	/* see throtl_charge_bio() */
-	if ((bio->bi_opf & REQ_THROTTLED) || !tg->has_rules[rw])
+	if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
 		goto out;
 
 	spin_lock_irq(q->queue_lock);
@@ -1480,7 +1480,7 @@ out:
 	 * being issued.
 	 */
 	if (!throttled)
-		bio->bi_opf &= ~REQ_THROTTLED;
+		bio_clear_flag(bio, BIO_THROTTLED);
 	return throttled;
 }

--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h

@@ -119,6 +119,8 @@ struct bio {
 #define BIO_QUIET	6	/* Make BIO Quiet */
 #define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
 #define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
+#define BIO_THROTTLED	9	/* This bio has already been subjected to
+				 * throttling rules. Don't do it again. */
 
 /*
  * Flags starting here get preserved by bio_reset() - this includes
@@ -165,10 +167,6 @@ enum rq_flag_bits {
 	__REQ_PREFLUSH,		/* request for cache flush */
 	__REQ_RAHEAD,		/* read ahead, can fail anytime */
 
-	/* bio only flags */
-	__REQ_THROTTLED,	/* This bio has already been subjected to
-				 * throttling rules. Don't do it again. */
-
 	/* request only flags */
 	__REQ_SORTED,		/* elevator knows about this request */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
@@ -213,8 +211,6 @@ enum rq_flag_bits {
 	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_PREFLUSH | REQ_FUA | REQ_FLUSH_SEQ)
 
 #define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
-#define REQ_THROTTLED		(1ULL << __REQ_THROTTLED)
-
 #define REQ_SORTED		(1ULL << __REQ_SORTED)
 #define REQ_SOFTBARRIER		(1ULL << __REQ_SOFTBARRIER)
 #define REQ_FUA			(1ULL << __REQ_FUA)
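Taken together, the two files above give BIO_THROTTLED a simple lifecycle, sketched below in condensed form (function bodies elided; this is a paraphrase of the hunks above, not the exact upstream code): the flag is set the first time a bio is charged to a throtl_grp, a later pass through blk_throtl_bio() short-circuits on it, and it is cleared again on the path where the bio leaves without having been throttled.

/*
 * Condensed lifecycle of BIO_THROTTLED after this patch; paraphrased
 * from the diff, not the exact upstream code.
 */
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	/* ... charge the bio's bytes and ios to the group ... */

	/* charge a bio only once, even if it passes through blk-throtl again */
	if (!bio_flagged(bio, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
}

bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
		    struct bio *bio)
{
	bool throttled = false;

	/* a bio that was already charged goes straight through */
	if (bio_flagged(bio, BIO_THROTTLED))
		goto out;

	/* ... queue the bio and set throttled if it exceeds the limits ... */
out:
	/* a bio that is actually being issued does not keep the flag */
	if (!throttled)
		bio_clear_flag(bio, BIO_THROTTLED);
	return throttled;
}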