block: Rename blk_queue_zone_size and bdev_zone_size

By convention, all block device data fields and functions that return a number of 512B
sectors are named xxx_sectors, while names of the form xxx_size are
generally used for a number of bytes. The blk_queue_zone_size and
bdev_zone_size functions did not follow this convention, so rename them
to blk_queue_zone_sectors and bdev_zone_sectors.

No functional change is introduced by this patch.
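
As a minimal sketch of the convention (illustration only, not part of this patch; the EXAMPLE_SECTOR_SHIFT macro and helper name are assumptions), a xxx_sectors value counts 512B sectors, so the corresponding xxx_size value in bytes is obtained by shifting left by 9:

/* Illustration only: convert a zone size in 512B sectors to a size in bytes. */
#define EXAMPLE_SECTOR_SHIFT	9

static inline u64 example_zone_bytes(unsigned int zone_sectors)
{
	/* zone_sectors counts 512B sectors; the result is a number of bytes */
	return (u64)zone_sectors << EXAMPLE_SECTOR_SHIFT;
}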

Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>

Collapsed the two patches; they were nonsensically split and broke
bisection.

Signed-off-by: Jens Axboe <axboe@fb.com>
Damien Le Moal 2017-01-12 07:58:32 -07:00 committed by Jens Axboe
parent b5a10c5f75
commit f99e86485c
5 changed files with 17 additions and 17 deletions

@@ -16,7 +16,7 @@
 static inline sector_t blk_zone_start(struct request_queue *q,
				       sector_t sector)
 {
-	sector_t zone_mask = blk_queue_zone_size(q) - 1;
+	sector_t zone_mask = blk_queue_zone_sectors(q) - 1;
 	return sector & ~zone_mask;
 }
@@ -222,7 +222,7 @@ int blkdev_reset_zones(struct block_device *bdev,
 		return -EINVAL;
 	/* Check alignment (handle eventual smaller last zone) */
-	zone_sectors = blk_queue_zone_size(q);
+	zone_sectors = blk_queue_zone_sectors(q);
 	if (sector & (zone_sectors - 1))
 		return -EINVAL;

@@ -434,7 +434,7 @@ static bool part_zone_aligned(struct gendisk *disk,
			      struct block_device *bdev,
			      sector_t from, sector_t size)
 {
-	unsigned int zone_size = bdev_zone_size(bdev);
+	unsigned int zone_sectors = bdev_zone_sectors(bdev);
 	/*
	 * If this function is called, then the disk is a zoned block device
@@ -446,7 +446,7 @@ static bool part_zone_aligned(struct gendisk *disk,
	 * regular block devices (no zone operation) and their zone size will
	 * be reported as 0. Allow this case.
	 */
-	if (!zone_size)
+	if (!zone_sectors)
 		return true;
 	/*
@@ -455,24 +455,24 @@ static bool part_zone_aligned(struct gendisk *disk,
	 * use it. Check the zone size too: it should be a power of 2 number
	 * of sectors.
	 */
-	if (WARN_ON_ONCE(!is_power_of_2(zone_size))) {
+	if (WARN_ON_ONCE(!is_power_of_2(zone_sectors))) {
 		u32 rem;
-		div_u64_rem(from, zone_size, &rem);
+		div_u64_rem(from, zone_sectors, &rem);
 		if (rem)
 			return false;
 		if ((from + size) < get_capacity(disk)) {
-			div_u64_rem(size, zone_size, &rem);
+			div_u64_rem(size, zone_sectors, &rem);
 			if (rem)
 				return false;
 		}
 	} else {
-		if (from & (zone_size - 1))
+		if (from & (zone_sectors - 1))
 			return false;
 		if ((from + size) < get_capacity(disk) &&
-		    (size & (zone_size - 1)))
+		    (size & (zone_sectors - 1)))
 			return false;
 	}

@@ -713,8 +713,8 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
 	}
 	sector = SECTOR_FROM_BLOCK(blkstart);
-	if (sector & (bdev_zone_size(bdev) - 1) ||
-				nr_sects != bdev_zone_size(bdev)) {
+	if (sector & (bdev_zone_sectors(bdev) - 1) ||
+				nr_sects != bdev_zone_sectors(bdev)) {
 		f2fs_msg(sbi->sb, KERN_INFO,
			"(%d) %s: Unaligned discard attempted (block %x + %x)",
			devi, sbi->s_ndevs ? FDEV(devi).path: "",

@@ -1553,16 +1553,16 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
 		return 0;
 	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
-				SECTOR_TO_BLOCK(bdev_zone_size(bdev)))
+				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
 		return -EINVAL;
-	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_size(bdev));
+	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
 	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
 		return -EINVAL;
 	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
 	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
				sbi->log_blocks_per_blkz;
-	if (nr_sectors & (bdev_zone_size(bdev) - 1))
+	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
 		FDEV(devi).nr_blkz++;
 	FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);

@@ -739,7 +739,7 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
 	}
 }
-static inline unsigned int blk_queue_zone_size(struct request_queue *q)
+static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
 {
 	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
 }
@@ -1536,12 +1536,12 @@ static inline bool bdev_is_zoned(struct block_device *bdev)
 	return false;
 }
-static inline unsigned int bdev_zone_size(struct block_device *bdev)
+static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 	if (q)
-		return blk_queue_zone_size(q);
+		return blk_queue_zone_sectors(q);
 	return 0;
 }
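
For reference, a usage sketch of the renamed helper (a hypothetical caller, not part of this commit), using the same power-of-2 alignment idiom that appears in the hunks above:

/* Hypothetical caller: check that a sector is aligned to the device zone size. */
static bool example_sector_zone_aligned(struct block_device *bdev, sector_t sector)
{
	sector_t zone_sectors = bdev_zone_sectors(bdev);

	/* zone_sectors is a power-of-2 number of 512B sectors (0 if not zoned) */
	if (!zone_sectors)
		return true;
	return !(sector & (zone_sectors - 1));
}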