Commit e1defc4f authored by Martin K. Petersen, committed by Jens Axboe

block: Do away with the notion of hardsect_size

Until now we have had a 1:1 mapping between a storage device's physical
block size and the logical block size used when addressing the device.
With 4KB-sector SATA drives coming out, that will no longer be the
case.  The physical sector size will be 4KB but the logical block size
will remain 512 bytes.  Hence we need to distinguish between the
physical block size and the logical block size.

This patch renames hardsect_size to logical_block_size.
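
As a quick illustration of the renamed interface, here is a minimal,
hypothetical driver snippet (the foo_* names and FOO_SECTOR_SIZE are
made up; only blk_queue_logical_block_size() comes from this patch):

	#include <linux/blkdev.h>

	#define FOO_SECTOR_SIZE	512	/* hypothetical device block size */

	static void foo_setup_queue(struct request_queue *q)
	{
		/* Previously: blk_queue_hardsect_size(q, FOO_SECTOR_SIZE); */
		blk_queue_logical_block_size(q, FOO_SECTOR_SIZE);

		/*
		 * Userspace still reads this value via ioctl(fd, BLKSSZGET, &sz)
		 * and via /sys/block/<dev>/queue/logical_block_size; the old
		 * hw_sector_size sysfs attribute is kept as an alias.
		 */
	}
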
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 9bd7de51
@@ -250,7 +250,7 @@ axon_ram_probe(struct of_device *device, const struct of_device_id *device_id)
 	set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT);
 	blk_queue_make_request(bank->disk->queue, axon_ram_make_request);
-	blk_queue_hardsect_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
+	blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
 	add_disk(bank->disk);
 	bank->irq_id = irq_of_parse_and_map(device->node, 0);
...
@@ -340,7 +340,7 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
 		kobject_uevent(&bi->kobj, KOBJ_ADD);
 		bi->flags |= INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE;
-		bi->sector_size = disk->queue->hardsect_size;
+		bi->sector_size = queue_logical_block_size(disk->queue);
 		disk->integrity = bi;
 	} else
 		bi = disk->integrity;
...
@@ -134,7 +134,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
 	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
-	blk_queue_hardsect_size(q, 512);
+	blk_queue_logical_block_size(q, 512);
 	blk_queue_dma_alignment(q, 511);
 	blk_queue_congestion_threshold(q);
 	q->nr_batching = BLK_BATCH_REQ;
@@ -288,21 +288,20 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 /**
- * blk_queue_hardsect_size - set hardware sector size for the queue
+ * blk_queue_logical_block_size - set logical block size for the queue
  * @q: the request queue for the device
- * @size: the hardware sector size, in bytes
+ * @size: the logical block size, in bytes
  *
  * Description:
- *   This should typically be set to the lowest possible sector size
- *   that the hardware can operate on (possible without reverting to
- *   even internal read-modify-write operations). Usually the default
- *   of 512 covers most hardware.
+ *   This should be set to the lowest possible block size that the
+ *   storage device can address. The default of 512 covers most
+ *   hardware.
  **/
-void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
+void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
 {
-	q->hardsect_size = size;
+	q->logical_block_size = size;
 }
-EXPORT_SYMBOL(blk_queue_hardsect_size);
+EXPORT_SYMBOL(blk_queue_logical_block_size);
 /*
  * Returns the minimum that is _not_ zero, unless both are zero.
@@ -324,7 +323,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 	t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments);
 	t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments);
 	t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size);
-	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
+	t->logical_block_size = max(t->logical_block_size, b->logical_block_size);
 	if (!t->queue_lock)
 		WARN_ON_ONCE(1);
 	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
...
@@ -100,9 +100,9 @@ static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
 	return queue_var_show(max_sectors_kb, (page));
 }
-static ssize_t queue_hw_sector_size_show(struct request_queue *q, char *page)
+static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
 {
-	return queue_var_show(q->hardsect_size, page);
+	return queue_var_show(queue_logical_block_size(q), page);
 }
 static ssize_t
@@ -249,7 +249,12 @@ static struct queue_sysfs_entry queue_iosched_entry = {
 static struct queue_sysfs_entry queue_hw_sector_size_entry = {
 	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
-	.show = queue_hw_sector_size_show,
+	.show = queue_logical_block_size_show,
+};
+static struct queue_sysfs_entry queue_logical_block_size_entry = {
+	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
+	.show = queue_logical_block_size_show,
 };
 static struct queue_sysfs_entry queue_nonrot_entry = {
@@ -283,6 +288,7 @@ static struct attribute *default_attrs[] = {
 	&queue_max_sectors_entry.attr,
 	&queue_iosched_entry.attr,
 	&queue_hw_sector_size_entry.attr,
+	&queue_logical_block_size_entry.attr,
 	&queue_nonrot_entry.attr,
 	&queue_nomerges_entry.attr,
 	&queue_rq_affinity_entry.attr,
...
@@ -763,7 +763,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
 		return compat_put_int(arg, block_size(bdev));
 	case BLKSSZGET: /* get block device hardware sector size */
-		return compat_put_int(arg, bdev_hardsect_size(bdev));
+		return compat_put_int(arg, bdev_logical_block_size(bdev));
 	case BLKSECTGET:
 		return compat_put_ushort(arg,
 				bdev_get_queue(bdev)->max_sectors);
...
@@ -311,7 +311,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */
 		return put_int(arg, block_size(bdev));
 	case BLKSSZGET: /* get block device hardware sector size */
-		return put_int(arg, bdev_hardsect_size(bdev));
+		return put_int(arg, bdev_logical_block_size(bdev));
 	case BLKSECTGET:
 		return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
 	case BLKRASET:
...
@@ -1389,8 +1389,8 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
 	disk->queue->queuedata = h;
-	blk_queue_hardsect_size(disk->queue,
+	blk_queue_logical_block_size(disk->queue,
 			h->drv[drv_index].block_size);
 	/* Make sure all queue data is written out before */
 	/* setting h->drv[drv_index].queue, as setting this */
@@ -2298,7 +2298,7 @@ static int cciss_revalidate(struct gendisk *disk)
 	cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size,
 			inq_buff, drv);
-	blk_queue_hardsect_size(drv->queue, drv->block_size);
+	blk_queue_logical_block_size(drv->queue, drv->block_size);
 	set_capacity(disk, drv->nr_blocks);
 	kfree(inq_buff);
...
@@ -474,7 +474,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
 		disk->fops = &ida_fops;
 		if (j && !drv->nr_blks)
 			continue;
-		blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
+		blk_queue_logical_block_size(hba[i]->queue, drv->blk_size);
 		set_capacity(disk, drv->nr_blks);
 		disk->queue = hba[i]->queue;
 		disk->private_data = drv;
@@ -1546,7 +1546,7 @@ static int revalidate_allvol(ctlr_info_t *host)
 		drv_info_t *drv = &host->drv[i];
 		if (i && !drv->nr_blks)
 			continue;
-		blk_queue_hardsect_size(host->queue, drv->blk_size);
+		blk_queue_logical_block_size(host->queue, drv->blk_size);
 		set_capacity(disk, drv->nr_blks);
 		disk->queue = host->queue;
 		disk->private_data = drv;
...
@@ -724,7 +724,7 @@ static int __init hd_init(void)
 	blk_queue_max_sectors(hd_queue, 255);
 	init_timer(&device_timer);
 	device_timer.function = hd_times_out;
-	blk_queue_hardsect_size(hd_queue, 512);
+	blk_queue_logical_block_size(hd_queue, 512);
 	if (!NR_HD) {
 		/*
...
@@ -996,7 +996,7 @@ static int mg_probe(struct platform_device *plat_dev)
 		goto probe_err_6;
 	}
 	blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
-	blk_queue_hardsect_size(host->breq, MG_SECTOR_SIZE);
+	blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);
 	init_timer(&host->timer);
 	host->timer.function = mg_times_out;
...
@@ -2657,7 +2657,7 @@ static void pkt_init_queue(struct pktcdvd_device *pd)
 	struct request_queue *q = pd->disk->queue;
 	blk_queue_make_request(q, pkt_make_request);
-	blk_queue_hardsect_size(q, CD_FRAMESIZE);
+	blk_queue_logical_block_size(q, CD_FRAMESIZE);
 	blk_queue_max_sectors(q, PACKET_MAX_SECTORS);
 	blk_queue_merge_bvec(q, pkt_merge_bvec);
 	q->queuedata = pd;
...
@@ -477,7 +477,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
 	blk_queue_max_sectors(queue, dev->bounce_size >> 9);
 	blk_queue_segment_boundary(queue, -1UL);
 	blk_queue_dma_alignment(queue, dev->blk_size-1);
-	blk_queue_hardsect_size(queue, dev->blk_size);
+	blk_queue_logical_block_size(queue, dev->blk_size);
 	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
 			ps3disk_prepare_flush);
...
@@ -722,7 +722,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
 	/*
 	 * build the command
 	 *
-	 * The call to blk_queue_hardsect_size() guarantees that request
+	 * The call to blk_queue_logical_block_size() guarantees that request
 	 * is aligned, but it is given in terms of 512 byte units, always.
 	 */
 	block = blk_rq_pos(rq) >> lun->capacity.bshift;
@@ -1749,7 +1749,7 @@ static int ub_bd_revalidate(struct gendisk *disk)
 	ub_revalidate(lun->udev, lun);
 	/* XXX Support sector size switching like in sr.c */
-	blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
+	blk_queue_logical_block_size(disk->queue, lun->capacity.bsize);
 	set_capacity(disk, lun->capacity.nsec);
 	// set_disk_ro(sdkp->disk, lun->readonly);
@@ -2324,7 +2324,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
 	blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
 	blk_queue_segment_boundary(q, 0xffffffff);	/* Dubious. */
 	blk_queue_max_sectors(q, UB_MAX_SECTORS);
-	blk_queue_hardsect_size(q, lun->capacity.bsize);
+	blk_queue_logical_block_size(q, lun->capacity.bsize);
 	lun->disk = disk;
 	q->queuedata = lun;
...
@@ -347,7 +347,7 @@ static int virtblk_probe(struct virtio_device *vdev)
 				offsetof(struct virtio_blk_config, blk_size),
 				&blk_size);
 	if (!err)
-		blk_queue_hardsect_size(vblk->disk->queue, blk_size);
+		blk_queue_logical_block_size(vblk->disk->queue, blk_size);
 	add_disk(vblk->disk);
 	return 0;
...
@@ -344,7 +344,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
 	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
 	/* Hard sector size and max sectors impersonate the equiv. hardware. */
-	blk_queue_hardsect_size(rq, sector_size);
+	blk_queue_logical_block_size(rq, sector_size);
 	blk_queue_max_sectors(rq, 512);
 	/* Each segment in a request is up to an aligned page in size. */
...
@@ -984,7 +984,7 @@ static int __devinit ace_setup(struct ace_device *ace)
 	ace->queue = blk_init_queue(ace_request, &ace->lock);
 	if (ace->queue == NULL)
 		goto err_blk_initq;
-	blk_queue_hardsect_size(ace->queue, 512);
+	blk_queue_logical_block_size(ace->queue, 512);
 	/*
 	 * Allocate and initialize GD structure
...
@@ -739,7 +739,7 @@ static void __devinit probe_gdrom_setupdisk(void)
 static int __devinit probe_gdrom_setupqueue(void)
 {
-	blk_queue_hardsect_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
+	blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
 	/* using DMA so memory will need to be contiguous */
 	blk_queue_max_hw_segments(gd.gdrom_rq, 1);
 	/* set a large max size to get most from DMA */
...
@@ -469,8 +469,8 @@ static void vio_handle_cd_event(struct HvLpEvent *event)
 	case viocdopen:
 		if (event->xRc == 0) {
 			di = &viocd_diskinfo[bevent->disk];
-			blk_queue_hardsect_size(di->viocd_disk->queue,
+			blk_queue_logical_block_size(di->viocd_disk->queue,
 					bevent->block_size);
 			set_capacity(di->viocd_disk,
 					bevent->media_size *
 					bevent->block_size / 512);
...
@@ -71,7 +71,7 @@ static int raw_open(struct inode *inode, struct file *filp)
 	err = bd_claim(bdev, raw_open);
 	if (err)
 		goto out1;
-	err = set_blocksize(bdev, bdev_hardsect_size(bdev));
+	err = set_blocksize(bdev, bdev_logical_block_size(bdev));
 	if (err)
 		goto out2;
 	filp->f_flags |= O_DIRECT;
...
@@ -182,7 +182,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
 				(sense->information[2] << 8) |
 				(sense->information[3]);
-		if (drive->queue->hardsect_size == 2048)
+		if (queue_logical_block_size(drive->queue) == 2048)
 			/* device sector size is 2K */
 			sector <<= 2;
@@ -737,7 +737,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
 	struct request_queue *q = drive->queue;
 	int write = rq_data_dir(rq) == WRITE;
 	unsigned short sectors_per_frame =
-		queue_hardsect_size(q) >> SECTOR_BITS;
+		queue_logical_block_size(q) >> SECTOR_BITS;
 	ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, "
 				  "secs_per_frame: %u",
@@ -1021,8 +1021,8 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
 	/* save a private copy of the TOC capacity for error handling */
 	drive->probed_capacity = toc->capacity * sectors_per_frame;
-	blk_queue_hardsect_size(drive->queue,
+	blk_queue_logical_block_size(drive->queue,
 			sectors_per_frame << SECTOR_BITS);
 	/* first read just the header, so we know how long the TOC is */
 	stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
@@ -1338,7 +1338,7 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
 /* standard prep_rq_fn that builds 10 byte cmds */
 static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
 {
-	int hard_sect = queue_hardsect_size(q);
+	int hard_sect = queue_logical_block_size(q);
 	long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
 	unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
@@ -1543,7 +1543,7 @@ static int ide_cdrom_setup(ide_drive_t *drive)
 	nslots = ide_cdrom_probe_capabilities(drive);
-	blk_queue_hardsect_size(q, CD_FRAMESIZE);
+	blk_queue_logical_block_size(q, CD_FRAMESIZE);
 	if (ide_cdrom_register(drive, nslots)) {
 		printk(KERN_ERR PFX "%s: %s failed to register device with the"
...
@@ -232,7 +232,7 @@ static struct page *read_sb_page(mddev_t *mddev, long offset,
 		target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
 		if (sync_page_io(rdev->bdev, target,
-				roundup(size, bdev_hardsect_size(rdev->bdev)),
+				roundup(size, bdev_logical_block_size(rdev->bdev)),
 				page, READ)) {
 			page->index = index;
 			attach_page_buffers(page, NULL); /* so that free_buffer will
@@ -287,7 +287,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
 		int size = PAGE_SIZE;
 		if (page->index == bitmap->file_pages-1)
 			size = roundup(bitmap->last_page_size,
-					bdev_hardsect_size(rdev->bdev));
+					bdev_logical_block_size(rdev->bdev));
 		/* Just make sure we aren't corrupting data or
 		 * metadata
 		 */
...
@@ -178,7 +178,7 @@ static int set_chunk_size(struct dm_exception_store *store,
 	}
 	/* Validate the chunk size against the device block size */
-	if (chunk_size_ulong % (bdev_hardsect_size(store->cow->bdev) >> 9)) {
+	if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
 		*error = "Chunk size is not a multiple of device blocksize";
 		return -EINVAL;
 	}
...
@@ -413,7 +413,8 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
 	 * Buffer holds both header and bitset.
 	 */
 	buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
-			bitset_size, ti->limits.hardsect_size);
+			bitset_size,
+			ti->limits.logical_block_size);
 	if (buf_size > dev->bdev->bd_inode->i_size) {
 		DMWARN("log device %s too small: need %llu bytes",
...
@@ -282,7 +282,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	 */
 	if (!ps->store->chunk_size) {
 		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
-				bdev_hardsect_size(ps->store->cow->bdev) >> 9);
+				bdev_logical_block_size(ps->store->cow->bdev) >> 9);
 		ps->store->chunk_mask = ps->store->chunk_size - 1;
 		ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
 		chunk_size_supplied = 0;
...
@@ -108,7 +108,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
 	lhs->max_hw_segments =
 		min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);
-	lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);
+	lhs->logical_block_size = max(lhs->logical_block_size,
+				      rhs->logical_block_size);
 	lhs->max_segment_size =
 		min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
@@ -529,7 +530,8 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 	rs->max_hw_segments =
 		min_not_zero(rs->max_hw_segments, q->max_hw_segments);
-	rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
+	rs->logical_block_size = max(rs->logical_block_size,