Commit 1ce48904 authored by Linus Torvalds

Merge branch 'block-2.6.24' of git://git.kernel.dk/data/git/linux-2.6-block

* 'block-2.6.24' of git://git.kernel.dk/data/git/linux-2.6-block: (37 commits)
  [BLOCK] Fix failing compile with BLK_DEV_IO_TRACE=n
  compat_ioctl: move floppy handlers to block/compat_ioctl.c
  compat_ioctl: move cdrom handlers to block/compat_ioctl.c
  compat_ioctl: move BLKPG handling to block/compat_ioctl.c
  compat_ioctl: move hdio calls to block/compat_ioctl.c
  compat_ioctl: handle blk_trace ioctls
  compat_ioctl: add compat_blkdev_driver_ioctl()
  compat_ioctl: move common block ioctls to compat_blkdev_ioctl
  Sysace: Don't enable IRQ until after interrupt handler is registered
  Sysace: sparse fixes
  Sysace: Minor coding convention fixup
  drivers/block/umem: use DRIVER_NAME where appropriate
  drivers/block/umem: trim trailing whitespace
  drivers/block/umem: minor cleanups
  drivers/block/umem: use dev_printk()
  drivers/block/umem: move private include away from include/linux
  Sysace: Labels in C code should not be indented.
  Sysace: Add of_platform_bus binding
  Sysace: Move IRQ handler registration to occur after FSM is initialized
  Sysace: minor rework and cleanup changes
  ...
parents 55982fd1 780513ec
......@@ -477,9 +477,9 @@ With this multipage bio design:
the same bi_io_vec array, but with the index and size accordingly modified)
- A linked list of bios is used as before for unrelated merges (*) - this
avoids reallocs and makes independent completions easier to handle.
- Code that traverses the req list needs to make a distinction between
segments of a request (bio_for_each_segment) and the distinct completion
units/bios (rq_for_each_bio).
- Code that traverses the req list can find all the segments of a bio
by using rq_for_each_segment. This handles the fact that a request
has multiple bios, each of which can have multiple segments.
- Drivers which can't process a large bio in one shot can use the bi_idx
field to keep track of the next bio_vec entry to process.
(e.g. a 1MB bio_vec needs to be handled in max 128kB chunks for IDE)
......@@ -664,14 +664,14 @@ in lvm or md.
3.2.1 Traversing segments and completion units in a request
The macros bio_for_each_segment() and rq_for_each_bio() should be used for
traversing the bios in the request list (drivers should avoid directly
trying to do it themselves). Using these helpers should also make it easier
to cope with block changes in the future.
The macro rq_for_each_segment() should be used for traversing the bios
in the request list (drivers should avoid directly trying to do it
themselves). Using these helpers should also make it easier to cope
with block changes in the future.
rq_for_each_bio(bio, rq)
bio_for_each_segment(bio_vec, bio, i)
/* bio_vec is now current segment */
struct req_iterator iter;
rq_for_each_segment(bio_vec, rq, iter)
/* bio_vec is now current segment */
I/O completion callbacks are per-bio rather than per-segment, so drivers
that traverse bio chains on completion need to keep that in mind. Drivers
......
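The documentation change above replaces the two-level rq_for_each_bio()/bio_for_each_segment() walk with a single rq_for_each_segment() iterator. A minimal sketch of a driver loop using it, following the 2.6.24 iterator signature shown in the hunk; do_transfer() is a hypothetical helper and not part of this patch:

        struct req_iterator iter;
        struct bio_vec *bvec;

        rq_for_each_segment(bvec, rq, iter) {
                /* each iteration yields one bio_vec, regardless of which
                 * bio in the request it belongs to (assumes a lowmem page;
                 * highmem pages would need kmap) */
                void *buf = page_address(bvec->bv_page) + bvec->bv_offset;

                do_transfer(buf, bvec->bv_len);   /* hypothetical helper */
        }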
......@@ -86,8 +86,15 @@ extern int sys_ioprio_get(int, int);
#error "Unsupported arch"
#endif
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio);
_syscall2(int, ioprio_get, int, which, int, who);
static inline int ioprio_set(int which, int who, int ioprio)
{
return syscall(__NR_ioprio_set, which, who, ioprio);
}
static inline int ioprio_get(int which, int who)
{
return syscall(__NR_ioprio_get, which, who);
}
enum {
IOPRIO_CLASS_NONE,
......
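The _syscall3()/_syscall2() macros in the documentation's example program are replaced above by plain syscall(2) wrappers. A short usage sketch of those wrappers, assuming the IOPRIO_WHO_PROCESS, IOPRIO_CLASS_BE and IOPRIO_CLASS_SHIFT definitions that appear elsewhere in the same example:

        #include <stdio.h>
        #include <unistd.h>
        #include <sys/syscall.h>   /* __NR_ioprio_set / __NR_ioprio_get */

        int main(void)
        {
                /* best-effort class, priority level 4, current process */
                int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 4;

                if (ioprio_set(IOPRIO_WHO_PROCESS, 0, prio) == -1)
                        perror("ioprio_set");

                printf("ioprio is now 0x%x\n",
                       ioprio_get(IOPRIO_WHO_PROCESS, 0));
                return 0;
        }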
......@@ -4156,6 +4156,13 @@ W: http://oss.sgi.com/projects/xfs
T: git git://oss.sgi.com:8090/xfs/xfs-2.6.git
S: Supported
XILINX SYSTEMACE DRIVER
P: Grant Likely
M: grant.likely@secretlab.ca
W: http://www.secretlab.ca/
L: linux-kernel@vger.kernel.org
S: Maintained
XILINX UARTLITE SERIAL DRIVER
P: Peter Korsgaard
M: jacmet@sunsite.dk
......
......@@ -11,3 +11,4 @@ obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
obj-$(CONFIG_COMPAT) += compat_ioctl.o
......@@ -312,33 +312,26 @@ static struct rchan_callbacks blk_relay_callbacks = {
/*
* Setup everything required to start tracing
*/
static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
char __user *arg)
int do_blk_trace_setup(struct request_queue *q, struct block_device *bdev,
struct blk_user_trace_setup *buts)
{
struct blk_user_trace_setup buts;
struct blk_trace *old_bt, *bt = NULL;
struct dentry *dir = NULL;
char b[BDEVNAME_SIZE];
int ret, i;
if (copy_from_user(&buts, arg, sizeof(buts)))
return -EFAULT;
if (!buts.buf_size || !buts.buf_nr)
if (!buts->buf_size || !buts->buf_nr)
return -EINVAL;
strcpy(buts.name, bdevname(bdev, b));
strcpy(buts->name, bdevname(bdev, b));
/*
* some device names have larger paths - convert the slashes
* to underscores for this to work as expected
*/
for (i = 0; i < strlen(buts.name); i++)
if (buts.name[i] == '/')
buts.name[i] = '_';
if (copy_to_user(arg, &buts, sizeof(buts)))
return -EFAULT;
for (i = 0; i < strlen(buts->name); i++)
if (buts->name[i] == '/')
buts->name[i] = '_';
ret = -ENOMEM;
bt = kzalloc(sizeof(*bt), GFP_KERNEL);
......@@ -350,7 +343,7 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
goto err;
ret = -ENOENT;
dir = blk_create_tree(buts.name);
dir = blk_create_tree(buts->name);
if (!dir)
goto err;
......@@ -363,20 +356,21 @@ static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
if (!bt->dropped_file)
goto err;
bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, &blk_relay_callbacks, bt);
bt->rchan = relay_open("trace", dir, buts->buf_size,
buts->buf_nr, &blk_relay_callbacks, bt);
if (!bt->rchan)
goto err;
bt->act_mask = buts.act_mask;
bt->act_mask = buts->act_mask;
if (!bt->act_mask)
bt->act_mask = (u16) -1;
bt->start_lba = buts.start_lba;
bt->end_lba = buts.end_lba;
bt->start_lba = buts->start_lba;
bt->end_lba = buts->end_lba;
if (!bt->end_lba)
bt->end_lba = -1ULL;
bt->pid = buts.pid;
bt->pid = buts->pid;
bt->trace_state = Blktrace_setup;
ret = -EBUSY;
......@@ -401,6 +395,26 @@ err:
return ret;
}
static int blk_trace_setup(struct request_queue *q, struct block_device *bdev,
char __user *arg)
{
struct blk_user_trace_setup buts;
int ret;
ret = copy_from_user(&buts, arg, sizeof(buts));
if (ret)
return -EFAULT;
ret = do_blk_trace_setup(q, bdev, &buts);
if (ret)
return ret;
if (copy_to_user(arg, &buts, sizeof(buts)))
return -EFAULT;
return 0;
}
static int blk_trace_startstop(struct request_queue *q, int start)
{
struct blk_trace *bt;
......
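The split above separates the user-space copy from the core setup: do_blk_trace_setup() now works on a kernel-space blk_user_trace_setup, so entry points other than the native ioctl (such as the 32-bit compat path added elsewhere in this series) can reuse it. A sketch of such a caller; compat_collect_args() and compat_return_name() are hypothetical placeholders for the 32-bit copy-in/copy-out:

        static int example_compat_trace_setup(struct request_queue *q,
                                              struct block_device *bdev,
                                              void __user *uarg)
        {
                struct blk_user_trace_setup buts;
                int ret;

                ret = compat_collect_args(uarg, &buts);   /* hypothetical copy-in */
                if (ret)
                        return ret;

                ret = do_blk_trace_setup(q, bdev, &buts);
                if (ret)
                        return ret;

                /* do_blk_trace_setup() fills buts.name; report it back */
                return compat_return_name(uarg, buts.name);   /* hypothetical copy-out */
        }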
......@@ -217,6 +217,10 @@ int blkdev_driver_ioctl(struct inode *inode, struct file *file,
}
EXPORT_SYMBOL_GPL(blkdev_driver_ioctl);
/*
* always keep this in sync with compat_blkdev_ioctl() and
* compat_blkdev_locked_ioctl()
*/
int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
unsigned long arg)
{
......@@ -284,21 +288,4 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
return blkdev_driver_ioctl(inode, file, disk, cmd, arg);
}
/* Most of the generic ioctls are handled in the normal fallback path.
This assumes the blkdev's low level compat_ioctl always returns
ENOIOCTLCMD for unknown ioctls. */
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
struct block_device *bdev = file->f_path.dentry->d_inode->i_bdev;
struct gendisk *disk = bdev->bd_disk;
int ret = -ENOIOCTLCMD;
if (disk->fops->compat_ioctl) {
lock_kernel();
ret = disk->fops->compat_ioctl(file, cmd, arg);
unlock_kernel();
}
return ret;
}
EXPORT_SYMBOL_GPL(blkdev_ioctl);
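The compat_blkdev_ioctl() fallback removed above moves into block/compat_ioctl.c (added by this series, not shown here). The contract in the deleted comment still holds: a driver's ->compat_ioctl must return -ENOIOCTLCMD for commands it does not handle so the generic code can fall back. An illustrative driver-side sketch, with example_getgeo() as a hypothetical helper:

        static long example_compat_ioctl(struct file *file, unsigned int cmd,
                                         unsigned long arg)
        {
                switch (cmd) {
                case HDIO_GETGEO:
                        /* 32-bit pointer argument converted with compat_ptr() */
                        return example_getgeo(file, compat_ptr(arg));
                default:
                        /* let the block layer try its generic handlers */
                        return -ENOIOCTLCMD;
                }
        }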
......@@ -42,6 +42,9 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
static void init_request_from_bio(struct request *req, struct bio *bio);
static int __make_request(struct request_queue *q, struct bio *bio);
static struct io_context *current_io_context(gfp_t gfp_flags, int node);
static void blk_recalc_rq_segments(struct request *rq);
static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
/*
* For the allocated request tables
......@@ -428,7 +431,6 @@ static void queue_flush(struct request_queue *q, unsigned which)
static inline struct request *start_ordered(struct request_queue *q,
struct request *rq)
{
q->bi_size = 0;
q->orderr = 0;
q->ordered = q->next_ordered;
q->ordseq |= QUEUE_ORDSEQ_STARTED;
......@@ -525,56 +527,36 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
return 1;
}
static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
{
struct request_queue *q = bio->bi_private;
/*
* This is dry run, restore bio_sector and size. We'll finish
* this request again with the original bi_end_io after an
* error occurs or post flush is complete.
*/
q->bi_size += bytes;
if (bio->bi_size)
return 1;
/* Reset bio */
set_bit(BIO_UPTODATE, &bio->bi_flags);
bio->bi_size = q->bi_size;
bio->bi_sector -= (q->bi_size >> 9);
q->bi_size = 0;
return 0;
}
static int ordered_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, int error)
static void req_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, int error)
{
struct request_queue *q = rq->q;
bio_end_io_t *endio;
void *private;
if (&q->bar_rq != rq)
return 0;
/*
* Okay, this is the barrier request in progress, dry finish it.
*/
if (error && !q->orderr)
q->orderr = error;
if (&q->bar_rq != rq) {
if (error)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
error = -EIO;
endio = bio->bi_end_io;
private = bio->bi_private;
bio->bi_end_io = flush_dry_bio_endio;
bio->bi_private = q;
bio_endio(bio, nbytes, error);
if (unlikely(nbytes > bio->bi_size)) {
printk("%s: want %u bytes done, only %u left\n",
__FUNCTION__, nbytes, bio->bi_size);
nbytes = bio->bi_size;
}
bio->bi_end_io = endio;
bio->bi_private = private;
bio->bi_size -= nbytes;
bio->bi_sector += (nbytes >> 9);
if (bio->bi_size == 0)
bio_endio(bio, error);
} else {
return 1;
/*
* Okay, this is the barrier request in progress, just
* record the error;
*/
if (error && !q->orderr)
q->orderr = error;
}
}
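req_bio_endio() above becomes the single place where the request completion path finishes the bios attached to a request, including the barrier dry-run case that flush_dry_bio_endio() used to handle. Drivers do not call it directly; they keep using the usual completion helpers. A minimal sketch of a driver completing nsect sectors of a request with the 2.6.24-era API (locking and error handling trimmed):

        static void example_complete(struct request *rq, int uptodate, int nsect)
        {
                /* end_that_request_first() walks the bios and, through the
                 * request layer, ends each one once it is fully transferred */
                if (end_that_request_first(rq, uptodate, nsect))
                        return;         /* part of the request is still pending */

                add_disk_randomness(rq->rq_disk);
                blkdev_dequeue_request(rq);
                end_that_request_last(rq, uptodate);
        }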
/**
......@@ -1220,16 +1202,40 @@ EXPORT_SYMBOL(blk_dump_rq_flags);
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
struct request rq;
struct bio *nxt = bio->bi_next;
rq.q = q;
rq.bio = rq.biotail = bio;
bio->bi_next = NULL;
blk_recalc_rq_segments(&rq);
bio->bi_next = nxt;
bio->bi_phys_segments = rq.nr_phys_segments;
bio->bi_hw_segments = rq.nr_hw_segments;
bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
static void blk_recalc_rq_segments(struct request *rq)
{
int nr_phys_segs;
int nr_hw_segs;
unsigned int phys_size;
unsigned int hw_size;
struct bio_vec *bv, *bvprv = NULL;
int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
int seg_size;
int hw_seg_size;
int cluster;
struct req_iterator iter;
int high, highprv = 1;
struct request_queue *q = rq->q;
if (unlikely(!bio->bi_io_vec))
if (!rq->bio)
return;
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0;
bio_for_each_segment(bv, bio, i) {
hw_seg_size = seg_size = 0;
phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
rq_for_each_segment(bv, rq, iter) {
/*
* the trick here is making sure that a high page is never
* considered part of another segment, since that might
......@@ -1255,12 +1261,13 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
}
new_segment:
if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
!BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) {
!BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
hw_seg_size += bv->bv_len;
} else {
else {
new_hw_segment:
if (hw_seg_size > bio->bi_hw_front_size)
bio->bi_hw_front_size = hw_seg_size;
if (nr_hw_segs == 1 &&
hw_seg_size > rq->bio->bi_hw_front_size)
rq->bio->bi_hw_front_size = hw_seg_size;
hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
nr_hw_segs++;
}
......@@ -1270,15 +1277,15 @@ new_hw_segment:
seg_size = bv->bv_len;
highprv = high;
}
if (hw_seg_size > bio->bi_hw_back_size)
bio->bi_hw_back_size = hw_seg_size;
if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size)
bio->bi_hw_front_size = hw_seg_size;
bio->bi_phys_segments = nr_phys_segs;
bio->bi_hw_segments = nr_hw_segs;
bio->bi_flags |= (1 << BIO_SEG_VALID);
if (nr_hw_segs == 1 &&
hw_seg_size > rq->bio->bi_hw_front_size)
rq->bio->bi_hw_front_size = hw_seg_size;
if (hw_seg_size > rq->biotail->bi_hw_back_size)
rq->biotail->bi_hw_back_size = hw_seg_size;
rq->nr_phys_segments = nr_phys_segs;
rq->nr_hw_segments = nr_hw_segs;
}
EXPORT_SYMBOL(blk_recount_segments);
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
......@@ -1325,8 +1332,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
struct scatterlist *sg)
{
struct bio_vec *bvec, *bvprv;
struct bio *bio;
int nsegs, i, cluster;
struct req_iterator iter;
int nsegs, cluster;
nsegs = 0;
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
......@@ -1335,35 +1342,30 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
* for each bio in rq
*/
bvprv = NULL;
rq_for_each_bio(bio, rq) {
/*
* for each segment in bio
*/
bio_for_each_segment(bvec, bio, i) {
int nbytes = bvec->bv_len;
rq_for_each_segment(bvec, rq, iter) {
int nbytes = bvec->bv_len;
if (bvprv && cluster) {
if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
goto new_segment;
if (bvprv && cluster) {
if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
goto new_segment;
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
goto new_segment;
if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
goto new_segment;
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
goto new_segment;
if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
goto new_segment;
sg[nsegs - 1].length += nbytes;
} else {
sg[nsegs - 1].length += nbytes;
} else {
new_segment:
memset(&sg[nsegs],0,sizeof(struct scatterlist));
sg[nsegs].page = bvec->bv_page;
sg[nsegs].length = nbytes;
sg[nsegs].offset = bvec->bv_offset;
memset(&sg[nsegs],0,sizeof(struct scatterlist));
sg[nsegs].page = bvec->bv_page;
sg[nsegs].length = nbytes;
sg[nsegs].offset = bvec->bv_offset;
nsegs++;
}
bvprv = bvec;
} /* segments in bio */
} /* bios in rq */
nsegs++;
}
bvprv = bvec;
} /* segments in rq */
return nsegs;
}
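blk_rq_map_sg() above now builds the scatterlist with a single rq_for_each_segment() walk instead of the nested bio/segment loops. A minimal sketch of the usual caller pattern; MY_MAX_SEGS and dev are illustrative driver-side names:

        struct scatterlist sg[MY_MAX_SEGS];
        int nents;

        nents = blk_rq_map_sg(q, rq, sg);
        nents = dma_map_sg(dev, sg, nents,
                           rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE
                                                    : DMA_FROM_DEVICE);
        /* program the controller with 'nents' DMA segments ... */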
......@@ -1420,7 +1422,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
return 1;
}
int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *bio)
static int ll_back_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
{
unsigned short max_sectors;
int len;
......@@ -1456,7 +1459,6 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *b
return ll_new_hw_segment(q, req, bio);
}
EXPORT_SYMBOL(ll_back_merge_fn);
static int ll_front_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
......@@ -2346,6 +2348,23 @@ static int __blk_rq_unmap_user(struct bio *bio)
return ret;
}
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
struct bio *bio)
{
if (!rq->bio)
blk_rq_bio_prep(q, rq, bio);
else if (!ll_back_merge_fn(q, rq, bio))
return -EINVAL;
else {
rq->biotail->bi_next = bio;
rq->biotail = bio;
rq->data_len += bio->bi_size;
}
return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
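blk_rq_append_bio() is exported above so request builders outside ll_rw_blk.c can attach a bio with the same prep/merge rules that __blk_rq_map_user() uses below. A short usage sketch, assuming bio has already been built (for example by bio_map_kern()) and with error handling trimmed:

        struct request *rq = blk_get_request(q, READ, GFP_KERNEL);

        if (blk_rq_append_bio(q, rq, bio)) {
                /* bio did not fit the request's merge limits */
                blk_put_request(rq);
                return -EINVAL;
        }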
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
void __user *ubuf, unsigned int len)
{
......@@ -2377,23 +2396,12 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
*/
bio_get(bio);
if (!rq->bio)
blk_rq_bio_prep(q, rq, bio);
else if (!ll_back_merge_fn(q, rq, bio)) {
ret = -EINVAL;
goto unmap_bio;
} else {
rq->biotail->bi_next = bio;
rq->biotail = bio;
rq->data_len += bio->bi_size;
}
return bio->bi_size;
ret = blk_rq_append_bio(q, rq, bio);
if (!ret)
return bio->bi_size;
unmap_bio:
/* if it was bounced we must call the end io function */
bio_endio(bio, bio->bi_size, 0);
bio_endio(bio, 0);
__blk_rq_unmap_user(orig_bio);
bio_put(bio);
return ret;
......@@ -2502,7 +2510,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
return PTR_ERR(bio);
if (bio->bi_size != len) {
bio_endio(bio, bio->bi_size, 0);
bio_endio(bio, 0);
bio_unmap_user(bio);
return -EINVAL;
}
......@@ -2912,15 +2920,9 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
req->errors = 0;
req->hard_sector = req->sector = bio->bi_sector;
req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
req->current_nr_sectors = req->hard_cur_sectors = bio_cur_sectors(bio);
req->nr_phys_segments = bio_phys_segments(req->q, bio);
req->nr_hw_segments = bio_hw_segments(req->q, bio);
req->buffer = bio_data(bio); /* see ->buffer comment above */
req->bio = req->biotail = bio;
req->ioprio = bio_prio(bio);
req->rq_disk = bio->bi_bdev->bd_disk;
req->start_time = jiffies;
blk_rq_bio_prep(req->q, req, bio);
}
static int __make_request(struct request_queue *q, struct bio *bio)
......@@ -3038,7 +3040,7 @@ out:
return 0;
end_io:
bio_endio(bio, nr_sectors << 9, err);
bio_endio(bio, err);
return 0;
}
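The two call sites above also show the bio_endio() signature change carried by this series: the residual-byte-count argument is gone, and partial completion is handled inside the request layer (req_bio_endio() earlier in this file). A minimal sketch of a completion callback under the new convention; example_end_io() is illustrative only:

        static void example_end_io(struct bio *bio, int error)
        {
                /* called exactly once, after the whole bio has completed;
                 * error is 0 on success */
                if (error)
                        printk(KERN_ERR "example: I/O error %d\n", error);

                bio_put(bio);
        }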
......@@ -3185,7 +3187,7 @@ static inline void __generic_make_request(struct bio *bio)
bdevname(bio->bi_bdev, b),
(long long) bio->bi_sector);
end_io:
bio_endio(bio, bio->bi_size, -EIO);
bio_endio(bio, -EIO);
break;
}
......@@ -3329,48 +3331,6 @@ void submit_bio(int rw, struct bio *bio)
EXPORT_SYMBOL(submit_bio);
static void blk_recalc_rq_segments(struct request *rq)
{
struct bio *bio, *prevbio = NULL;
int nr_phys_segs, nr_hw_segs;
unsigned int phys_size, hw_size;
struct request_queue *q = rq->q;
if (!rq->bio)
return;
phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
rq_for_each_bio(bio, rq) {
/* Force bio hw/phys segs to be recalculated. */
bio->bi_flags &= ~(1 << BIO_SEG_VALID);
nr_phys_segs += bio_phys_segments(q, bio);
nr_hw_segs += bio_hw_segments(q, bio);
if (prevbio) {
int pseg = phys_size + prevbio->bi_size + bio->bi_size;
int hseg = hw_size + prevbio->bi_size + bio->bi_size;
if (blk_phys_contig_segment(q, prevbio, bio) &&
pseg <= q->max_segment_size) {
nr_phys_segs--;
phys_size += prevbio->bi_size + bio->bi_size;
} else
phys_size = 0;
if (blk_hw_contig_segment(q, prevbio, bio) &&
hseg <= q->max_segment_size) {
nr_hw_segs--;
hw_size += prevbio->bi_size + bio->bi_size;
} else
hw_size = 0;
}
prevbio = bio;
}
rq->nr_phys_segments = nr_phys_segs;
rq->nr_hw_segments = nr_hw_segs;
}
static void blk_recalc_rq_sectors(struct request *rq, int nsect)
{