[kernel] r10276 - dists/trunk/linux-2.6/debian/patches/bugfix/all
Maximilian Attems
maks at alioth.debian.org
Wed Jan 30 09:36:23 UTC 2008
Author: maks
Date: Wed Jan 30 09:36:20 2008
New Revision: 10276
Log:
update to patch-2.6.24-git7
almost nothing to do
Added:
dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git7
- copied, changed from r10274, /dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git6
Removed:
dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git6
Copied: dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git7 (from r10274, /dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git6)
==============================================================================
--- /dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git6 (original)
+++ dists/trunk/linux-2.6/debian/patches/bugfix/all/patch-2.6.24-git7 Wed Jan 30 09:36:20 2008
@@ -7181,124 +7181,6 @@
M: linux390 at de.ibm.com
L: linux-s390 at vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
-diff --git a/Makefile b/Makefile
-index 189d8ef..f7bf864 100644
---- a/Makefile
-+++ b/Makefile
-@@ -169,7 +169,7 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
- -e s/arm.*/arm/ -e s/sa110/arm/ \
- -e s/s390x/s390/ -e s/parisc64/parisc/ \
- -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
-- -e s/sh[234].*/sh/ )
-+ -e s/sh.*/sh/ )
-
- # Cross compiling and selecting different set of gcc/bin-utils
- # ---------------------------------------------------------------------------
-@@ -520,6 +520,11 @@ KBUILD_CFLAGS += -g
- KBUILD_AFLAGS += -gdwarf-2
- endif
-
-+# We trigger additional mismatches with less inlining
-+ifdef CONFIG_DEBUG_SECTION_MISMATCH
-+KBUILD_CFLAGS += $(call cc-option, -fno-inline-functions-called-once)
-+endif
-+
- # Force gcc to behave correct even for buggy distributions
- KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector)
-
-@@ -793,7 +798,7 @@ define rule_vmlinux-modpost
- endef
-
- # vmlinux image - including updated kernel symbols
--vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) $(kallsyms.o) vmlinux.o FORCE
-+vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
- ifdef CONFIG_HEADERS_CHECK
- $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
- endif
-@@ -804,7 +809,9 @@ endif
- $(call if_changed_rule,vmlinux__)
- $(Q)rm -f .old_version
-
--vmlinux.o: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) $(kallsyms.o) FORCE
-+# build vmlinux.o first to catch section mismatch errors early
-+$(kallsyms.o): vmlinux.o
-+vmlinux.o: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) FORCE
- $(call if_changed_rule,vmlinux-modpost)
-
- # The actual objects are generated when descending,
-@@ -1021,9 +1028,14 @@ ifdef CONFIG_MODULES
- all: modules
-
- # Build modules
-+#
-+# A module can be listed more than once in obj-m resulting in
-+# duplicate lines in modules.order files. Those are removed
-+# using awk while concatenating to the final file.
-
- PHONY += modules
- modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
-+ $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
- @echo ' Building modules, stage 2.';
- $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
-
-@@ -1051,6 +1063,7 @@ _modinst_:
- rm -f $(MODLIB)/build ; \
- ln -s $(objtree) $(MODLIB)/build ; \
- fi
-+ @cp -f $(objtree)/modules.order $(MODLIB)/
- $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst
-
- # This depmod is only for convenience to give the initial
-@@ -1110,7 +1123,7 @@ clean: archclean $(clean-dirs)
- @find . $(RCS_FIND_IGNORE) \
- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
- -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
-- -o -name '*.symtypes' \) \
-+ -o -name '*.symtypes' -o -name 'modules.order' \) \
- -type f -print | xargs rm -f
-
- # mrproper - Delete all generated files, including .config
-@@ -1175,7 +1188,7 @@ help:
- @echo ' dir/ - Build all files in dir and below'
- @echo ' dir/file.[ois] - Build specified target only'
- @echo ' dir/file.ko - Build module including final link'
-- @echo ' rpm - Build a kernel as an RPM package'
-+ @echo ' prepare - Set up for building external modules'
- @echo ' tags/TAGS - Generate tags file for editors'
- @echo ' cscope - Generate cscope index'
- @echo ' kernelrelease - Output the release version string'
-@@ -1188,6 +1201,8 @@ help:
- @echo 'Static analysers'
- @echo ' checkstack - Generate a list of stack hogs'
- @echo ' namespacecheck - Name space analysis on compiled kernel'
-+ @echo ' versioncheck - Sanity check on version.h usage'
-+ @echo ' includecheck - Check for duplicate included header files'
- @echo ' export_report - List the usages of all exported symbols'
- @if [ -r $(srctree)/include/asm-$(SRCARCH)/Kbuild ]; then \
- echo ' headers_check - Sanity check on exported headers'; \
-@@ -1371,6 +1386,7 @@ define xtags
- if $1 --version 2>&1 | grep -iq exuberant; then \
- $(all-sources) | xargs $1 -a \
- -I __initdata,__exitdata,__acquires,__releases \
-+ -I __read_mostly,____cacheline_aligned,____cacheline_aligned_in_smp,____cacheline_internodealigned_in_smp \
- -I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL \
- --extra=+f --c-kinds=+px \
- --regex-asm='/^ENTRY\(([^)]*)\).*/\1/'; \
-@@ -1428,12 +1444,12 @@ tags: FORCE
- includecheck:
- find * $(RCS_FIND_IGNORE) \
- -name '*.[hcS]' -type f -print | sort \
-- | xargs $(PERL) -w scripts/checkincludes.pl
-+ | xargs $(PERL) -w $(srctree)/scripts/checkincludes.pl
-
- versioncheck:
- find * $(RCS_FIND_IGNORE) \
- -name '*.[hcS]' -type f -print | sort \
-- | xargs $(PERL) -w scripts/checkversion.pl
-+ | xargs $(PERL) -w $(srctree)/scripts/checkversion.pl
-
- namespacecheck:
- $(PERL) $(srctree)/scripts/namespace.pl
diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S
index 55c05b5..f13249b 100644
--- a/arch/alpha/kernel/vmlinux.lds.S
@@ -135872,6 +135754,21 @@
obj-y = io.o console.o setup.o network.o
+diff --git a/block/Makefile b/block/Makefile
+index 8261081..5a43c7d 100644
+--- a/block/Makefile
++++ b/block/Makefile
+@@ -2,7 +2,9 @@
+ # Makefile for the kernel block layer
+ #
+
+-obj-$(CONFIG_BLOCK) := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o
++obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
++ blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
++ blk-exec.o blk-merge.o ioctl.o genhd.o scsi_ioctl.o
+
+ obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
+ obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
diff --git a/block/as-iosched.c b/block/as-iosched.c
index cb5e53b..b201d16 100644
--- a/block/as-iosched.c
@@ -135996,6 +135893,4627 @@
}
}
+diff --git a/block/blk-barrier.c b/block/blk-barrier.c
+new file mode 100644
+index 0000000..5f74fec
+--- /dev/null
++++ b/block/blk-barrier.c
+@@ -0,0 +1,319 @@
++/*
++ * Functions related to barrier IO handling
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
++
++#include "blk.h"
++
++/**
++ * blk_queue_ordered - does this queue support ordered writes
++ * @q: the request queue
++ * @ordered: one of QUEUE_ORDERED_*
++ * @prepare_flush_fn: rq setup helper for cache flush ordered writes
++ *
++ * Description:
++ * For journalled file systems, doing ordered writes on a commit
++ * block instead of explicitly doing wait_on_buffer (which is bad
++ * for performance) can be a big win. Block drivers supporting this
++ * feature should call this function and indicate so.
++ *
++ **/
++int blk_queue_ordered(struct request_queue *q, unsigned ordered,
++ prepare_flush_fn *prepare_flush_fn)
++{
++ if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
++ prepare_flush_fn == NULL) {
++ printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
++ return -EINVAL;
++ }
++
++ if (ordered != QUEUE_ORDERED_NONE &&
++ ordered != QUEUE_ORDERED_DRAIN &&
++ ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
++ ordered != QUEUE_ORDERED_DRAIN_FUA &&
++ ordered != QUEUE_ORDERED_TAG &&
++ ordered != QUEUE_ORDERED_TAG_FLUSH &&
++ ordered != QUEUE_ORDERED_TAG_FUA) {
++ printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
++ return -EINVAL;
++ }
++
++ q->ordered = ordered;
++ q->next_ordered = ordered;
++ q->prepare_flush_fn = prepare_flush_fn;
++
++ return 0;
++}
++
++EXPORT_SYMBOL(blk_queue_ordered);
++
++/*
++ * Cache flushing for ordered writes handling
++ */
++inline unsigned blk_ordered_cur_seq(struct request_queue *q)
++{
++ if (!q->ordseq)
++ return 0;
++ return 1 << ffz(q->ordseq);
++}
++
++unsigned blk_ordered_req_seq(struct request *rq)
++{
++ struct request_queue *q = rq->q;
++
++ BUG_ON(q->ordseq == 0);
++
++ if (rq == &q->pre_flush_rq)
++ return QUEUE_ORDSEQ_PREFLUSH;
++ if (rq == &q->bar_rq)
++ return QUEUE_ORDSEQ_BAR;
++ if (rq == &q->post_flush_rq)
++ return QUEUE_ORDSEQ_POSTFLUSH;
++
++ /*
++ * !fs requests don't need to follow barrier ordering. Always
++ * put them at the front. This fixes the following deadlock.
++ *
++ * http://thread.gmane.org/gmane.linux.kernel/537473
++ */
++ if (!blk_fs_request(rq))
++ return QUEUE_ORDSEQ_DRAIN;
++
++ if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
++ (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
++ return QUEUE_ORDSEQ_DRAIN;
++ else
++ return QUEUE_ORDSEQ_DONE;
++}
++
++void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
++{
++ struct request *rq;
++
++ if (error && !q->orderr)
++ q->orderr = error;
++
++ BUG_ON(q->ordseq & seq);
++ q->ordseq |= seq;
++
++ if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
++ return;
++
++ /*
++ * Okay, sequence complete.
++ */
++ q->ordseq = 0;
++ rq = q->orig_bar_rq;
++
++ if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
++ BUG();
++}
++
++static void pre_flush_end_io(struct request *rq, int error)
++{
++ elv_completed_request(rq->q, rq);
++ blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
++}
++
++static void bar_end_io(struct request *rq, int error)
++{
++ elv_completed_request(rq->q, rq);
++ blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
++}
++
++static void post_flush_end_io(struct request *rq, int error)
++{
++ elv_completed_request(rq->q, rq);
++ blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
++}
++
++static void queue_flush(struct request_queue *q, unsigned which)
++{
++ struct request *rq;
++ rq_end_io_fn *end_io;
++
++ if (which == QUEUE_ORDERED_PREFLUSH) {
++ rq = &q->pre_flush_rq;
++ end_io = pre_flush_end_io;
++ } else {
++ rq = &q->post_flush_rq;
++ end_io = post_flush_end_io;
++ }
++
++ rq->cmd_flags = REQ_HARDBARRIER;
++ rq_init(q, rq);
++ rq->elevator_private = NULL;
++ rq->elevator_private2 = NULL;
++ rq->rq_disk = q->bar_rq.rq_disk;
++ rq->end_io = end_io;
++ q->prepare_flush_fn(q, rq);
++
++ elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
++}
++
++static inline struct request *start_ordered(struct request_queue *q,
++ struct request *rq)
++{
++ q->orderr = 0;
++ q->ordered = q->next_ordered;
++ q->ordseq |= QUEUE_ORDSEQ_STARTED;
++
++ /*
++ * Prep proxy barrier request.
++ */
++ blkdev_dequeue_request(rq);
++ q->orig_bar_rq = rq;
++ rq = &q->bar_rq;
++ rq->cmd_flags = 0;
++ rq_init(q, rq);
++ if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
++ rq->cmd_flags |= REQ_RW;
++ if (q->ordered & QUEUE_ORDERED_FUA)
++ rq->cmd_flags |= REQ_FUA;
++ rq->elevator_private = NULL;
++ rq->elevator_private2 = NULL;
++ init_request_from_bio(rq, q->orig_bar_rq->bio);
++ rq->end_io = bar_end_io;
++
++ /*
++ * Queue ordered sequence. As we stack them at the head, we
++ * need to queue in reverse order. Note that we rely on that
++ * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
++ * request gets in between the ordered sequence. If this request is
++ * an empty barrier, we don't need to do a postflush ever since
++ * there will be no data written between the pre and post flush.
++ * Hence a single flush will suffice.
++ */
++ if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
++ queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
++ else
++ q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
++
++ elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
++
++ if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
++ queue_flush(q, QUEUE_ORDERED_PREFLUSH);
++ rq = &q->pre_flush_rq;
++ } else
++ q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
++
++ if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
++ q->ordseq |= QUEUE_ORDSEQ_DRAIN;
++ else
++ rq = NULL;
++
++ return rq;
++}
++
++int blk_do_ordered(struct request_queue *q, struct request **rqp)
++{
++ struct request *rq = *rqp;
++ const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
++
++ if (!q->ordseq) {
++ if (!is_barrier)
++ return 1;
++
++ if (q->next_ordered != QUEUE_ORDERED_NONE) {
++ *rqp = start_ordered(q, rq);
++ return 1;
++ } else {
++ /*
++ * This can happen when the queue switches to
++ * ORDERED_NONE while this request is on it.
++ */
++ blkdev_dequeue_request(rq);
++ if (__blk_end_request(rq, -EOPNOTSUPP,
++ blk_rq_bytes(rq)))
++ BUG();
++ *rqp = NULL;
++ return 0;
++ }
++ }
++
++ /*
++ * Ordered sequence in progress
++ */
++
++ /* Special requests are not subject to ordering rules. */
++ if (!blk_fs_request(rq) &&
++ rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
++ return 1;
++
++ if (q->ordered & QUEUE_ORDERED_TAG) {
++ /* Ordered by tag. Blocking the next barrier is enough. */
++ if (is_barrier && rq != &q->bar_rq)
++ *rqp = NULL;
++ } else {
++ /* Ordered by draining. Wait for turn. */
++ WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
++ if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
++ *rqp = NULL;
++ }
++
++ return 1;
++}
++
++static void bio_end_empty_barrier(struct bio *bio, int err)
++{
++ if (err)
++ clear_bit(BIO_UPTODATE, &bio->bi_flags);
++
++ complete(bio->bi_private);
++}
++
++/**
++ * blkdev_issue_flush - queue a flush
++ * @bdev: blockdev to issue flush for
++ * @error_sector: error sector
++ *
++ * Description:
++ * Issue a flush for the block device in question. Caller can supply
++ * room for storing the error offset in case of a flush error, if they
++ * wish to. Caller must run wait_for_completion() on its own.
++ */
++int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
++{
++ DECLARE_COMPLETION_ONSTACK(wait);
++ struct request_queue *q;
++ struct bio *bio;
++ int ret;
++
++ if (bdev->bd_disk == NULL)
++ return -ENXIO;
++
++ q = bdev_get_queue(bdev);
++ if (!q)
++ return -ENXIO;
++
++ bio = bio_alloc(GFP_KERNEL, 0);
++ if (!bio)
++ return -ENOMEM;
++
++ bio->bi_end_io = bio_end_empty_barrier;
++ bio->bi_private = &wait;
++ bio->bi_bdev = bdev;
++ submit_bio(1 << BIO_RW_BARRIER, bio);
++
++ wait_for_completion(&wait);
++
++ /*
++ * The driver must store the error location in ->bi_sector, if
++ * it supports it. For non-stacked drivers, this should be copied
++ * from rq->sector.
++ */
++ if (error_sector)
++ *error_sector = bio->bi_sector;
++
++ ret = 0;
++ if (!bio_flagged(bio, BIO_UPTODATE))
++ ret = -EIO;
++
++ bio_put(bio);
++ return ret;
++}
++
++EXPORT_SYMBOL(blkdev_issue_flush);
+diff --git a/block/blk-core.c b/block/blk-core.c
+new file mode 100644
+index 0000000..8ff9944
+--- /dev/null
++++ b/block/blk-core.c
+@@ -0,0 +1,2034 @@
++/*
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
++ * Elevator latency, (C) 2000 Andrea Arcangeli <andrea at suse.de> SuSE
++ * Queue request tables / lock, selectable elevator, Jens Axboe <axboe at suse.de>
++ * kernel-doc documentation started by NeilBrown <neilb at cse.unsw.edu.au> - July2000
++ * bio rewrite, highmem i/o, etc, Jens Axboe <axboe at suse.de> - may 2001
++ */
++
++/*
++ * This handles all read/write requests to block devices
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/backing-dev.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
++#include <linux/highmem.h>
++#include <linux/mm.h>
++#include <linux/kernel_stat.h>
++#include <linux/string.h>
++#include <linux/init.h>
++#include <linux/completion.h>
++#include <linux/slab.h>
++#include <linux/swap.h>
++#include <linux/writeback.h>
++#include <linux/task_io_accounting_ops.h>
++#include <linux/interrupt.h>
++#include <linux/cpu.h>
++#include <linux/blktrace_api.h>
++#include <linux/fault-inject.h>
++
++#include "blk.h"
++
++static int __make_request(struct request_queue *q, struct bio *bio);
++
++/*
++ * For the allocated request tables
++ */
++struct kmem_cache *request_cachep;
++
++/*
++ * For queue allocation
++ */
++struct kmem_cache *blk_requestq_cachep = NULL;
++
++/*
++ * Controlling structure to kblockd
++ */
++static struct workqueue_struct *kblockd_workqueue;
++
++static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
++
++static void drive_stat_acct(struct request *rq, int new_io)
++{
++ int rw = rq_data_dir(rq);
++
++ if (!blk_fs_request(rq) || !rq->rq_disk)
++ return;
++
++ if (!new_io) {
++ __disk_stat_inc(rq->rq_disk, merges[rw]);
++ } else {
++ disk_round_stats(rq->rq_disk);
++ rq->rq_disk->in_flight++;
++ }
++}
++
++void blk_queue_congestion_threshold(struct request_queue *q)
++{
++ int nr;
++
++ nr = q->nr_requests - (q->nr_requests / 8) + 1;
++ if (nr > q->nr_requests)
++ nr = q->nr_requests;
++ q->nr_congestion_on = nr;
++
++ nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
++ if (nr < 1)
++ nr = 1;
++ q->nr_congestion_off = nr;
++}
++
++/**
++ * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
++ * @bdev: device
++ *
++ * Locates the passed device's request queue and returns the address of its
++ * backing_dev_info
++ *
++ * Will return NULL if the request queue cannot be located.
++ */
++struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
++{
++ struct backing_dev_info *ret = NULL;
++ struct request_queue *q = bdev_get_queue(bdev);
++
++ if (q)
++ ret = &q->backing_dev_info;
++ return ret;
++}
++EXPORT_SYMBOL(blk_get_backing_dev_info);
++
++void rq_init(struct request_queue *q, struct request *rq)
++{
++ INIT_LIST_HEAD(&rq->queuelist);
++ INIT_LIST_HEAD(&rq->donelist);
++
++ rq->errors = 0;
++ rq->bio = rq->biotail = NULL;
++ INIT_HLIST_NODE(&rq->hash);
++ RB_CLEAR_NODE(&rq->rb_node);
++ rq->ioprio = 0;
++ rq->buffer = NULL;
++ rq->ref_count = 1;
++ rq->q = q;
++ rq->special = NULL;
++ rq->data_len = 0;
++ rq->data = NULL;
++ rq->nr_phys_segments = 0;
++ rq->sense = NULL;
++ rq->end_io = NULL;
++ rq->end_io_data = NULL;
++ rq->completion_data = NULL;
++ rq->next_rq = NULL;
++}
++
++static void req_bio_endio(struct request *rq, struct bio *bio,
++ unsigned int nbytes, int error)
++{
++ struct request_queue *q = rq->q;
++
++ if (&q->bar_rq != rq) {
++ if (error)
++ clear_bit(BIO_UPTODATE, &bio->bi_flags);
++ else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
++ error = -EIO;
++
++ if (unlikely(nbytes > bio->bi_size)) {
++ printk("%s: want %u bytes done, only %u left\n",
++ __FUNCTION__, nbytes, bio->bi_size);
++ nbytes = bio->bi_size;
++ }
++
++ bio->bi_size -= nbytes;
++ bio->bi_sector += (nbytes >> 9);
++ if (bio->bi_size == 0)
++ bio_endio(bio, error);
++ } else {
++
++ /*
++ * Okay, this is the barrier request in progress, just
++ * record the error;
++ */
++ if (error && !q->orderr)
++ q->orderr = error;
++ }
++}
++
++void blk_dump_rq_flags(struct request *rq, char *msg)
++{
++ int bit;
++
++ printk("%s: dev %s: type=%x, flags=%x\n", msg,
++ rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
++ rq->cmd_flags);
++
++ printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
++ rq->nr_sectors,
++ rq->current_nr_sectors);
++ printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
++
++ if (blk_pc_request(rq)) {
++ printk("cdb: ");
++ for (bit = 0; bit < sizeof(rq->cmd); bit++)
++ printk("%02x ", rq->cmd[bit]);
++ printk("\n");
++ }
++}
++
++EXPORT_SYMBOL(blk_dump_rq_flags);
++
++/*
++ * "plug" the device if there are no outstanding requests: this will
++ * force the transfer to start only after we have put all the requests
++ * on the list.
++ *
++ * This is called with interrupts off and no requests on the queue and
++ * with the queue lock held.
++ */
++void blk_plug_device(struct request_queue *q)
++{
++ WARN_ON(!irqs_disabled());
++
++ /*
++ * don't plug a stopped queue, it must be paired with blk_start_queue()
++ * which will restart the queueing
++ */
++ if (blk_queue_stopped(q))
++ return;
++
++ if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
++ mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
++ blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
++ }
++}
++
++EXPORT_SYMBOL(blk_plug_device);
++
++/*
++ * remove the queue from the plugged list, if present. called with
++ * queue lock held and interrupts disabled.
++ */
++int blk_remove_plug(struct request_queue *q)
++{
++ WARN_ON(!irqs_disabled());
++
++ if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
++ return 0;
++
++ del_timer(&q->unplug_timer);
++ return 1;
++}
++
++EXPORT_SYMBOL(blk_remove_plug);
++
++/*
++ * remove the plug and let it rip..
++ */
++void __generic_unplug_device(struct request_queue *q)
++{
++ if (unlikely(blk_queue_stopped(q)))
++ return;
++
++ if (!blk_remove_plug(q))
++ return;
++
++ q->request_fn(q);
++}
++EXPORT_SYMBOL(__generic_unplug_device);
++
++/**
++ * generic_unplug_device - fire a request queue
++ * @q: The &struct request_queue in question
++ *
++ * Description:
++ * Linux uses plugging to build bigger requests queues before letting
++ * the device have at them. If a queue is plugged, the I/O scheduler
++ * is still adding and merging requests on the queue. Once the queue
++ * gets unplugged, the request_fn defined for the queue is invoked and
++ * transfers started.
++ **/
++void generic_unplug_device(struct request_queue *q)
++{
++ spin_lock_irq(q->queue_lock);
++ __generic_unplug_device(q);
++ spin_unlock_irq(q->queue_lock);
++}
++EXPORT_SYMBOL(generic_unplug_device);
++
++static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
++ struct page *page)
++{
++ struct request_queue *q = bdi->unplug_io_data;
++
++ blk_unplug(q);
++}
++
++void blk_unplug_work(struct work_struct *work)
++{
++ struct request_queue *q =
++ container_of(work, struct request_queue, unplug_work);
++
++ blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
++ q->rq.count[READ] + q->rq.count[WRITE]);
++
++ q->unplug_fn(q);
++}
++
++void blk_unplug_timeout(unsigned long data)
++{
++ struct request_queue *q = (struct request_queue *)data;
++
++ blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
++ q->rq.count[READ] + q->rq.count[WRITE]);
++
++ kblockd_schedule_work(&q->unplug_work);
++}
++
++void blk_unplug(struct request_queue *q)
++{
++ /*
++ * devices don't necessarily have an ->unplug_fn defined
++ */
++ if (q->unplug_fn) {
++ blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
++ q->rq.count[READ] + q->rq.count[WRITE]);
++
++ q->unplug_fn(q);
++ }
++}
++EXPORT_SYMBOL(blk_unplug);
++
++/**
++ * blk_start_queue - restart a previously stopped queue
++ * @q: The &struct request_queue in question
++ *
++ * Description:
++ * blk_start_queue() will clear the stop flag on the queue, and call
++ * the request_fn for the queue if it was in a stopped state when
++ * entered. Also see blk_stop_queue(). Queue lock must be held.
++ **/
++void blk_start_queue(struct request_queue *q)
++{
++ WARN_ON(!irqs_disabled());
++
++ clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
++
++ /*
++ * one level of recursion is ok and is much faster than kicking
++ * the unplug handling
++ */
++ if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
++ q->request_fn(q);
++ clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
++ } else {
++ blk_plug_device(q);
++ kblockd_schedule_work(&q->unplug_work);
++ }
++}
++
++EXPORT_SYMBOL(blk_start_queue);
++
++/**
++ * blk_stop_queue - stop a queue
++ * @q: The &struct request_queue in question
++ *
++ * Description:
++ * The Linux block layer assumes that a block driver will consume all
++ * entries on the request queue when the request_fn strategy is called.
++ * Often this will not happen, because of hardware limitations (queue
++ * depth settings). If a device driver gets a 'queue full' response,
++ * or if it simply chooses not to queue more I/O at one point, it can
++ * call this function to prevent the request_fn from being called until
++ * the driver has signalled it's ready to go again. This happens by calling
++ * blk_start_queue() to restart queue operations. Queue lock must be held.
++ **/
++void blk_stop_queue(struct request_queue *q)
++{
++ blk_remove_plug(q);
++ set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
++}
++EXPORT_SYMBOL(blk_stop_queue);
++
++/**
++ * blk_sync_queue - cancel any pending callbacks on a queue
++ * @q: the queue
++ *
++ * Description:
++ * The block layer may perform asynchronous callback activity
++ * on a queue, such as calling the unplug function after a timeout.
++ * A block device may call blk_sync_queue to ensure that any
++ * such activity is cancelled, thus allowing it to release resources
++ * that the callbacks might use. The caller must already have made sure
++ * that its ->make_request_fn will not re-add plugging prior to calling
++ * this function.
++ *
++ */
++void blk_sync_queue(struct request_queue *q)
++{
++ del_timer_sync(&q->unplug_timer);
++ kblockd_flush_work(&q->unplug_work);
++}
++EXPORT_SYMBOL(blk_sync_queue);
++
++/**
++ * blk_run_queue - run a single device queue
++ * @q: The queue to run
++ */
++void blk_run_queue(struct request_queue *q)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(q->queue_lock, flags);
++ blk_remove_plug(q);
++
++ /*
++ * Only recurse once to avoid overrunning the stack, let the unplug
++ * handling reinvoke the handler shortly if we already got there.
++ */
++ if (!elv_queue_empty(q)) {
++ if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
++ q->request_fn(q);
++ clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
++ } else {
++ blk_plug_device(q);
++ kblockd_schedule_work(&q->unplug_work);
++ }
++ }
++
++ spin_unlock_irqrestore(q->queue_lock, flags);
++}
++EXPORT_SYMBOL(blk_run_queue);
++
++void blk_put_queue(struct request_queue *q)
++{
++ kobject_put(&q->kobj);
++}
++EXPORT_SYMBOL(blk_put_queue);
++
++void blk_cleanup_queue(struct request_queue * q)
++{
++ mutex_lock(&q->sysfs_lock);
++ set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
++ mutex_unlock(&q->sysfs_lock);
++
++ if (q->elevator)
++ elevator_exit(q->elevator);
++
++ blk_put_queue(q);
++}
++
++EXPORT_SYMBOL(blk_cleanup_queue);
++
++static int blk_init_free_list(struct request_queue *q)
++{
++ struct request_list *rl = &q->rq;
++
++ rl->count[READ] = rl->count[WRITE] = 0;
++ rl->starved[READ] = rl->starved[WRITE] = 0;
++ rl->elvpriv = 0;
++ init_waitqueue_head(&rl->wait[READ]);
++ init_waitqueue_head(&rl->wait[WRITE]);
++
++ rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
++ mempool_free_slab, request_cachep, q->node);
++
++ if (!rl->rq_pool)
++ return -ENOMEM;
++
++ return 0;
++}
++
++struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
++{
++ return blk_alloc_queue_node(gfp_mask, -1);
++}
++EXPORT_SYMBOL(blk_alloc_queue);
++
++struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
++{
++ struct request_queue *q;
++ int err;
++
++ q = kmem_cache_alloc_node(blk_requestq_cachep,
++ gfp_mask | __GFP_ZERO, node_id);
++ if (!q)
++ return NULL;
++
++ q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
++ q->backing_dev_info.unplug_io_data = q;
++ err = bdi_init(&q->backing_dev_info);
++ if (err) {
++ kmem_cache_free(blk_requestq_cachep, q);
++ return NULL;
++ }
++
++ init_timer(&q->unplug_timer);
++
++ kobject_init(&q->kobj, &blk_queue_ktype);
++
++ mutex_init(&q->sysfs_lock);
++
++ return q;
++}
++EXPORT_SYMBOL(blk_alloc_queue_node);
++
++/**
++ * blk_init_queue - prepare a request queue for use with a block device
++ * @rfn: The function to be called to process requests that have been
++ * placed on the queue.
++ * @lock: Request queue spin lock
++ *
++ * Description:
++ * If a block device wishes to use the standard request handling procedures,
++ * which sorts requests and coalesces adjacent requests, then it must
++ * call blk_init_queue(). The function @rfn will be called when there
++ * are requests on the queue that need to be processed. If the device
++ * supports plugging, then @rfn may not be called immediately when requests
++ * are available on the queue, but may be called at some time later instead.
++ * Plugged queues are generally unplugged when a buffer belonging to one
++ * of the requests on the queue is needed, or due to memory pressure.
++ *
++ * @rfn is not required, or even expected, to remove all requests off the
++ * queue, but only as many as it can handle at a time. If it does leave
++ * requests on the queue, it is responsible for arranging that the requests
++ * get dealt with eventually.
++ *
++ * The queue spin lock must be held while manipulating the requests on the
++ * request queue; this lock will be taken also from interrupt context, so irq
++ * disabling is needed for it.
++ *
++ * Function returns a pointer to the initialized request queue, or NULL if
++ * it didn't succeed.
++ *
++ * Note:
++ * blk_init_queue() must be paired with a blk_cleanup_queue() call
++ * when the block device is deactivated (such as at module unload).
++ **/
++
++struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
++{
++ return blk_init_queue_node(rfn, lock, -1);
++}
++EXPORT_SYMBOL(blk_init_queue);
++
++struct request_queue *
++blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
++{
++ struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
++
++ if (!q)
++ return NULL;
++
++ q->node = node_id;
++ if (blk_init_free_list(q)) {
++ kmem_cache_free(blk_requestq_cachep, q);
++ return NULL;
++ }
++
++ /*
++ * if caller didn't supply a lock, they get per-queue locking with
++ * our embedded lock
++ */
++ if (!lock) {
++ spin_lock_init(&q->__queue_lock);
++ lock = &q->__queue_lock;
++ }
++
++ q->request_fn = rfn;
++ q->prep_rq_fn = NULL;
++ q->unplug_fn = generic_unplug_device;
++ q->queue_flags = (1 << QUEUE_FLAG_CLUSTER);
++ q->queue_lock = lock;
++
++ blk_queue_segment_boundary(q, 0xffffffff);
++
++ blk_queue_make_request(q, __make_request);
++ blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
++
++ blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
++ blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
++
++ q->sg_reserved_size = INT_MAX;
++
++ /*
++ * all done
++ */
++ if (!elevator_init(q, NULL)) {
++ blk_queue_congestion_threshold(q);
++ return q;
++ }
++
++ blk_put_queue(q);
++ return NULL;
++}
++EXPORT_SYMBOL(blk_init_queue_node);
++
++int blk_get_queue(struct request_queue *q)
++{
++ if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
++ kobject_get(&q->kobj);
++ return 0;
++ }
++
++ return 1;
++}
++
++EXPORT_SYMBOL(blk_get_queue);
++
++static inline void blk_free_request(struct request_queue *q, struct request *rq)
++{
++ if (rq->cmd_flags & REQ_ELVPRIV)
++ elv_put_request(q, rq);
++ mempool_free(rq, q->rq.rq_pool);
++}
++
++static struct request *
++blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
++{
++ struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
++
++ if (!rq)
++ return NULL;
++
++ /*
++ * first three bits are identical in rq->cmd_flags and bio->bi_rw,
++ * see bio.h and blkdev.h
++ */
++ rq->cmd_flags = rw | REQ_ALLOCED;
++
++ if (priv) {
++ if (unlikely(elv_set_request(q, rq, gfp_mask))) {
++ mempool_free(rq, q->rq.rq_pool);
++ return NULL;
++ }
++ rq->cmd_flags |= REQ_ELVPRIV;
++ }
++
++ return rq;
++}
++
++/*
++ * ioc_batching returns true if the ioc is a valid batching request and
++ * should be given priority access to a request.
++ */
++static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
++{
++ if (!ioc)
++ return 0;
++
++ /*
++ * Make sure the process is able to allocate at least 1 request
++ * even if the batch times out, otherwise we could theoretically
++ * lose wakeups.
++ */
++ return ioc->nr_batch_requests == q->nr_batching ||
++ (ioc->nr_batch_requests > 0
++ && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
++}
++
++/*
++ * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
++ * will cause the process to be a "batcher" on all queues in the system. This
++ * is the behaviour we want though - once it gets a wakeup it should be given
++ * a nice run.
++ */
++static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
++{
++ if (!ioc || ioc_batching(q, ioc))
++ return;
++
++ ioc->nr_batch_requests = q->nr_batching;
++ ioc->last_waited = jiffies;
++}
++
++static void __freed_request(struct request_queue *q, int rw)
++{
++ struct request_list *rl = &q->rq;
++
++ if (rl->count[rw] < queue_congestion_off_threshold(q))
++ blk_clear_queue_congested(q, rw);
++
++ if (rl->count[rw] + 1 <= q->nr_requests) {
++ if (waitqueue_active(&rl->wait[rw]))
++ wake_up(&rl->wait[rw]);
++
++ blk_clear_queue_full(q, rw);
++ }
++}
++
++/*
++ * A request has just been released. Account for it, update the full and
++ * congestion status, wake up any waiters. Called under q->queue_lock.
++ */
++static void freed_request(struct request_queue *q, int rw, int priv)
++{
++ struct request_list *rl = &q->rq;
++
++ rl->count[rw]--;
++ if (priv)
++ rl->elvpriv--;
++
++ __freed_request(q, rw);
++
++ if (unlikely(rl->starved[rw ^ 1]))
++ __freed_request(q, rw ^ 1);
++}
++
++#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
++/*
++ * Get a free request, queue_lock must be held.
++ * Returns NULL on failure, with queue_lock held.
++ * Returns !NULL on success, with queue_lock *not held*.
++ */
++static struct request *get_request(struct request_queue *q, int rw_flags,
++ struct bio *bio, gfp_t gfp_mask)
++{
++ struct request *rq = NULL;
++ struct request_list *rl = &q->rq;
++ struct io_context *ioc = NULL;
++ const int rw = rw_flags & 0x01;
++ int may_queue, priv;
++
++ may_queue = elv_may_queue(q, rw_flags);
++ if (may_queue == ELV_MQUEUE_NO)
++ goto rq_starved;
++
++ if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
++ if (rl->count[rw]+1 >= q->nr_requests) {
++ ioc = current_io_context(GFP_ATOMIC, q->node);
++ /*
++ * The queue will fill after this allocation, so set
++ * it as full, and mark this process as "batching".
++ * This process will be allowed to complete a batch of
++ * requests, others will be blocked.
++ */
++ if (!blk_queue_full(q, rw)) {
++ ioc_set_batching(q, ioc);
++ blk_set_queue_full(q, rw);
++ } else {
++ if (may_queue != ELV_MQUEUE_MUST
++ && !ioc_batching(q, ioc)) {
++ /*
++ * The queue is full and the allocating
++ * process is not a "batcher", and not
++ * exempted by the IO scheduler
++ */
++ goto out;
++ }
++ }
++ }
++ blk_set_queue_congested(q, rw);
++ }
++
++ /*
++ * Only allow batching queuers to allocate up to 50% over the defined
++ * limit of requests, otherwise we could have thousands of requests
++ * allocated with any setting of ->nr_requests
++ */
++ if (rl->count[rw] >= (3 * q->nr_requests / 2))
++ goto out;
++
++ rl->count[rw]++;
++ rl->starved[rw] = 0;
++
++ priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
++ if (priv)
++ rl->elvpriv++;
++
++ spin_unlock_irq(q->queue_lock);
++
++ rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
++ if (unlikely(!rq)) {
++ /*
++ * Allocation failed presumably due to memory. Undo anything
++ * we might have messed up.
++ *
++ * Allocating task should really be put onto the front of the
++ * wait queue, but this is pretty rare.
++ */
++ spin_lock_irq(q->queue_lock);
++ freed_request(q, rw, priv);
++
++ /*
++ * in the very unlikely event that allocation failed and no
++ * requests for this direction were pending, mark us starved
++ * so that freeing of a request in the other direction will
++ * notice us. another possible fix would be to split the
++ * rq mempool into READ and WRITE
++ */
++rq_starved:
++ if (unlikely(rl->count[rw] == 0))
++ rl->starved[rw] = 1;
++
++ goto out;
++ }
++
++ /*
++ * ioc may be NULL here, and ioc_batching will be false. That's
++ * OK, if the queue is under the request limit then requests need
++ * not count toward the nr_batch_requests limit. There will always
++ * be some limit enforced by BLK_BATCH_TIME.
++ */
++ if (ioc_batching(q, ioc))
++ ioc->nr_batch_requests--;
++
++ rq_init(q, rq);
++
++ blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
++out:
++ return rq;
++}
++
++/*
++ * No available requests for this queue, unplug the device and wait for some
++ * requests to become available.
++ *
++ * Called with q->queue_lock held, and returns with it unlocked.
++ */
++static struct request *get_request_wait(struct request_queue *q, int rw_flags,
++ struct bio *bio)
++{
++ const int rw = rw_flags & 0x01;
++ struct request *rq;
++
++ rq = get_request(q, rw_flags, bio, GFP_NOIO);
++ while (!rq) {
++ DEFINE_WAIT(wait);
++ struct request_list *rl = &q->rq;
++
++ prepare_to_wait_exclusive(&rl->wait[rw], &wait,
++ TASK_UNINTERRUPTIBLE);
++
++ rq = get_request(q, rw_flags, bio, GFP_NOIO);
++
++ if (!rq) {
++ struct io_context *ioc;
++
++ blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
++
++ __generic_unplug_device(q);
++ spin_unlock_irq(q->queue_lock);
++ io_schedule();
++
++ /*
++ * After sleeping, we become a "batching" process and
++ * will be able to allocate at least one request, and
++ * up to a big batch of them for a small period time.
++ * See ioc_batching, ioc_set_batching
++ */
++ ioc = current_io_context(GFP_NOIO, q->node);
++ ioc_set_batching(q, ioc);
++
++ spin_lock_irq(q->queue_lock);
++ }
++ finish_wait(&rl->wait[rw], &wait);
++ }
++
++ return rq;
++}
++
++struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
++{
++ struct request *rq;
++
++ BUG_ON(rw != READ && rw != WRITE);
++
++ spin_lock_irq(q->queue_lock);
++ if (gfp_mask & __GFP_WAIT) {
++ rq = get_request_wait(q, rw, NULL);
++ } else {
++ rq = get_request(q, rw, NULL, gfp_mask);
++ if (!rq)
++ spin_unlock_irq(q->queue_lock);
++ }
++ /* q->queue_lock is unlocked at this point */
++
++ return rq;
++}
++EXPORT_SYMBOL(blk_get_request);
++
++/**
++ * blk_start_queueing - initiate dispatch of requests to device
++ * @q: request queue to kick into gear
++ *
++ * This is basically a helper to remove the need to know whether a queue
++ * is plugged or not if someone just wants to initiate dispatch of requests
++ * for this queue.
++ *
++ * The queue lock must be held with interrupts disabled.
++ */
++void blk_start_queueing(struct request_queue *q)
++{
++ if (!blk_queue_plugged(q))
++ q->request_fn(q);
++ else
++ __generic_unplug_device(q);
++}
++EXPORT_SYMBOL(blk_start_queueing);
++
++/**
++ * blk_requeue_request - put a request back on queue
++ * @q: request queue where request should be inserted
++ * @rq: request to be inserted
++ *
++ * Description:
++ * Drivers often keep queueing requests until the hardware cannot accept
++ * more, when that condition happens we need to put the request back
++ * on the queue. Must be called with queue lock held.
++ */
++void blk_requeue_request(struct request_queue *q, struct request *rq)
++{
++ blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
++
++ if (blk_rq_tagged(rq))
++ blk_queue_end_tag(q, rq);
++
++ elv_requeue_request(q, rq);
++}
++
++EXPORT_SYMBOL(blk_requeue_request);
++
++/**
++ * blk_insert_request - insert a special request into a request queue
++ * @q: request queue where request should be inserted
++ * @rq: request to be inserted
++ * @at_head: insert request at head or tail of queue
++ * @data: private data
++ *
++ * Description:
++ * Many block devices need to execute commands asynchronously, so they don't
++ * block the whole kernel from preemption during request execution. This is
++ * accomplished normally by inserting artificial requests tagged as
++ * REQ_SPECIAL into the corresponding request queue, and letting them be
++ * scheduled for actual execution by the request queue.
++ *
++ * We have the option of inserting the head or the tail of the queue.
++ * Typically we use the tail for new ioctls and so forth. We use the head
++ * of the queue for things like a QUEUE_FULL message from a device, or a
++ * host that is unable to accept a particular command.
++ */
++void blk_insert_request(struct request_queue *q, struct request *rq,
++ int at_head, void *data)
++{
++ int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
++ unsigned long flags;
++
++ /*
++ * tell I/O scheduler that this isn't a regular read/write (ie it
++ * must not attempt merges on this) and that it acts as a soft
++ * barrier
++ */
++ rq->cmd_type = REQ_TYPE_SPECIAL;
++ rq->cmd_flags |= REQ_SOFTBARRIER;
++
++ rq->special = data;
++
++ spin_lock_irqsave(q->queue_lock, flags);
++
++ /*
++ * If command is tagged, release the tag
++ */
++ if (blk_rq_tagged(rq))
++ blk_queue_end_tag(q, rq);
++
++ drive_stat_acct(rq, 1);
++ __elv_add_request(q, rq, where, 0);
++ blk_start_queueing(q);
++ spin_unlock_irqrestore(q->queue_lock, flags);
++}
++
++EXPORT_SYMBOL(blk_insert_request);
++
++/*
++ * add-request adds a request to the linked list.
++ * queue lock is held and interrupts disabled, as we muck with the
++ * request queue list.
++ */
++static inline void add_request(struct request_queue * q, struct request * req)
++{
++ drive_stat_acct(req, 1);
++
++ /*
++ * elevator indicated where it wants this request to be
++ * inserted at elevator_merge time
++ */
++ __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
++}
++
++/*
++ * disk_round_stats() - Round off the performance stats on a struct
++ * disk_stats.
++ *
++ * The average IO queue length and utilisation statistics are maintained
++ * by observing the current state of the queue length and the amount of
++ * time it has been in this state for.
++ *
++ * Normally, that accounting is done on IO completion, but that can result
++ * in more than a second's worth of IO being accounted for within any one
++ * second, leading to >100% utilisation. To deal with that, we call this
++ * function to do a round-off before returning the results when reading
++ * /proc/diskstats. This accounts immediately for all queue usage up to
++ * the current jiffies and restarts the counters again.
++ */
++void disk_round_stats(struct gendisk *disk)
++{
++ unsigned long now = jiffies;
++
++ if (now == disk->stamp)
++ return;
++
++ if (disk->in_flight) {
++ __disk_stat_add(disk, time_in_queue,
++ disk->in_flight * (now - disk->stamp));
++ __disk_stat_add(disk, io_ticks, (now - disk->stamp));
++ }
++ disk->stamp = now;
++}
++
++EXPORT_SYMBOL_GPL(disk_round_stats);
++
++/*
++ * queue lock must be held
++ */
++void __blk_put_request(struct request_queue *q, struct request *req)
++{
++ if (unlikely(!q))
++ return;
++ if (unlikely(--req->ref_count))
++ return;
++
++ elv_completed_request(q, req);
++
++ /*
++ * Request may not have originated from ll_rw_blk. if not,
++ * it didn't come out of our reserved rq pools
++ */
++ if (req->cmd_flags & REQ_ALLOCED) {
++ int rw = rq_data_dir(req);
++ int priv = req->cmd_flags & REQ_ELVPRIV;
++
++ BUG_ON(!list_empty(&req->queuelist));
++ BUG_ON(!hlist_unhashed(&req->hash));
++
++ blk_free_request(q, req);
++ freed_request(q, rw, priv);
++ }
++}
++
++EXPORT_SYMBOL_GPL(__blk_put_request);
++
++void blk_put_request(struct request *req)
++{
++ unsigned long flags;
++ struct request_queue *q = req->q;
++
++ /*
++ * Gee, IDE calls in w/ NULL q. Fix IDE and remove the
++ * following if (q) test.
++ */
++ if (q) {
++ spin_lock_irqsave(q->queue_lock, flags);
++ __blk_put_request(q, req);
++ spin_unlock_irqrestore(q->queue_lock, flags);
++ }
++}
++
++EXPORT_SYMBOL(blk_put_request);
++
++void init_request_from_bio(struct request *req, struct bio *bio)
++{
++ req->cmd_type = REQ_TYPE_FS;
++
++ /*
++ * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
++ */
++ if (bio_rw_ahead(bio) || bio_failfast(bio))
++ req->cmd_flags |= REQ_FAILFAST;
++
++ /*
++ * REQ_BARRIER implies no merging, but lets make it explicit
++ */
++ if (unlikely(bio_barrier(bio)))
++ req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
++
++ if (bio_sync(bio))
++ req->cmd_flags |= REQ_RW_SYNC;
++ if (bio_rw_meta(bio))
++ req->cmd_flags |= REQ_RW_META;
++
++ req->errors = 0;
++ req->hard_sector = req->sector = bio->bi_sector;
++ req->ioprio = bio_prio(bio);
++ req->start_time = jiffies;
++ blk_rq_bio_prep(req->q, req, bio);
++}
++
++static int __make_request(struct request_queue *q, struct bio *bio)
++{
++ struct request *req;
++ int el_ret, nr_sectors, barrier, err;
++ const unsigned short prio = bio_prio(bio);
++ const int sync = bio_sync(bio);
++ int rw_flags;
++
++ nr_sectors = bio_sectors(bio);
++
++ /*
++ * low level driver can indicate that it wants pages above a
++ * certain limit bounced to low memory (ie for highmem, or even
++ * ISA dma in theory)
++ */
++ blk_queue_bounce(q, &bio);
++
++ barrier = bio_barrier(bio);
++ if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
++ err = -EOPNOTSUPP;
++ goto end_io;
++ }
++
++ spin_lock_irq(q->queue_lock);
++
++ if (unlikely(barrier) || elv_queue_empty(q))
++ goto get_rq;
++
++ el_ret = elv_merge(q, &req, bio);
++ switch (el_ret) {
++ case ELEVATOR_BACK_MERGE:
++ BUG_ON(!rq_mergeable(req));
++
++ if (!ll_back_merge_fn(q, req, bio))
++ break;
++
++ blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
++
++ req->biotail->bi_next = bio;
++ req->biotail = bio;
++ req->nr_sectors = req->hard_nr_sectors += nr_sectors;
++ req->ioprio = ioprio_best(req->ioprio, prio);
++ drive_stat_acct(req, 0);
++ if (!attempt_back_merge(q, req))
++ elv_merged_request(q, req, el_ret);
++ goto out;
++
++ case ELEVATOR_FRONT_MERGE:
++ BUG_ON(!rq_mergeable(req));
++
++ if (!ll_front_merge_fn(q, req, bio))
++ break;
++
++ blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
++
++ bio->bi_next = req->bio;
++ req->bio = bio;
++
++ /*
++ * may not be valid. if the low level driver said
++ * it didn't need a bounce buffer then it better
++ * not touch req->buffer either...
++ */
++ req->buffer = bio_data(bio);
++ req->current_nr_sectors = bio_cur_sectors(bio);
++ req->hard_cur_sectors = req->current_nr_sectors;
++ req->sector = req->hard_sector = bio->bi_sector;
++ req->nr_sectors = req->hard_nr_sectors += nr_sectors;
++ req->ioprio = ioprio_best(req->ioprio, prio);
++ drive_stat_acct(req, 0);
++ if (!attempt_front_merge(q, req))
++ elv_merged_request(q, req, el_ret);
++ goto out;
++
++ /* ELV_NO_MERGE: elevator says don't/can't merge. */
++ default:
++ ;
++ }
++
++get_rq:
++ /*
++ * This sync check and mask will be re-done in init_request_from_bio(),
++ * but we need to set it earlier to expose the sync flag to the
++ * rq allocator and io schedulers.
++ */
++ rw_flags = bio_data_dir(bio);
++ if (sync)
++ rw_flags |= REQ_RW_SYNC;
++
++ /*
++ * Grab a free request. This might sleep but can not fail.
++ * Returns with the queue unlocked.
++ */
++ req = get_request_wait(q, rw_flags, bio);
++
++ /*
++ * After dropping the lock and possibly sleeping here, our request
++ * may now be mergeable after it had proven unmergeable (above).
++ * We don't worry about that case for efficiency. It won't happen
++ * often, and the elevators are able to handle it.
++ */
++ init_request_from_bio(req, bio);
++
++ spin_lock_irq(q->queue_lock);
++ if (elv_queue_empty(q))
++ blk_plug_device(q);
++ add_request(q, req);
++out:
++ if (sync)
++ __generic_unplug_device(q);
++
++ spin_unlock_irq(q->queue_lock);
++ return 0;
++
++end_io:
++ bio_endio(bio, err);
++ return 0;
++}
++
++/*
++ * If bio->bi_dev is a partition, remap the location
++ */
++static inline void blk_partition_remap(struct bio *bio)
++{
++ struct block_device *bdev = bio->bi_bdev;
++
++ if (bio_sectors(bio) && bdev != bdev->bd_contains) {
++ struct hd_struct *p = bdev->bd_part;
++ const int rw = bio_data_dir(bio);
++
++ p->sectors[rw] += bio_sectors(bio);
++ p->ios[rw]++;
++
++ bio->bi_sector += p->start_sect;
++ bio->bi_bdev = bdev->bd_contains;
++
++ blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
++ bdev->bd_dev, bio->bi_sector,
++ bio->bi_sector - p->start_sect);
++ }
++}
++
++static void handle_bad_sector(struct bio *bio)
++{
++ char b[BDEVNAME_SIZE];
++
++ printk(KERN_INFO "attempt to access beyond end of device\n");
++ printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
++ bdevname(bio->bi_bdev, b),
++ bio->bi_rw,
++ (unsigned long long)bio->bi_sector + bio_sectors(bio),
++ (long long)(bio->bi_bdev->bd_inode->i_size >> 9));
++
++ set_bit(BIO_EOF, &bio->bi_flags);
++}
++
++#ifdef CONFIG_FAIL_MAKE_REQUEST
++
++static DECLARE_FAULT_ATTR(fail_make_request);
++
++static int __init setup_fail_make_request(char *str)
++{
++ return setup_fault_attr(&fail_make_request, str);
++}
++__setup("fail_make_request=", setup_fail_make_request);
++
++static int should_fail_request(struct bio *bio)
++{
++ if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
++ (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
++ return should_fail(&fail_make_request, bio->bi_size);
++
++ return 0;
++}
++
++static int __init fail_make_request_debugfs(void)
++{
++ return init_fault_attr_dentries(&fail_make_request,
++ "fail_make_request");
++}
++
++late_initcall(fail_make_request_debugfs);
++
++#else /* CONFIG_FAIL_MAKE_REQUEST */
++
++static inline int should_fail_request(struct bio *bio)
++{
++ return 0;
++}
++
++#endif /* CONFIG_FAIL_MAKE_REQUEST */
++
++/*
++ * Check whether this bio extends beyond the end of the device.
++ */
++static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
++{
++ sector_t maxsector;
++
++ if (!nr_sectors)
++ return 0;
++
++ /* Test device or partition size, when known. */
++ maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
++ if (maxsector) {
++ sector_t sector = bio->bi_sector;
++
++ if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
++ /*
++ * This may well happen - the kernel calls bread()
++ * without checking the size of the device, e.g., when
++ * mounting a device.
++ */
++ handle_bad_sector(bio);
++ return 1;
++ }
++ }
++
++ return 0;
++}
++
++/**
++ * generic_make_request: hand a buffer to its device driver for I/O
++ * @bio: The bio describing the location in memory and on the device.
++ *
++ * generic_make_request() is used to make I/O requests of block
++ * devices. It is passed a &struct bio, which describes the I/O that needs
++ * to be done.
++ *
++ * generic_make_request() does not return any status. The
++ * success/failure status of the request, along with notification of
++ * completion, is delivered asynchronously through the bio->bi_end_io
++ * function described (one day) elsewhere.
++ *
++ * The caller of generic_make_request must make sure that bi_io_vec
++ * are set to describe the memory buffer, and that bi_dev and bi_sector are
++ * set to describe the device address, and the
++ * bi_end_io and optionally bi_private are set to describe how
++ * completion notification should be signaled.
++ *
++ * generic_make_request and the drivers it calls may use bi_next if this
++ * bio happens to be merged with someone else, and may change bi_dev and
++ * bi_sector for remaps as it sees fit. So the values of these fields
++ * should NOT be depended on after the call to generic_make_request.
++ */
++static inline void __generic_make_request(struct bio *bio)
++{
++ struct request_queue *q;
++ sector_t old_sector;
++ int ret, nr_sectors = bio_sectors(bio);
++ dev_t old_dev;
++ int err = -EIO;
++
++ might_sleep();
++
++ if (bio_check_eod(bio, nr_sectors))
++ goto end_io;
++
++ /*
++ * Resolve the mapping until finished. (drivers are
++ * still free to implement/resolve their own stacking
++ * by explicitly returning 0)
++ *
++ * NOTE: we don't repeat the blk_size check for each new device.
++ * Stacking drivers are expected to know what they are doing.
++ */
++ old_sector = -1;
++ old_dev = 0;
++ do {
++ char b[BDEVNAME_SIZE];
++
++ q = bdev_get_queue(bio->bi_bdev);
++ if (!q) {
++ printk(KERN_ERR
++ "generic_make_request: Trying to access "
++ "nonexistent block-device %s (%Lu)\n",
++ bdevname(bio->bi_bdev, b),
++ (long long) bio->bi_sector);
++end_io:
++ bio_endio(bio, err);
++ break;
++ }
++
++ if (unlikely(nr_sectors > q->max_hw_sectors)) {
++ printk("bio too big device %s (%u > %u)\n",
++ bdevname(bio->bi_bdev, b),
++ bio_sectors(bio),
++ q->max_hw_sectors);
++ goto end_io;
++ }
++
++ if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
++ goto end_io;
++
++ if (should_fail_request(bio))
++ goto end_io;
++
++ /*
++ * If this device has partitions, remap block n
++ * of partition p to block n+start(p) of the disk.
++ */
++ blk_partition_remap(bio);
++
++ if (old_sector != -1)
++ blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
++ old_sector);
++
++ blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
++
++ old_sector = bio->bi_sector;
++ old_dev = bio->bi_bdev->bd_dev;
++
++ if (bio_check_eod(bio, nr_sectors))
++ goto end_io;
++ if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
++ err = -EOPNOTSUPP;
++ goto end_io;
++ }
++
++ ret = q->make_request_fn(q, bio);
++ } while (ret);
++}
++
++/*
++ * We only want one ->make_request_fn to be active at a time,
++ * else stack usage with stacked devices could be a problem.
++ * So use current->bio_{list,tail} to keep a list of requests
++ * submitted by a make_request_fn function.
++ * current->bio_tail is also used as a flag to say if
++ * generic_make_request is currently active in this task or not.
++ * If it is NULL, then no make_request is active. If it is non-NULL,
++ * then a make_request is active, and new requests should be added
++ * at the tail
++ */
++void generic_make_request(struct bio *bio)
++{
++ if (current->bio_tail) {
++ /* make_request is active */
++ *(current->bio_tail) = bio;
++ bio->bi_next = NULL;
++ current->bio_tail = &bio->bi_next;
++ return;
++ }
++ /* following loop may be a bit non-obvious, and so deserves some
++ * explanation.
++ * Before entering the loop, bio->bi_next is NULL (as all callers
++ * ensure that) so we have a list with a single bio.
++ * We pretend that we have just taken it off a longer list, so
++ * we assign bio_list to the next (which is NULL) and bio_tail
++ * to &bio_list, thus initialising the bio_list of new bios to be
++ * added. __generic_make_request may indeed add some more bios
++ * through a recursive call to generic_make_request. If it
++ * did, we find a non-NULL value in bio_list and re-enter the loop
++ * from the top. In this case we really did just take the bio
++ * off the top of the list (no pretending) and so fixup bio_list and
++ * bio_tail or bi_next, and call into __generic_make_request again.
++ *
++ * The loop was structured like this to make only one call to
++ * __generic_make_request (which is important as it is large and
++ * inlined) and to keep the structure simple.
++ */
++ BUG_ON(bio->bi_next);
++ do {
++ current->bio_list = bio->bi_next;
++ if (bio->bi_next == NULL)
++ current->bio_tail = &current->bio_list;
++ else
++ bio->bi_next = NULL;
++ __generic_make_request(bio);
++ bio = current->bio_list;
++ } while (bio);
++ current->bio_tail = NULL; /* deactivate */
++}
++
++EXPORT_SYMBOL(generic_make_request);
++
++/**
++ * submit_bio: submit a bio to the block device layer for I/O
++ * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
++ * @bio: The &struct bio which describes the I/O
++ *
++ * submit_bio() is very similar in purpose to generic_make_request(), and
++ * uses that function to do most of the work. Both are fairly rough
++ * interfaces, @bio must be presetup and ready for I/O.
++ *
++ */
++void submit_bio(int rw, struct bio *bio)
++{
++ int count = bio_sectors(bio);
++
++ bio->bi_rw |= rw;
++
++ /*
++ * If it's a regular read/write or a barrier with data attached,
++ * go through the normal accounting stuff before submission.
++ */
++ if (!bio_empty_barrier(bio)) {
++
++ BIO_BUG_ON(!bio->bi_size);
++ BIO_BUG_ON(!bio->bi_io_vec);
++
++ if (rw & WRITE) {
++ count_vm_events(PGPGOUT, count);
++ } else {
++ task_io_account_read(bio->bi_size);
++ count_vm_events(PGPGIN, count);
++ }
++
++ if (unlikely(block_dump)) {
++ char b[BDEVNAME_SIZE];
++ printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
++ current->comm, task_pid_nr(current),
++ (rw & WRITE) ? "WRITE" : "READ",
++ (unsigned long long)bio->bi_sector,
++ bdevname(bio->bi_bdev,b));
++ }
++ }
++
++ generic_make_request(bio);
++}
++
++EXPORT_SYMBOL(submit_bio);
++
++/**
++ * __end_that_request_first - end I/O on a request
++ * @req: the request being processed
++ * @error: 0 for success, < 0 for error
++ * @nr_bytes: number of bytes to complete
++ *
++ * Description:
++ * Ends I/O on a number of bytes attached to @req, and sets it up
++ * for the next range of segments (if any) in the cluster.
++ *
++ * Return:
++ * 0 - we are done with this request, call end_that_request_last()
++ * 1 - still buffers pending for this request
++ **/
++static int __end_that_request_first(struct request *req, int error,
++ int nr_bytes)
++{
++ int total_bytes, bio_nbytes, next_idx = 0;
++ struct bio *bio;
++
++ blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
++
++ /*
++ * for a REQ_BLOCK_PC request, we want to carry any eventual
++ * sense key with us all the way through
++ */
++ if (!blk_pc_request(req))
++ req->errors = 0;
++
++ if (error) {
++ if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
++ printk("end_request: I/O error, dev %s, sector %llu\n",
++ req->rq_disk ? req->rq_disk->disk_name : "?",
++ (unsigned long long)req->sector);
++ }
++
++ if (blk_fs_request(req) && req->rq_disk) {
++ const int rw = rq_data_dir(req);
++
++ disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
++ }
++
++ total_bytes = bio_nbytes = 0;
++ while ((bio = req->bio) != NULL) {
++ int nbytes;
++
++ /*
++ * For an empty barrier request, the low level driver must
++ * store a potential error location in ->sector. We pass
++ * that back up in ->bi_sector.
++ */
++ if (blk_empty_barrier(req))
++ bio->bi_sector = req->sector;
++
++ if (nr_bytes >= bio->bi_size) {
++ req->bio = bio->bi_next;
++ nbytes = bio->bi_size;
++ req_bio_endio(req, bio, nbytes, error);
++ next_idx = 0;
++ bio_nbytes = 0;
++ } else {
++ int idx = bio->bi_idx + next_idx;
++
++ if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
++ blk_dump_rq_flags(req, "__end_that");
++ printk("%s: bio idx %d >= vcnt %d\n",
++ __FUNCTION__,
++ bio->bi_idx, bio->bi_vcnt);
++ break;
++ }
++
++ nbytes = bio_iovec_idx(bio, idx)->bv_len;
++ BIO_BUG_ON(nbytes > bio->bi_size);
++
++ /*
++ * not a complete bvec done
++ */
++ if (unlikely(nbytes > nr_bytes)) {
++ bio_nbytes += nr_bytes;
++ total_bytes += nr_bytes;
++ break;
++ }
++
++ /*
++ * advance to the next vector
++ */
++ next_idx++;
++ bio_nbytes += nbytes;
++ }
++
++ total_bytes += nbytes;
++ nr_bytes -= nbytes;
++
++ if ((bio = req->bio)) {
++ /*
++ * end more in this run, or just return 'not-done'
++ */
++ if (unlikely(nr_bytes <= 0))
++ break;
++ }
++ }
++
++ /*
++ * completely done
++ */
++ if (!req->bio)
++ return 0;
++
++ /*
++ * if the request wasn't completed, update state
++ */
++ if (bio_nbytes) {
++ req_bio_endio(req, bio, bio_nbytes, error);
++ bio->bi_idx += next_idx;
++ bio_iovec(bio)->bv_offset += nr_bytes;
++ bio_iovec(bio)->bv_len -= nr_bytes;
++ }
++
++ blk_recalc_rq_sectors(req, total_bytes >> 9);
++ blk_recalc_rq_segments(req);
++ return 1;
++}
++
++/*
++ * splice the completion data to a local structure and hand off to
++ * process_completion_queue() to complete the requests
++ */
++static void blk_done_softirq(struct softirq_action *h)
++{
++ struct list_head *cpu_list, local_list;
++
++ local_irq_disable();
++ cpu_list = &__get_cpu_var(blk_cpu_done);
++ list_replace_init(cpu_list, &local_list);
++ local_irq_enable();
++
++ while (!list_empty(&local_list)) {
++ struct request *rq = list_entry(local_list.next, struct request, donelist);
++
++ list_del_init(&rq->donelist);
++ rq->q->softirq_done_fn(rq);
++ }
++}
++
++static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
++ void *hcpu)
++{
++ /*
++ * If a CPU goes away, splice its entries to the current CPU
++ * and trigger a run of the softirq
++ */
++ if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
++ int cpu = (unsigned long) hcpu;
++
++ local_irq_disable();
++ list_splice_init(&per_cpu(blk_cpu_done, cpu),
++ &__get_cpu_var(blk_cpu_done));
++ raise_softirq_irqoff(BLOCK_SOFTIRQ);
++ local_irq_enable();
++ }
++
++ return NOTIFY_OK;
++}
++
++
++static struct notifier_block blk_cpu_notifier __cpuinitdata = {
++ .notifier_call = blk_cpu_notify,
++};
++
++/**
++ * blk_complete_request - end I/O on a request
++ * @req: the request being processed
++ *
++ * Description:
++ * Ends all I/O on a request. It does not handle partial completions,
++ * unless the driver actually implements this in its completion callback
++ * through requeueing. The actual completion happens out-of-order,
++ * through a softirq handler. The user must have registered a completion
++ * callback through blk_queue_softirq_done().
++ **/
++
++void blk_complete_request(struct request *req)
++{
++ struct list_head *cpu_list;
++ unsigned long flags;
++
++ BUG_ON(!req->q->softirq_done_fn);
++
++ local_irq_save(flags);
++
++ cpu_list = &__get_cpu_var(blk_cpu_done);
++ list_add_tail(&req->donelist, cpu_list);
++ raise_softirq_irqoff(BLOCK_SOFTIRQ);
++
++ local_irq_restore(flags);
++}
++
++EXPORT_SYMBOL(blk_complete_request);
++
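Put differently, a driver using this path does only a cheap status read in
its interrupt handler and defers the real completion work to the softirq.
A sketch with made-up names (struct my_hw, my_hw_read_status()); the
handler is registered once with blk_queue_softirq_done(), which appears in
blk-settings.c further down:

    /* runs in softirq context, possibly on another CPU */
    static void my_softirq_done(struct request *rq)
    {
            struct my_hw *hw = rq->q->queuedata;

            blk_end_request(rq, hw->last_error, blk_rq_bytes(rq));
    }

    static irqreturn_t my_isr(int irq, void *dev_id)
    {
            struct my_hw *hw = dev_id;

            hw->last_error = my_hw_read_status(hw) ? -EIO : 0;
            blk_complete_request(hw->current_rq);
            return IRQ_HANDLED;
    }

    /* at initialisation time */
    static void my_setup_completion(struct my_hw *hw)
    {
            blk_queue_softirq_done(hw->queue, my_softirq_done);
    }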
++/*
++ * queue lock must be held
++ */
++static void end_that_request_last(struct request *req, int error)
++{
++ struct gendisk *disk = req->rq_disk;
++
++ if (blk_rq_tagged(req))
++ blk_queue_end_tag(req->q, req);
++
++ if (blk_queued_rq(req))
++ blkdev_dequeue_request(req);
++
++ if (unlikely(laptop_mode) && blk_fs_request(req))
++ laptop_io_completion();
++
++ /*
++ * Account IO completion. bar_rq isn't accounted as a normal
++ * IO on queueing nor completion. Accounting the containing
++ * request is enough.
++ */
++ if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
++ unsigned long duration = jiffies - req->start_time;
++ const int rw = rq_data_dir(req);
++
++ __disk_stat_inc(disk, ios[rw]);
++ __disk_stat_add(disk, ticks[rw], duration);
++ disk_round_stats(disk);
++ disk->in_flight--;
++ }
++
++ if (req->end_io)
++ req->end_io(req, error);
++ else {
++ if (blk_bidi_rq(req))
++ __blk_put_request(req->next_rq->q, req->next_rq);
++
++ __blk_put_request(req->q, req);
++ }
++}
++
++static inline void __end_request(struct request *rq, int uptodate,
++ unsigned int nr_bytes)
++{
++ int error = 0;
++
++ if (uptodate <= 0)
++ error = uptodate ? uptodate : -EIO;
++
++ __blk_end_request(rq, error, nr_bytes);
++}
++
++/**
++ * blk_rq_bytes - Returns bytes left to complete in the entire request
++ **/
++unsigned int blk_rq_bytes(struct request *rq)
++{
++ if (blk_fs_request(rq))
++ return rq->hard_nr_sectors << 9;
++
++ return rq->data_len;
++}
++EXPORT_SYMBOL_GPL(blk_rq_bytes);
++
++/**
++ * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
++ **/
++unsigned int blk_rq_cur_bytes(struct request *rq)
++{
++ if (blk_fs_request(rq))
++ return rq->current_nr_sectors << 9;
++
++ if (rq->bio)
++ return rq->bio->bi_size;
++
++ return rq->data_len;
++}
++EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
++
++/**
++ * end_queued_request - end all I/O on a queued request
++ * @rq: the request being processed
++ * @uptodate: error value or 0/1 uptodate flag
++ *
++ * Description:
++ * Ends all I/O on a request, and removes it from the block layer queues.
++ * Not suitable for normal IO completion, unless the driver still has
++ * the request attached to the block layer.
++ *
++ **/
++void end_queued_request(struct request *rq, int uptodate)
++{
++ __end_request(rq, uptodate, blk_rq_bytes(rq));
++}
++EXPORT_SYMBOL(end_queued_request);
++
++/**
++ * end_dequeued_request - end all I/O on a dequeued request
++ * @rq: the request being processed
++ * @uptodate: error value or 0/1 uptodate flag
++ *
++ * Description:
++ * Ends all I/O on a request. The request must already have been
++ * dequeued using blkdev_dequeue_request(), as is normally the case
++ * for most drivers.
++ *
++ **/
++void end_dequeued_request(struct request *rq, int uptodate)
++{
++ __end_request(rq, uptodate, blk_rq_bytes(rq));
++}
++EXPORT_SYMBOL(end_dequeued_request);
++
++
++/**
++ * end_request - end I/O on the current segment of the request
++ * @req: the request being processed
++ * @uptodate: error value or 0/1 uptodate flag
++ *
++ * Description:
++ * Ends I/O on the current segment of a request. If that is the only
++ * remaining segment, the request is also completed and freed.
++ *
++ * This is a remnant of how older block drivers handled IO completions.
++ * Modern drivers typically end IO on the full request in one go, unless
++ * they have a residual value to account for. For that case this function
++ * isn't really useful, unless the residual just happens to be the
++ * full current segment. In other words, don't use this function in new
++ * code. Either use blk_end_request(), or the
++ * end_that_request_chunk() (along with end_that_request_last()) for
++ * partial completions.
++ *
++ **/
++void end_request(struct request *req, int uptodate)
++{
++ __end_request(req, uptodate, req->hard_cur_sectors << 9);
++}
++EXPORT_SYMBOL(end_request);
++
++/**
++ * blk_end_io - Generic end_io function to complete a request.
++ * @rq: the request being processed
++ * @error: 0 for success, < 0 for error
++ * @nr_bytes: number of bytes to complete @rq
++ * @bidi_bytes: number of bytes to complete @rq->next_rq
++ * @drv_callback: function called between completion of bios in the request
++ * and completion of the request.
++ * If the callback returns non-zero, this helper returns without
++ * completion of the request.
++ *
++ * Description:
++ * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
++ * If @rq has leftover, sets it up for the next range of segments.
++ *
++ * Return:
++ * 0 - we are done with this request
++ * 1 - this request is not freed yet, it still has pending buffers.
++ **/
++static int blk_end_io(struct request *rq, int error, int nr_bytes,
++ int bidi_bytes, int (drv_callback)(struct request *))
++{
++ struct request_queue *q = rq->q;
++ unsigned long flags = 0UL;
++
++ if (blk_fs_request(rq) || blk_pc_request(rq)) {
++ if (__end_that_request_first(rq, error, nr_bytes))
++ return 1;
++
++ /* Bidi request must be completed as a whole */
++ if (blk_bidi_rq(rq) &&
++ __end_that_request_first(rq->next_rq, error, bidi_bytes))
++ return 1;
++ }
++
++ /* Special feature for tricky drivers */
++ if (drv_callback && drv_callback(rq))
++ return 1;
++
++ add_disk_randomness(rq->rq_disk);
++
++ spin_lock_irqsave(q->queue_lock, flags);
++ end_that_request_last(rq, error);
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ return 0;
++}
++
++/**
++ * blk_end_request - Helper function for drivers to complete the request.
++ * @rq: the request being processed
++ * @error: 0 for success, < 0 for error
++ * @nr_bytes: number of bytes to complete
++ *
++ * Description:
++ * Ends I/O on a number of bytes attached to @rq.
++ * If @rq has leftover, sets it up for the next range of segments.
++ *
++ * Return:
++ * 0 - we are done with this request
++ * 1 - still buffers pending for this request
++ **/
++int blk_end_request(struct request *rq, int error, int nr_bytes)
++{
++ return blk_end_io(rq, error, nr_bytes, 0, NULL);
++}
++EXPORT_SYMBOL_GPL(blk_end_request);
++
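For a simple request_fn style driver that finishes each request
synchronously, the natural pairing is with the locked variant
__blk_end_request() documented just below, since request_fn is entered
with the queue lock held. A rough sketch, not a complete driver:

    static void my_request_fn(struct request_queue *q)
    {
            struct request *rq;

            while ((rq = elv_next_request(q)) != NULL) {
                    blkdev_dequeue_request(rq);

                    /* ... move blk_rq_bytes(rq) bytes to/from the device ... */

                    /* queue lock is already held, so use the __ variant */
                    __blk_end_request(rq, 0, blk_rq_bytes(rq));
            }
    }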
++/**
++ * __blk_end_request - Helper function for drivers to complete the request.
++ * @rq: the request being processed
++ * @error: 0 for success, < 0 for error
++ * @nr_bytes: number of bytes to complete
++ *
++ * Description:
++ * Must be called with queue lock held unlike blk_end_request().
++ *
++ * Return:
++ * 0 - we are done with this request
++ * 1 - still buffers pending for this request
++ **/
++int __blk_end_request(struct request *rq, int error, int nr_bytes)
++{
++ if (blk_fs_request(rq) || blk_pc_request(rq)) {
++ if (__end_that_request_first(rq, error, nr_bytes))
++ return 1;
++ }
++
++ add_disk_randomness(rq->rq_disk);
++
++ end_that_request_last(rq, error);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(__blk_end_request);
++
++/**
++ * blk_end_bidi_request - Helper function for drivers to complete bidi request.
++ * @rq: the bidi request being processed
++ * @error: 0 for success, < 0 for error
++ * @nr_bytes: number of bytes to complete @rq
++ * @bidi_bytes: number of bytes to complete @rq->next_rq
++ *
++ * Description:
++ * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
++ *
++ * Return:
++ * 0 - we are done with this request
++ * 1 - still buffers pending for this request
++ **/
++int blk_end_bidi_request(struct request *rq, int error, int nr_bytes,
++ int bidi_bytes)
++{
++ return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
++}
++EXPORT_SYMBOL_GPL(blk_end_bidi_request);
++
++/**
++ * blk_end_request_callback - Special helper function for tricky drivers
++ * @rq: the request being processed
++ * @error: 0 for success, < 0 for error
++ * @nr_bytes: number of bytes to complete
++ * @drv_callback: function called between completion of bios in the request
++ * and completion of the request.
++ * If the callback returns non-zero, this helper returns without
++ * completion of the request.
++ *
++ * Description:
++ * Ends I/O on a number of bytes attached to @rq.
++ * If @rq has leftover, sets it up for the next range of segments.
++ *
++ * This special helper function is used only for existing tricky drivers.
++ * (e.g. cdrom_newpc_intr() of ide-cd)
++ * This interface will be removed when such drivers are rewritten.
++ * Don't use this interface in other places anymore.
++ *
++ * Return:
++ * 0 - we are done with this request
++ * 1 - this request is not freed yet.
++ * this request still has pending buffers or
++ * the driver doesn't want to finish this request yet.
++ **/
++int blk_end_request_callback(struct request *rq, int error, int nr_bytes,
++ int (drv_callback)(struct request *))
++{
++ return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
++}
++EXPORT_SYMBOL_GPL(blk_end_request_callback);
++
++void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
++ struct bio *bio)
++{
++ /* first two bits are identical in rq->cmd_flags and bio->bi_rw */
++ rq->cmd_flags |= (bio->bi_rw & 3);
++
++ rq->nr_phys_segments = bio_phys_segments(q, bio);
++ rq->nr_hw_segments = bio_hw_segments(q, bio);
++ rq->current_nr_sectors = bio_cur_sectors(bio);
++ rq->hard_cur_sectors = rq->current_nr_sectors;
++ rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
++ rq->buffer = bio_data(bio);
++ rq->data_len = bio->bi_size;
++
++ rq->bio = rq->biotail = bio;
++
++ if (bio->bi_bdev)
++ rq->rq_disk = bio->bi_bdev->bd_disk;
++}
++
++int kblockd_schedule_work(struct work_struct *work)
++{
++ return queue_work(kblockd_workqueue, work);
++}
++
++EXPORT_SYMBOL(kblockd_schedule_work);
++
++void kblockd_flush_work(struct work_struct *work)
++{
++ cancel_work_sync(work);
++}
++EXPORT_SYMBOL(kblockd_flush_work);
++
++int __init blk_dev_init(void)
++{
++ int i;
++
++ kblockd_workqueue = create_workqueue("kblockd");
++ if (!kblockd_workqueue)
++ panic("Failed to create kblockd\n");
++
++ request_cachep = kmem_cache_create("blkdev_requests",
++ sizeof(struct request), 0, SLAB_PANIC, NULL);
++
++ blk_requestq_cachep = kmem_cache_create("blkdev_queue",
++ sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
++
++ for_each_possible_cpu(i)
++ INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
++
++ open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
++ register_hotcpu_notifier(&blk_cpu_notifier);
++
++ return 0;
++}
++
+diff --git a/block/blk-exec.c b/block/blk-exec.c
+new file mode 100644
+index 0000000..ebfb44e
+--- /dev/null
++++ b/block/blk-exec.c
+@@ -0,0 +1,105 @@
++/*
++ * Functions related to executing prepared requests and waiting for completion
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
++
++#include "blk.h"
++
++/*
++ * for max sense size
++ */
++#include <scsi/scsi_cmnd.h>
++
++/**
++ * blk_end_sync_rq - executes a completion event on a request
++ * @rq: request to complete
++ * @error: end io status of the request
++ */
++void blk_end_sync_rq(struct request *rq, int error)
++{
++ struct completion *waiting = rq->end_io_data;
++
++ rq->end_io_data = NULL;
++ __blk_put_request(rq->q, rq);
++
++ /*
++ * complete last, if this is a stack request the process (and thus
++ * the rq pointer) could be invalid right after this complete()
++ */
++ complete(waiting);
++}
++EXPORT_SYMBOL(blk_end_sync_rq);
++
++/**
++ * blk_execute_rq_nowait - insert a request into queue for execution
++ * @q: queue to insert the request in
++ * @bd_disk: matching gendisk
++ * @rq: request to insert
++ * @at_head: insert request at head or tail of queue
++ * @done: I/O completion handler
++ *
++ * Description:
++ * Insert a fully prepared request at the back of the io scheduler queue
++ * for execution. Don't wait for completion.
++ */
++void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
++ struct request *rq, int at_head,
++ rq_end_io_fn *done)
++{
++ int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
++
++ rq->rq_disk = bd_disk;
++ rq->cmd_flags |= REQ_NOMERGE;
++ rq->end_io = done;
++ WARN_ON(irqs_disabled());
++ spin_lock_irq(q->queue_lock);
++ __elv_add_request(q, rq, where, 1);
++ __generic_unplug_device(q);
++ spin_unlock_irq(q->queue_lock);
++}
++EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
++
++/**
++ * blk_execute_rq - insert a request into queue for execution
++ * @q: queue to insert the request in
++ * @bd_disk: matching gendisk
++ * @rq: request to insert
++ * @at_head: insert request at head or tail of queue
++ *
++ * Description:
++ * Insert a fully prepared request at the back of the io scheduler queue
++ * for execution and wait for completion.
++ */
++int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
++ struct request *rq, int at_head)
++{
++ DECLARE_COMPLETION_ONSTACK(wait);
++ char sense[SCSI_SENSE_BUFFERSIZE];
++ int err = 0;
++
++ /*
++ * we need an extra reference to the request, so we can look at
++ * it after io completion
++ */
++ rq->ref_count++;
++
++ if (!rq->sense) {
++ memset(sense, 0, sizeof(sense));
++ rq->sense = sense;
++ rq->sense_len = 0;
++ }
++
++ rq->end_io_data = &wait;
++ blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
++ wait_for_completion(&wait);
++
++ if (rq->errors)
++ err = -EIO;
++
++ return err;
++}
++
++EXPORT_SYMBOL(blk_execute_rq);
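A typical caller builds a packet command and lets blk_execute_rq() sleep
until it finishes; note from the code above that a stack sense buffer is
supplied automatically when rq->sense is unset. A hedged example, with the
CDB contents chosen purely for illustration:

    static int my_test_unit_ready(struct request_queue *q, struct gendisk *disk)
    {
            struct request *rq;
            int err;

            rq = blk_get_request(q, READ, __GFP_WAIT);
            if (!rq)
                    return -ENOMEM;

            rq->cmd_type = REQ_TYPE_BLOCK_PC;
            rq->cmd[0] = 0x00;              /* TEST UNIT READY */
            rq->cmd_len = 6;
            rq->timeout = 30 * HZ;

            err = blk_execute_rq(q, disk, rq, 0);
            blk_put_request(rq);
            return err;
    }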
+diff --git a/block/blk-ioc.c b/block/blk-ioc.c
+new file mode 100644
+index 0000000..6d16755
+--- /dev/null
++++ b/block/blk-ioc.c
+@@ -0,0 +1,194 @@
++/*
++ * Functions related to io context handling
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
++#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
++
++#include "blk.h"
++
++/*
++ * For io context allocations
++ */
++static struct kmem_cache *iocontext_cachep;
++
++static void cfq_dtor(struct io_context *ioc)
++{
++ struct cfq_io_context *cic[1];
++ int r;
++
++ /*
++ * We don't have a specific key to lookup with, so use the gang
++ * lookup to just retrieve the first item stored. The cfq exit
++ * function will iterate the full tree, so any member will do.
++ */
++ r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
++ if (r > 0)
++ cic[0]->dtor(ioc);
++}
++
++/*
++ * IO Context helper functions. put_io_context() returns 1 if there are no
++ * more users of this io context, 0 otherwise.
++ */
++int put_io_context(struct io_context *ioc)
++{
++ if (ioc == NULL)
++ return 1;
++
++ BUG_ON(atomic_read(&ioc->refcount) == 0);
++
++ if (atomic_dec_and_test(&ioc->refcount)) {
++ rcu_read_lock();
++ if (ioc->aic && ioc->aic->dtor)
++ ioc->aic->dtor(ioc->aic);
++ rcu_read_unlock();
++ cfq_dtor(ioc);
++
++ kmem_cache_free(iocontext_cachep, ioc);
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(put_io_context);
++
++static void cfq_exit(struct io_context *ioc)
++{
++ struct cfq_io_context *cic[1];
++ int r;
++
++ rcu_read_lock();
++ /*
++ * See comment for cfq_dtor()
++ */
++ r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
++ rcu_read_unlock();
++
++ if (r > 0)
++ cic[0]->exit(ioc);
++}
++
++/* Called by the exiting task */
++void exit_io_context(void)
++{
++ struct io_context *ioc;
++
++ task_lock(current);
++ ioc = current->io_context;
++ current->io_context = NULL;
++ task_unlock(current);
++
++ if (atomic_dec_and_test(&ioc->nr_tasks)) {
++ if (ioc->aic && ioc->aic->exit)
++ ioc->aic->exit(ioc->aic);
++ cfq_exit(ioc);
++
++ put_io_context(ioc);
++ }
++}
++
++struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
++{
++ struct io_context *ret;
++
++ ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
++ if (ret) {
++ atomic_set(&ret->refcount, 1);
++ atomic_set(&ret->nr_tasks, 1);
++ spin_lock_init(&ret->lock);
++ ret->ioprio_changed = 0;
++ ret->ioprio = 0;
++ ret->last_waited = jiffies; /* doesn't matter... */
++ ret->nr_batch_requests = 0; /* because this is 0 */
++ ret->aic = NULL;
++ INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
++ ret->ioc_data = NULL;
++ }
++
++ return ret;
++}
++
++/*
++ * If the current task has no IO context then create one and initialise it.
++ * Otherwise, return its existing IO context.
++ *
++ * This returned IO context doesn't have a specifically elevated refcount,
++ * but since the current task itself holds a reference, the context can be
++ * used in general code, so long as it stays within `current` context.
++ */
++struct io_context *current_io_context(gfp_t gfp_flags, int node)
++{
++ struct task_struct *tsk = current;
++ struct io_context *ret;
++
++ ret = tsk->io_context;
++ if (likely(ret))
++ return ret;
++
++ ret = alloc_io_context(gfp_flags, node);
++ if (ret) {
++ /* make sure set_task_ioprio() sees the settings above */
++ smp_wmb();
++ tsk->io_context = ret;
++ }
++
++ return ret;
++}
++
++/*
++ * If the current task has no IO context then create one and initialise it.
++ * If it does have a context, take a ref on it.
++ *
++ * This is always called in the context of the task which submitted the I/O.
++ */
++struct io_context *get_io_context(gfp_t gfp_flags, int node)
++{
++ struct io_context *ret = NULL;
++
++ /*
++ * Check for unlikely race with exiting task. ioc ref count is
++ * zero when ioc is being detached.
++ */
++ do {
++ ret = current_io_context(gfp_flags, node);
++ if (unlikely(!ret))
++ break;
++ } while (!atomic_inc_not_zero(&ret->refcount));
++
++ return ret;
++}
++EXPORT_SYMBOL(get_io_context);
++
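The expected consumer is an IO scheduler attaching per-task state; the
reference discipline is a plain get/put pair. A minimal, hypothetical
sketch:

    static void my_touch_io_context(struct request_queue *q)
    {
            struct io_context *ioc = get_io_context(GFP_ATOMIC, q->node);

            if (!ioc)
                    return;

            /* ... look up or install per-task scheduler data keyed by ioc ... */

            put_io_context(ioc);
    }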
++void copy_io_context(struct io_context **pdst, struct io_context **psrc)
++{
++ struct io_context *src = *psrc;
++ struct io_context *dst = *pdst;
++
++ if (src) {
++ BUG_ON(atomic_read(&src->refcount) == 0);
++ atomic_inc(&src->refcount);
++ put_io_context(dst);
++ *pdst = src;
++ }
++}
++EXPORT_SYMBOL(copy_io_context);
++
++void swap_io_context(struct io_context **ioc1, struct io_context **ioc2)
++{
++ struct io_context *temp;
++ temp = *ioc1;
++ *ioc1 = *ioc2;
++ *ioc2 = temp;
++}
++EXPORT_SYMBOL(swap_io_context);
++
++int __init blk_ioc_init(void)
++{
++ iocontext_cachep = kmem_cache_create("blkdev_ioc",
++ sizeof(struct io_context), 0, SLAB_PANIC, NULL);
++ return 0;
++}
++subsys_initcall(blk_ioc_init);
+diff --git a/block/blk-map.c b/block/blk-map.c
+new file mode 100644
+index 0000000..916cfc9
+--- /dev/null
++++ b/block/blk-map.c
+@@ -0,0 +1,264 @@
++/*
++ * Functions related to mapping data to requests
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
++
++#include "blk.h"
++
++int blk_rq_append_bio(struct request_queue *q, struct request *rq,
++ struct bio *bio)
++{
++ if (!rq->bio)
++ blk_rq_bio_prep(q, rq, bio);
++ else if (!ll_back_merge_fn(q, rq, bio))
++ return -EINVAL;
++ else {
++ rq->biotail->bi_next = bio;
++ rq->biotail = bio;
++
++ rq->data_len += bio->bi_size;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(blk_rq_append_bio);
++
++static int __blk_rq_unmap_user(struct bio *bio)
++{
++ int ret = 0;
++
++ if (bio) {
++ if (bio_flagged(bio, BIO_USER_MAPPED))
++ bio_unmap_user(bio);
++ else
++ ret = bio_uncopy_user(bio);
++ }
++
++ return ret;
++}
++
++static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
++ void __user *ubuf, unsigned int len)
++{
++ unsigned long uaddr;
++ struct bio *bio, *orig_bio;
++ int reading, ret;
++
++ reading = rq_data_dir(rq) == READ;
++
++ /*
++ * if alignment requirement is satisfied, map in user pages for
++ * direct dma. else, set up kernel bounce buffers
++ */
++ uaddr = (unsigned long) ubuf;
++ if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
++ bio = bio_map_user(q, NULL, uaddr, len, reading);
++ else
++ bio = bio_copy_user(q, uaddr, len, reading);
++
++ if (IS_ERR(bio))
++ return PTR_ERR(bio);
++
++ orig_bio = bio;
++ blk_queue_bounce(q, &bio);
++
++ /*
++ * We link the bounce buffer in and could have to traverse it
++ * later so we have to get a ref to prevent it from being freed
++ */
++ bio_get(bio);
++
++ ret = blk_rq_append_bio(q, rq, bio);
++ if (!ret)
++ return bio->bi_size;
++
++ /* if it was bounced we must call the end io function */
++ bio_endio(bio, 0);
++ __blk_rq_unmap_user(orig_bio);
++ bio_put(bio);
++ return ret;
++}
++
++/**
++ * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
++ * @q: request queue where request should be inserted
++ * @rq: request structure to fill
++ * @ubuf: the user buffer
++ * @len: length of user data
++ *
++ * Description:
++ * Data will be mapped directly for zero copy io, if possible. Otherwise
++ * a kernel bounce buffer is used.
++ *
++ * A matching blk_rq_unmap_user() must be issued at the end of io, while
++ * still in process context.
++ *
++ * Note: The mapped bio may need to be bounced through blk_queue_bounce()
++ * before being submitted to the device, as pages mapped may be out of
++ * reach. It's the caller's responsibility to make sure this happens. The
++ * original bio must be passed back in to blk_rq_unmap_user() for proper
++ * unmapping.
++ */
++int blk_rq_map_user(struct request_queue *q, struct request *rq,
++ void __user *ubuf, unsigned long len)
++{
++ unsigned long bytes_read = 0;
++ struct bio *bio = NULL;
++ int ret;
++
++ if (len > (q->max_hw_sectors << 9))
++ return -EINVAL;
++ if (!len || !ubuf)
++ return -EINVAL;
++
++ while (bytes_read != len) {
++ unsigned long map_len, end, start;
++
++ map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
++ end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
++ >> PAGE_SHIFT;
++ start = (unsigned long)ubuf >> PAGE_SHIFT;
++
++ /*
++ * A bad offset could cause us to require BIO_MAX_PAGES + 1
++ * pages. If this happens we just lower the requested
++ * mapping len by a page so that we can fit
++ */
++ if (end - start > BIO_MAX_PAGES)
++ map_len -= PAGE_SIZE;
++
++ ret = __blk_rq_map_user(q, rq, ubuf, map_len);
++ if (ret < 0)
++ goto unmap_rq;
++ if (!bio)
++ bio = rq->bio;
++ bytes_read += ret;
++ ubuf += ret;
++ }
++
++ rq->buffer = rq->data = NULL;
++ return 0;
++unmap_rq:
++ blk_rq_unmap_user(bio);
++ return ret;
++}
++
++EXPORT_SYMBOL(blk_rq_map_user);
++
++/**
++ * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
++ * @q: request queue where request should be inserted
++ * @rq: request to map data to
++ * @iov: pointer to the iovec
++ * @iov_count: number of elements in the iovec
++ * @len: I/O byte count
++ *
++ * Description:
++ * Data will be mapped directly for zero copy io, if possible. Otherwise
++ * a kernel bounce buffer is used.
++ *
++ * A matching blk_rq_unmap_user() must be issued at the end of io, while
++ * still in process context.
++ *
++ * Note: The mapped bio may need to be bounced through blk_queue_bounce()
++ * before being submitted to the device, as pages mapped may be out of
++ * reach. It's the caller's responsibility to make sure this happens. The
++ * original bio must be passed back in to blk_rq_unmap_user() for proper
++ * unmapping.
++ */
++int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
++ struct sg_iovec *iov, int iov_count, unsigned int len)
++{
++ struct bio *bio;
++
++ if (!iov || iov_count <= 0)
++ return -EINVAL;
++
++ /* we don't allow misaligned data like bio_map_user() does. If the
++ * user is using sg, they're expected to know the alignment constraints
++ * and respect them accordingly */
++ bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
++ if (IS_ERR(bio))
++ return PTR_ERR(bio);
++
++ if (bio->bi_size != len) {
++ bio_endio(bio, 0);
++ bio_unmap_user(bio);
++ return -EINVAL;
++ }
++
++ bio_get(bio);
++ blk_rq_bio_prep(q, rq, bio);
++ rq->buffer = rq->data = NULL;
++ return 0;
++}
++
++EXPORT_SYMBOL(blk_rq_map_user_iov);
++
++/**
++ * blk_rq_unmap_user - unmap a request with user data
++ * @bio: start of bio list
++ *
++ * Description:
++ * Unmap a rq previously mapped by blk_rq_map_user(). The caller must
++ * supply the original rq->bio from the blk_rq_map_user() return, since
++ * the io completion may have changed rq->bio.
++ */
++int blk_rq_unmap_user(struct bio *bio)
++{
++ struct bio *mapped_bio;
++ int ret = 0, ret2;
++
++ while (bio) {
++ mapped_bio = bio;
++ if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
++ mapped_bio = bio->bi_private;
++
++ ret2 = __blk_rq_unmap_user(mapped_bio);
++ if (ret2 && !ret)
++ ret = ret2;
++
++ mapped_bio = bio;
++ bio = bio->bi_next;
++ bio_put(mapped_bio);
++ }
++
++ return ret;
++}
++
++EXPORT_SYMBOL(blk_rq_unmap_user);
++
++/**
++ * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
++ * @q: request queue where request should be inserted
++ * @rq: request to fill
++ * @kbuf: the kernel buffer
++ * @len: length of user data
++ * @gfp_mask: memory allocation flags
++ */
++int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
++ unsigned int len, gfp_t gfp_mask)
++{
++ struct bio *bio;
++
++ if (len > (q->max_hw_sectors << 9))
++ return -EINVAL;
++ if (!len || !kbuf)
++ return -EINVAL;
++
++ bio = bio_map_kern(q, kbuf, len, gfp_mask);
++ if (IS_ERR(bio))
++ return PTR_ERR(bio);
++
++ if (rq_data_dir(rq) == WRITE)
++ bio->bi_rw |= (1 << BIO_RW);
++
++ blk_rq_bio_prep(q, rq, bio);
++ blk_queue_bounce(q, &rq->bio);
++ rq->buffer = rq->data = NULL;
++ return 0;
++}
++
++EXPORT_SYMBOL(blk_rq_map_kern);
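Tying this file to blk-exec.c above, an SG_IO style ioctl path maps the
user buffer, executes the request and unmaps it using the bio remembered
before execution, exactly as the blk_rq_unmap_user() comment requires.
All names below are invented and the CDB is a simple INQUIRY:

    static int my_inquiry_ioctl(struct request_queue *q, struct gendisk *disk,
                                void __user *ubuf, unsigned long len)
    {
            struct request *rq;
            struct bio *bio;
            int err;

            rq = blk_get_request(q, READ, __GFP_WAIT);
            if (!rq)
                    return -ENOMEM;

            rq->cmd_type = REQ_TYPE_BLOCK_PC;
            rq->cmd[0] = 0x12;              /* INQUIRY */
            rq->cmd[4] = len;               /* assumes len <= 255 */
            rq->cmd_len = 6;
            rq->timeout = 60 * HZ;

            err = blk_rq_map_user(q, rq, ubuf, len);
            if (!err) {
                    /* keep the original bio; completion may change rq->bio */
                    bio = rq->bio;
                    err = blk_execute_rq(q, disk, rq, 0);
                    blk_rq_unmap_user(bio);
            }

            blk_put_request(rq);
            return err;
    }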
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+new file mode 100644
+index 0000000..5023f0b
+--- /dev/null
++++ b/block/blk-merge.c
+@@ -0,0 +1,485 @@
++/*
++ * Functions related to segment and merge handling
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
++#include <linux/scatterlist.h>
++
++#include "blk.h"
++
++void blk_recalc_rq_sectors(struct request *rq, int nsect)
++{
++ if (blk_fs_request(rq)) {
++ rq->hard_sector += nsect;
++ rq->hard_nr_sectors -= nsect;
++
++ /*
++ * Move the I/O submission pointers ahead if required.
++ */
++ if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
++ (rq->sector <= rq->hard_sector)) {
++ rq->sector = rq->hard_sector;
++ rq->nr_sectors = rq->hard_nr_sectors;
++ rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
++ rq->current_nr_sectors = rq->hard_cur_sectors;
++ rq->buffer = bio_data(rq->bio);
++ }
++
++ /*
++ * if total number of sectors is less than the first segment
++ * size, something has gone terribly wrong
++ */
++ if (rq->nr_sectors < rq->current_nr_sectors) {
++ printk("blk: request botched\n");
++ rq->nr_sectors = rq->current_nr_sectors;
++ }
++ }
++}
++
++void blk_recalc_rq_segments(struct request *rq)
++{
++ int nr_phys_segs;
++ int nr_hw_segs;
++ unsigned int phys_size;
++ unsigned int hw_size;
++ struct bio_vec *bv, *bvprv = NULL;
++ int seg_size;
++ int hw_seg_size;
++ int cluster;
++ struct req_iterator iter;
++ int high, highprv = 1;
++ struct request_queue *q = rq->q;
++
++ if (!rq->bio)
++ return;
++
++ cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
++ hw_seg_size = seg_size = 0;
++ phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
++ rq_for_each_segment(bv, rq, iter) {
++ /*
++ * the trick here is making sure that a high page is never
++ * considered part of another segment, since that might
++ * change with the bounce page.
++ */
++ high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
++ if (high || highprv)
++ goto new_hw_segment;
++ if (cluster) {
++ if (seg_size + bv->bv_len > q->max_segment_size)
++ goto new_segment;
++ if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
++ goto new_segment;
++ if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
++ goto new_segment;
++ if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
++ goto new_hw_segment;
++
++ seg_size += bv->bv_len;
++ hw_seg_size += bv->bv_len;
++ bvprv = bv;
++ continue;
++ }
++new_segment:
++ if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
++ !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
++ hw_seg_size += bv->bv_len;
++ else {
++new_hw_segment:
++ if (nr_hw_segs == 1 &&
++ hw_seg_size > rq->bio->bi_hw_front_size)
++ rq->bio->bi_hw_front_size = hw_seg_size;
++ hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
++ nr_hw_segs++;
++ }
++
++ nr_phys_segs++;
++ bvprv = bv;
++ seg_size = bv->bv_len;
++ highprv = high;
++ }
++
++ if (nr_hw_segs == 1 &&
++ hw_seg_size > rq->bio->bi_hw_front_size)
++ rq->bio->bi_hw_front_size = hw_seg_size;
++ if (hw_seg_size > rq->biotail->bi_hw_back_size)
++ rq->biotail->bi_hw_back_size = hw_seg_size;
++ rq->nr_phys_segments = nr_phys_segs;
++ rq->nr_hw_segments = nr_hw_segs;
++}
++
++void blk_recount_segments(struct request_queue *q, struct bio *bio)
++{
++ struct request rq;
++ struct bio *nxt = bio->bi_next;
++ rq.q = q;
++ rq.bio = rq.biotail = bio;
++ bio->bi_next = NULL;
++ blk_recalc_rq_segments(&rq);
++ bio->bi_next = nxt;
++ bio->bi_phys_segments = rq.nr_phys_segments;
++ bio->bi_hw_segments = rq.nr_hw_segments;
++ bio->bi_flags |= (1 << BIO_SEG_VALID);
++}
++EXPORT_SYMBOL(blk_recount_segments);
++
++static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
++ struct bio *nxt)
++{
++ if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
++ return 0;
++
++ if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
++ return 0;
++ if (bio->bi_size + nxt->bi_size > q->max_segment_size)
++ return 0;
++
++ /*
++ * bio and nxt are contiguous in memory, check if the queue allows
++ * these two to be merged into one
++ */
++ if (BIO_SEG_BOUNDARY(q, bio, nxt))
++ return 1;
++
++ return 0;
++}
++
++static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
++ struct bio *nxt)
++{
++ if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
++ blk_recount_segments(q, bio);
++ if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
++ blk_recount_segments(q, nxt);
++ if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
++ BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
++ return 0;
++ if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
++ return 0;
++
++ return 1;
++}
++
++/*
++ * map a request to scatterlist, return number of sg entries setup. Caller
++ * must make sure sg can hold rq->nr_phys_segments entries
++ */
++int blk_rq_map_sg(struct request_queue *q, struct request *rq,
++ struct scatterlist *sglist)
++{
++ struct bio_vec *bvec, *bvprv;
++ struct req_iterator iter;
++ struct scatterlist *sg;
++ int nsegs, cluster;
++
++ nsegs = 0;
++ cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
++
++ /*
++ * for each bio in rq
++ */
++ bvprv = NULL;
++ sg = NULL;
++ rq_for_each_segment(bvec, rq, iter) {
++ int nbytes = bvec->bv_len;
++
++ if (bvprv && cluster) {
++ if (sg->length + nbytes > q->max_segment_size)
++ goto new_segment;
++
++ if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
++ goto new_segment;
++ if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
++ goto new_segment;
++
++ sg->length += nbytes;
++ } else {
++new_segment:
++ if (!sg)
++ sg = sglist;
++ else {
++ /*
++ * If the driver previously mapped a shorter
++ * list, we could see a termination bit
++ * prematurely unless it fully inits the sg
++ * table on each mapping. We KNOW that there
++ * must be more entries here or the driver
++ * would be buggy, so force clear the
++ * termination bit to avoid doing a full
++ * sg_init_table() in drivers for each command.
++ */
++ sg->page_link &= ~0x02;
++ sg = sg_next(sg);
++ }
++
++ sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
++ nsegs++;
++ }
++ bvprv = bvec;
++ } /* segments in rq */
++
++ if (q->dma_drain_size) {
++ sg->page_link &= ~0x02;
++ sg = sg_next(sg);
++ sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
++ q->dma_drain_size,
++ ((unsigned long)q->dma_drain_buffer) &
++ (PAGE_SIZE - 1));
++ nsegs++;
++ }
++
++ if (sg)
++ sg_mark_end(sg);
++
++ return nsegs;
++}
++
++EXPORT_SYMBOL(blk_rq_map_sg);
++
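On the driver side the mapping step usually looks roughly like this,
assuming a hypothetical struct my_hw whose sg array was sized for
q->max_phys_segments and initialised with sg_init_table() at probe time:

    static int my_map_request(struct my_hw *hw, struct request *rq)
    {
            int nents;

            /*
             * As the comment above explains, blk_rq_map_sg() clears a
             * stale termination bit itself, so the sg table only needs
             * to be initialised once.
             */
            nents = blk_rq_map_sg(hw->queue, rq, hw->sg);
            nents = dma_map_sg(hw->dev, hw->sg, nents,
                               rq_data_dir(rq) == READ ?
                               DMA_FROM_DEVICE : DMA_TO_DEVICE);

            /* hand 'nents' address/length pairs to the controller */
            return nents;
    }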
++static inline int ll_new_mergeable(struct request_queue *q,
++ struct request *req,
++ struct bio *bio)
++{
++ int nr_phys_segs = bio_phys_segments(q, bio);
++
++ if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
++ req->cmd_flags |= REQ_NOMERGE;
++ if (req == q->last_merge)
++ q->last_merge = NULL;
++ return 0;
++ }
++
++ /*
++ * A hw segment is just getting larger, bump just the phys
++ * counter.
++ */
++ req->nr_phys_segments += nr_phys_segs;
++ return 1;
++}
++
++static inline int ll_new_hw_segment(struct request_queue *q,
++ struct request *req,
++ struct bio *bio)
++{
++ int nr_hw_segs = bio_hw_segments(q, bio);
++ int nr_phys_segs = bio_phys_segments(q, bio);
++
++ if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
++ || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
++ req->cmd_flags |= REQ_NOMERGE;
++ if (req == q->last_merge)
++ q->last_merge = NULL;
++ return 0;
++ }
++
++ /*
++ * This will form the start of a new hw segment. Bump both
++ * counters.
++ */
++ req->nr_hw_segments += nr_hw_segs;
++ req->nr_phys_segments += nr_phys_segs;
++ return 1;
++}
++
++int ll_back_merge_fn(struct request_queue *q, struct request *req,
++ struct bio *bio)
++{
++ unsigned short max_sectors;
++ int len;
++
++ if (unlikely(blk_pc_request(req)))
++ max_sectors = q->max_hw_sectors;
++ else
++ max_sectors = q->max_sectors;
++
++ if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
++ req->cmd_flags |= REQ_NOMERGE;
++ if (req == q->last_merge)
++ q->last_merge = NULL;
++ return 0;
++ }
++ if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
++ blk_recount_segments(q, req->biotail);
++ if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
++ blk_recount_segments(q, bio);
++ len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
++ if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
++ !BIOVEC_VIRT_OVERSIZE(len)) {
++ int mergeable = ll_new_mergeable(q, req, bio);
++
++ if (mergeable) {
++ if (req->nr_hw_segments == 1)
++ req->bio->bi_hw_front_size = len;
++ if (bio->bi_hw_segments == 1)
++ bio->bi_hw_back_size = len;
++ }
++ return mergeable;
++ }
++
++ return ll_new_hw_segment(q, req, bio);
++}
++
++int ll_front_merge_fn(struct request_queue *q, struct request *req,
++ struct bio *bio)
++{
++ unsigned short max_sectors;
++ int len;
++
++ if (unlikely(blk_pc_request(req)))
++ max_sectors = q->max_hw_sectors;
++ else
++ max_sectors = q->max_sectors;
++
++
++ if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
++ req->cmd_flags |= REQ_NOMERGE;
++ if (req == q->last_merge)
++ q->last_merge = NULL;
++ return 0;
++ }
++ len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
++ if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
++ blk_recount_segments(q, bio);
++ if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
++ blk_recount_segments(q, req->bio);
++ if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
++ !BIOVEC_VIRT_OVERSIZE(len)) {
++ int mergeable = ll_new_mergeable(q, req, bio);
++
++ if (mergeable) {
++ if (bio->bi_hw_segments == 1)
++ bio->bi_hw_front_size = len;
++ if (req->nr_hw_segments == 1)
++ req->biotail->bi_hw_back_size = len;
++ }
++ return mergeable;
++ }
++
++ return ll_new_hw_segment(q, req, bio);
++}
++
++static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
++ struct request *next)
++{
++ int total_phys_segments;
++ int total_hw_segments;
++
++ /*
++ * First check if either of the requests is a re-queued
++ * request. We can't merge them if so.
++ */
++ if (req->special || next->special)
++ return 0;
++
++ /*
++ * Will it become too large?
++ */
++ if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
++ return 0;
++
++ total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
++ if (blk_phys_contig_segment(q, req->biotail, next->bio))
++ total_phys_segments--;
++
++ if (total_phys_segments > q->max_phys_segments)
++ return 0;
++
++ total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
++ if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
++ int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
++ /*
++ * propagate the combined length to the end of the requests
++ */
++ if (req->nr_hw_segments == 1)
++ req->bio->bi_hw_front_size = len;
++ if (next->nr_hw_segments == 1)
++ next->biotail->bi_hw_back_size = len;
++ total_hw_segments--;
++ }
++
++ if (total_hw_segments > q->max_hw_segments)
++ return 0;
++
++ /* Merge is OK... */
++ req->nr_phys_segments = total_phys_segments;
++ req->nr_hw_segments = total_hw_segments;
++ return 1;
++}
++
++/*
++ * Has to be called with the request spinlock acquired
++ */
++static int attempt_merge(struct request_queue *q, struct request *req,
++ struct request *next)
++{
++ if (!rq_mergeable(req) || !rq_mergeable(next))
++ return 0;
++
++ /*
++ * not contiguous
++ */
++ if (req->sector + req->nr_sectors != next->sector)
++ return 0;
++
++ if (rq_data_dir(req) != rq_data_dir(next)
++ || req->rq_disk != next->rq_disk
++ || next->special)
++ return 0;
++
++ /*
++ * If we are allowed to merge, then append bio list
++ * from next to rq and release next. merge_requests_fn
++ * will have updated segment counts, update sector
++ * counts here.
++ */
++ if (!ll_merge_requests_fn(q, req, next))
++ return 0;
++
++ /*
++ * At this point we have either done a back merge
++ * or front merge. We need the smaller start_time of
++ * the merged requests to be the current request
++ * for accounting purposes.
++ */
++ if (time_after(req->start_time, next->start_time))
++ req->start_time = next->start_time;
++
++ req->biotail->bi_next = next->bio;
++ req->biotail = next->biotail;
++
++ req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
++
++ elv_merge_requests(q, req, next);
++
++ if (req->rq_disk) {
++ disk_round_stats(req->rq_disk);
++ req->rq_disk->in_flight--;
++ }
++
++ req->ioprio = ioprio_best(req->ioprio, next->ioprio);
++
++ __blk_put_request(q, next);
++ return 1;
++}
++
++int attempt_back_merge(struct request_queue *q, struct request *rq)
++{
++ struct request *next = elv_latter_request(q, rq);
++
++ if (next)
++ return attempt_merge(q, rq, next);
++
++ return 0;
++}
++
++int attempt_front_merge(struct request_queue *q, struct request *rq)
++{
++ struct request *prev = elv_former_request(q, rq);
++
++ if (prev)
++ return attempt_merge(q, prev, rq);
++
++ return 0;
++}
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+new file mode 100644
+index 0000000..4df09a1
+--- /dev/null
++++ b/block/blk-settings.c
+@@ -0,0 +1,402 @@
++/*
++ * Functions related to setting various queue properties from drivers
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
++#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
++
++#include "blk.h"
++
++unsigned long blk_max_low_pfn, blk_max_pfn;
++EXPORT_SYMBOL(blk_max_low_pfn);
++EXPORT_SYMBOL(blk_max_pfn);
++
++/**
++ * blk_queue_prep_rq - set a prepare_request function for queue
++ * @q: queue
++ * @pfn: prepare_request function
++ *
++ * It's possible for a queue to register a prepare_request callback which
++ * is invoked before the request is handed to the request_fn. The goal of
++ * the function is to prepare a request for I/O, it can be used to build a
++ * cdb from the request data for instance.
++ *
++ */
++void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
++{
++ q->prep_rq_fn = pfn;
++}
++
++EXPORT_SYMBOL(blk_queue_prep_rq);
++
++/**
++ * blk_queue_merge_bvec - set a merge_bvec function for queue
++ * @q: queue
++ * @mbfn: merge_bvec_fn
++ *
++ * Usually queues have static limitations on the max sectors or segments that
++ * we can put in a request. Stacking drivers may have some settings that
++ * are dynamic, and thus we have to query the queue whether it is ok to
++ * add a new bio_vec to a bio at a given offset or not. If the block device
++ * has such limitations, it needs to register a merge_bvec_fn to control
++ * the size of bios sent to it. Note that a block device *must* allow a
++ * single page to be added to an empty bio. The block device driver may want
++ * to use the bio_split() function to deal with these bios. By default
++ * no merge_bvec_fn is defined for a queue, and only the fixed limits are
++ * honored.
++ */
++void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
++{
++ q->merge_bvec_fn = mbfn;
++}
++
++EXPORT_SYMBOL(blk_queue_merge_bvec);
++
++void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
++{
++ q->softirq_done_fn = fn;
++}
++
++EXPORT_SYMBOL(blk_queue_softirq_done);
++
++/**
++ * blk_queue_make_request - define an alternate make_request function for a device
++ * @q: the request queue for the device to be affected
++ * @mfn: the alternate make_request function
++ *
++ * Description:
++ * The normal way for &struct bios to be passed to a device
++ * driver is for them to be collected into requests on a request
++ * queue, and then to allow the device driver to select requests
++ * off that queue when it is ready. This works well for many block
++ * devices. However some block devices (typically virtual devices
++ * such as md or lvm) do not benefit from the processing on the
++ * request queue, and are served best by having the requests passed
++ * directly to them. This can be achieved by providing a function
++ * to blk_queue_make_request().
++ *
++ * Caveat:
++ * The driver that does this *must* be able to deal appropriately
++ * with buffers in "highmemory". This can be accomplished by either calling
++ * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
++ * blk_queue_bounce() to create a buffer in normal memory.
++ **/
++void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
++{
++ /*
++ * set defaults
++ */
++ q->nr_requests = BLKDEV_MAX_RQ;
++ blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
++ blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
++ q->make_request_fn = mfn;
++ q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
++ q->backing_dev_info.state = 0;
++ q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
++ blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
++ blk_queue_hardsect_size(q, 512);
++ blk_queue_dma_alignment(q, 511);
++ blk_queue_congestion_threshold(q);
++ q->nr_batching = BLK_BATCH_REQ;
++
++ q->unplug_thresh = 4; /* hmm */
++ q->unplug_delay = (3 * HZ) / 1000; /* 3 milliseconds */
++ if (q->unplug_delay == 0)
++ q->unplug_delay = 1;
++
++ INIT_WORK(&q->unplug_work, blk_unplug_work);
++
++ q->unplug_timer.function = blk_unplug_timeout;
++ q->unplug_timer.data = (unsigned long)q;
++
++ /*
++ * by default assume old behaviour and bounce for any highmem page
++ */
++ blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
++}
++
++EXPORT_SYMBOL(blk_queue_make_request);
++
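For a bio-based virtual device the usual pairing is with blk_alloc_queue();
a brief sketch, where struct my_dev and my_make_request() are placeholders:

    static int my_create_queue(struct my_dev *dev)
    {
            dev->queue = blk_alloc_queue(GFP_KERNEL);
            if (!dev->queue)
                    return -ENOMEM;

            blk_queue_make_request(dev->queue, my_make_request);
            dev->queue->queuedata = dev;

            /* this driver kmaps highmem pages itself, so never bounce */
            blk_queue_bounce_limit(dev->queue, BLK_BOUNCE_ANY);
            return 0;
    }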
++/**
++ * blk_queue_bounce_limit - set bounce buffer limit for queue
++ * @q: the request queue for the device
++ * @dma_addr: bus address limit
++ *
++ * Description:
++ * Different hardware can have different requirements as to what pages
++ * it can do I/O directly to. A low level driver can call
++ * blk_queue_bounce_limit to have lower memory pages allocated as bounce
++ * buffers for doing I/O to pages residing above @dma_addr.
++ **/
++void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
++{
++ unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
++ int dma = 0;
++
++ q->bounce_gfp = GFP_NOIO;
++#if BITS_PER_LONG == 64
++ /* Assume anything <= 4GB can be handled by IOMMU.
++ Actually some IOMMUs can handle everything, but I don't
++ know of a way to test this here. */
++ if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
++ dma = 1;
++ q->bounce_pfn = max_low_pfn;
++#else
++ if (bounce_pfn < blk_max_low_pfn)
++ dma = 1;
++ q->bounce_pfn = bounce_pfn;
++#endif
++ if (dma) {
++ init_emergency_isa_pool();
++ q->bounce_gfp = GFP_NOIO | GFP_DMA;
++ q->bounce_pfn = bounce_pfn;
++ }
++}
++
++EXPORT_SYMBOL(blk_queue_bounce_limit);
++
++/**
++ * blk_queue_max_sectors - set max sectors for a request for this queue
++ * @q: the request queue for the device
++ * @max_sectors: max sectors in the usual 512b unit
++ *
++ * Description:
++ * Enables a low level driver to set an upper limit on the size of
++ * received requests.
++ **/
++void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
++{
++ if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
++ max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
++ printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
++ }
++
++ if (BLK_DEF_MAX_SECTORS > max_sectors)
++ q->max_hw_sectors = q->max_sectors = max_sectors;
++ else {
++ q->max_sectors = BLK_DEF_MAX_SECTORS;
++ q->max_hw_sectors = max_sectors;
++ }
++}
++
++EXPORT_SYMBOL(blk_queue_max_sectors);
++
++/**
++ * blk_queue_max_phys_segments - set max phys segments for a request for this queue
++ * @q: the request queue for the device
++ * @max_segments: max number of segments
++ *
++ * Description:
++ * Enables a low level driver to set an upper limit on the number of
++ * physical data segments in a request. This would be the largest sized
++ * scatter list the driver could handle.
++ **/
++void blk_queue_max_phys_segments(struct request_queue *q,
++ unsigned short max_segments)
++{
++ if (!max_segments) {
++ max_segments = 1;
++ printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
++ }
++
++ q->max_phys_segments = max_segments;
++}
++
++EXPORT_SYMBOL(blk_queue_max_phys_segments);
++
++/**
++ * blk_queue_max_hw_segments - set max hw segments for a request for this queue
++ * @q: the request queue for the device
++ * @max_segments: max number of segments
++ *
++ * Description:
++ * Enables a low level driver to set an upper limit on the number of
++ * hw data segments in a request. This would be the largest number of
++ * address/length pairs the host adapter can actually give at once
++ * to the device.
++ **/
++void blk_queue_max_hw_segments(struct request_queue *q,
++ unsigned short max_segments)
++{
++ if (!max_segments) {
++ max_segments = 1;
++ printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
++ }
++
++ q->max_hw_segments = max_segments;
++}
++
++EXPORT_SYMBOL(blk_queue_max_hw_segments);
++
++/**
++ * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
++ * @q: the request queue for the device
++ * @max_size: max size of segment in bytes
++ *
++ * Description:
++ * Enables a low level driver to set an upper limit on the size of a
++ * coalesced segment
++ **/
++void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
++{
++ if (max_size < PAGE_CACHE_SIZE) {
++ max_size = PAGE_CACHE_SIZE;
++ printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
++ }
++
++ q->max_segment_size = max_size;
++}
++
++EXPORT_SYMBOL(blk_queue_max_segment_size);
++
++/**
++ * blk_queue_hardsect_size - set hardware sector size for the queue
++ * @q: the request queue for the device
++ * @size: the hardware sector size, in bytes
++ *
++ * Description:
++ * This should typically be set to the lowest possible sector size
++ * that the hardware can operate on (possible without reverting to
++ * even internal read-modify-write operations). Usually the default
++ * of 512 covers most hardware.
++ **/
++void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
++{
++ q->hardsect_size = size;
++}
++
++EXPORT_SYMBOL(blk_queue_hardsect_size);
++
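Taken together, a low level driver typically applies its hardware limits
right after creating the queue; the values below are made up for
illustration only:

    static void my_set_queue_limits(struct request_queue *q)
    {
            blk_queue_max_sectors(q, 256);          /* 128 KB per request */
            blk_queue_max_phys_segments(q, 32);
            blk_queue_max_hw_segments(q, 32);
            blk_queue_max_segment_size(q, 64 * 1024);
            blk_queue_hardsect_size(q, 512);

            /* controller cannot address highmem pages directly */
            blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
    }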
++/*
++ * Returns the minimum that is _not_ zero, unless both are zero.
++ */
++#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min((l), (r))))
++
++/**
++ * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
++ * @t: the stacking driver (top)
++ * @b: the underlying device (bottom)
++ **/
++void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
++{
++ /* zero is "infinity" */
++ t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
++ t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
++
++ t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
++ t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
++ t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
++ t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
++ if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
++ clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
++}
++
++EXPORT_SYMBOL(blk_queue_stack_limits);
++
++/**
++ * blk_queue_dma_drain - Set up a drain buffer for excess dma.
++ *
++ * @q: the request queue for the device
++ * @buf: physically contiguous buffer
++ * @size: size of the buffer in bytes
++ *
++ * Some devices have excess DMA problems and can't simply discard (or
++ * zero fill) the unwanted piece of the transfer. They have to have a
++ * real area of memory to transfer it into. The use case for this is
++ * ATAPI devices in DMA mode. If the packet command causes a transfer
++ * bigger than the transfer size some HBAs will lock up if there
++ * aren't DMA elements to contain the excess transfer. What this API
++ * does is adjust the queue so that the buf is always appended
++ * silently to the scatterlist.
++ *
++ * Note: This routine adjusts max_hw_segments to make room for
++ * appending the drain buffer. If you call
++ * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
++ * calling this routine, you must set the limit to one fewer than your
++ * device can support otherwise there won't be room for the drain
++ * buffer.
++ */
++int blk_queue_dma_drain(struct request_queue *q, void *buf,
++ unsigned int size)
++{
++ if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
++ return -EINVAL;
++ /* make room for appending the drain */
++ --q->max_hw_segments;
++ --q->max_phys_segments;
++ q->dma_drain_buffer = buf;
++ q->dma_drain_size = size;
++
++ return 0;
++}
++
++EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
++
++/**
++ * blk_queue_segment_boundary - set boundary rules for segment merging
++ * @q: the request queue for the device
++ * @mask: the memory boundary mask
++ **/
++void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
++{
++ if (mask < PAGE_CACHE_SIZE - 1) {
++ mask = PAGE_CACHE_SIZE - 1;
++ printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
++ }
++
++ q->seg_boundary_mask = mask;
++}
++
++EXPORT_SYMBOL(blk_queue_segment_boundary);
++
++/**
++ * blk_queue_dma_alignment - set dma length and memory alignment
++ * @q: the request queue for the device
++ * @mask: alignment mask
++ *
++ * description:
++ * set required memory and length alignment for direct dma transactions.
++ * this is used when building direct io requests for the queue.
++ *
++ **/
++void blk_queue_dma_alignment(struct request_queue *q, int mask)
++{
++ q->dma_alignment = mask;
++}
++
++EXPORT_SYMBOL(blk_queue_dma_alignment);
++
++/**
++ * blk_queue_update_dma_alignment - update dma length and memory alignment
++ * @q: the request queue for the device
++ * @mask: alignment mask
++ *
++ * description:
++ * update required memory and length alignment for direct dma transactions.
++ * If the requested alignment is larger than the current alignment, then
++ * the current queue alignment is updated to the new value, otherwise it
++ * is left alone. The design of this is to allow multiple objects
++ * (driver, device, transport etc) to set their respective
++ * alignments without having them interfere.
++ *
++ **/
++void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
++{
++ BUG_ON(mask > PAGE_SIZE);
++
++ if (mask > q->dma_alignment)
++ q->dma_alignment = mask;
++}
++
++EXPORT_SYMBOL(blk_queue_update_dma_alignment);
++
++int __init blk_settings_init(void)
++{
++ blk_max_low_pfn = max_low_pfn - 1;
++ blk_max_pfn = max_pfn - 1;
++ return 0;
++}
++subsys_initcall(blk_settings_init);
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+new file mode 100644
+index 0000000..bc28776
+--- /dev/null
++++ b/block/blk-sysfs.c
+@@ -0,0 +1,309 @@
++/*
++ * Functions related to sysfs handling
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
++#include <linux/blktrace_api.h>
++
++#include "blk.h"
++
++struct queue_sysfs_entry {
++ struct attribute attr;
++ ssize_t (*show)(struct request_queue *, char *);
++ ssize_t (*store)(struct request_queue *, const char *, size_t);
++};
++
++static ssize_t
++queue_var_show(unsigned int var, char *page)
++{
++ return sprintf(page, "%d\n", var);
++}
++
++static ssize_t
++queue_var_store(unsigned long *var, const char *page, size_t count)
++{
++ char *p = (char *) page;
++
++ *var = simple_strtoul(p, &p, 10);
++ return count;
++}
++
++static ssize_t queue_requests_show(struct request_queue *q, char *page)
++{
++ return queue_var_show(q->nr_requests, (page));
++}
++
++static ssize_t
++queue_requests_store(struct request_queue *q, const char *page, size_t count)
++{
++ struct request_list *rl = &q->rq;
++ unsigned long nr;
++ int ret = queue_var_store(&nr, page, count);
++ if (nr < BLKDEV_MIN_RQ)
++ nr = BLKDEV_MIN_RQ;
++
++ spin_lock_irq(q->queue_lock);
++ q->nr_requests = nr;
++ blk_queue_congestion_threshold(q);
++
++ if (rl->count[READ] >= queue_congestion_on_threshold(q))
++ blk_set_queue_congested(q, READ);
++ else if (rl->count[READ] < queue_congestion_off_threshold(q))
++ blk_clear_queue_congested(q, READ);
++
++ if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
++ blk_set_queue_congested(q, WRITE);
++ else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
++ blk_clear_queue_congested(q, WRITE);
++
++ if (rl->count[READ] >= q->nr_requests) {
++ blk_set_queue_full(q, READ);
++ } else if (rl->count[READ]+1 <= q->nr_requests) {
++ blk_clear_queue_full(q, READ);
++ wake_up(&rl->wait[READ]);
++ }
++
++ if (rl->count[WRITE] >= q->nr_requests) {
++ blk_set_queue_full(q, WRITE);
++ } else if (rl->count[WRITE]+1 <= q->nr_requests) {
++ blk_clear_queue_full(q, WRITE);
++ wake_up(&rl->wait[WRITE]);
++ }
++ spin_unlock_irq(q->queue_lock);
++ return ret;
++}
++
++static ssize_t queue_ra_show(struct request_queue *q, char *page)
++{
++ int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
++
++ return queue_var_show(ra_kb, (page));
++}
++
++static ssize_t
++queue_ra_store(struct request_queue *q, const char *page, size_t count)
++{
++ unsigned long ra_kb;
++ ssize_t ret = queue_var_store(&ra_kb, page, count);
++
++ spin_lock_irq(q->queue_lock);
++ q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
++ spin_unlock_irq(q->queue_lock);
++
++ return ret;
++}
++
++static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
++{
++ int max_sectors_kb = q->max_sectors >> 1;
++
++ return queue_var_show(max_sectors_kb, (page));
++}
++
++static ssize_t queue_hw_sector_size_show(struct request_queue *q, char *page)
++{
++ return queue_var_show(q->hardsect_size, page);
++}
++
++static ssize_t
++queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
++{
++ unsigned long max_sectors_kb,
++ max_hw_sectors_kb = q->max_hw_sectors >> 1,
++ page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
++ ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
++
++ if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
++ return -EINVAL;
++ /*
++ * Take the queue lock to update the readahead and max_sectors
++ * values synchronously:
++ */
++ spin_lock_irq(q->queue_lock);
++ q->max_sectors = max_sectors_kb << 1;
++ spin_unlock_irq(q->queue_lock);
++
++ return ret;
++}
++
++static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
++{
++ int max_hw_sectors_kb = q->max_hw_sectors >> 1;
++
++ return queue_var_show(max_hw_sectors_kb, (page));
++}
++
++
++static struct queue_sysfs_entry queue_requests_entry = {
++ .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
++ .show = queue_requests_show,
++ .store = queue_requests_store,
++};
++
++static struct queue_sysfs_entry queue_ra_entry = {
++ .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
++ .show = queue_ra_show,
++ .store = queue_ra_store,
++};
++
++static struct queue_sysfs_entry queue_max_sectors_entry = {
++ .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
++ .show = queue_max_sectors_show,
++ .store = queue_max_sectors_store,
++};
++
++static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
++ .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
++ .show = queue_max_hw_sectors_show,
++};
++
++static struct queue_sysfs_entry queue_iosched_entry = {
++ .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
++ .show = elv_iosched_show,
++ .store = elv_iosched_store,
++};
++
++static struct queue_sysfs_entry queue_hw_sector_size_entry = {
++ .attr = {.name = "hw_sector_size", .mode = S_IRUGO },
++ .show = queue_hw_sector_size_show,
++};
++
++static struct attribute *default_attrs[] = {
++ &queue_requests_entry.attr,
++ &queue_ra_entry.attr,
++ &queue_max_hw_sectors_entry.attr,
++ &queue_max_sectors_entry.attr,
++ &queue_iosched_entry.attr,
++ &queue_hw_sector_size_entry.attr,
++ NULL,
++};
++
++#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
++
++static ssize_t
++queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
++{
++ struct queue_sysfs_entry *entry = to_queue(attr);
++ struct request_queue *q =
++ container_of(kobj, struct request_queue, kobj);
++ ssize_t res;
++
++ if (!entry->show)
++ return -EIO;
++ mutex_lock(&q->sysfs_lock);
++ if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
++ mutex_unlock(&q->sysfs_lock);
++ return -ENOENT;
++ }
++ res = entry->show(q, page);
++ mutex_unlock(&q->sysfs_lock);
++ return res;
++}
++
++static ssize_t
++queue_attr_store(struct kobject *kobj, struct attribute *attr,
++ const char *page, size_t length)
++{
++ struct queue_sysfs_entry *entry = to_queue(attr);
++ struct request_queue *q = container_of(kobj, struct request_queue, kobj);
++
++ ssize_t res;
++
++ if (!entry->store)
++ return -EIO;
++ mutex_lock(&q->sysfs_lock);
++ if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
++ mutex_unlock(&q->sysfs_lock);
++ return -ENOENT;
++ }
++ res = entry->store(q, page, length);
++ mutex_unlock(&q->sysfs_lock);
++ return res;
++}
++
++/**
++ * blk_cleanup_queue - release a &struct request_queue when it is no longer needed
++ * @kobj: the kobj belonging of the request queue to be released
++ *
++ * Description:
++ * blk_cleanup_queue is the pair to blk_init_queue() or
++ * blk_queue_make_request(). It should be called when a request queue is
++ * being released; typically when a block device is being de-registered.
++ * Currently, its primary task is to free all the &struct request
++ * structures that were allocated to the queue and the queue itself.
++ *
++ * Caveat:
++ * Hopefully the low level driver will have finished any
++ * outstanding requests first...
++ **/
++static void blk_release_queue(struct kobject *kobj)
++{
++ struct request_queue *q =
++ container_of(kobj, struct request_queue, kobj);
++ struct request_list *rl = &q->rq;
++
++ blk_sync_queue(q);
++
++ if (rl->rq_pool)
++ mempool_destroy(rl->rq_pool);
++
++ if (q->queue_tags)
++ __blk_queue_free_tags(q);
++
++ blk_trace_shutdown(q);
++
++ bdi_destroy(&q->backing_dev_info);
++ kmem_cache_free(blk_requestq_cachep, q);
++}
++
++static struct sysfs_ops queue_sysfs_ops = {
++ .show = queue_attr_show,
++ .store = queue_attr_store,
++};
++
++struct kobj_type blk_queue_ktype = {
++ .sysfs_ops = &queue_sysfs_ops,
++ .default_attrs = default_attrs,
++ .release = blk_release_queue,
++};
++
++int blk_register_queue(struct gendisk *disk)
++{
++ int ret;
++
++ struct request_queue *q = disk->queue;
++
++ if (!q || !q->request_fn)
++ return -ENXIO;
++
++ ret = kobject_add(&q->kobj, kobject_get(&disk->dev.kobj),
++ "%s", "queue");
++ if (ret < 0)
++ return ret;
++
++ kobject_uevent(&q->kobj, KOBJ_ADD);
++
++ ret = elv_register_queue(q);
++ if (ret) {
++ kobject_uevent(&q->kobj, KOBJ_REMOVE);
++ kobject_del(&q->kobj);
++ return ret;
++ }
++
++ return 0;
++}
++
++void blk_unregister_queue(struct gendisk *disk)
++{
++ struct request_queue *q = disk->queue;
++
++ if (q && q->request_fn) {
++ elv_unregister_queue(q);
++
++ kobject_uevent(&q->kobj, KOBJ_REMOVE);
++ kobject_del(&q->kobj);
++ kobject_put(&disk->dev.kobj);
++ }
++}
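
As a usage note, a sketch of the pattern a new read-only queue attribute would follow in blk-sysfs.c above; queue_foo_show and queue_foo_entry are hypothetical names, not part of this patch.

/* Hypothetical attribute in the blk-sysfs.c style: a show helper plus a
 * queue_sysfs_entry, which would also need to be listed in default_attrs[]
 * to appear under /sys/block/<dev>/queue/. */
static ssize_t queue_foo_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

static struct queue_sysfs_entry queue_foo_entry = {
	.attr = {.name = "foo", .mode = S_IRUGO },
	.show = queue_foo_show,
};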
+diff --git a/block/blk-tag.c b/block/blk-tag.c
+new file mode 100644
+index 0000000..d1fd300
+--- /dev/null
++++ b/block/blk-tag.c
+@@ -0,0 +1,396 @@
++/*
++ * Functions related to tagged command queuing
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/bio.h>
++#include <linux/blkdev.h>
++
++/**
++ * blk_queue_find_tag - find a request by its tag and queue
++ * @q: The request queue for the device
++ * @tag: The tag of the request
++ *
++ * Notes:
++ * Should be used when a device returns a tag and you want to match
++ * it with a request.
++ *
++ * no locks need be held.
++ **/
++struct request *blk_queue_find_tag(struct request_queue *q, int tag)
++{
++ return blk_map_queue_find_tag(q->queue_tags, tag);
++}
++
++EXPORT_SYMBOL(blk_queue_find_tag);
++
++/**
++ * __blk_free_tags - release a given set of tag maintenance info
++ * @bqt: the tag map to free
++ *
++ * Tries to free the specified @bqt. Returns true if it was
++ * actually freed and false if there are still references using it
++ */
++static int __blk_free_tags(struct blk_queue_tag *bqt)
++{
++ int retval;
++
++ retval = atomic_dec_and_test(&bqt->refcnt);
++ if (retval) {
++ BUG_ON(bqt->busy);
++
++ kfree(bqt->tag_index);
++ bqt->tag_index = NULL;
++
++ kfree(bqt->tag_map);
++ bqt->tag_map = NULL;
++
++ kfree(bqt);
++ }
++
++ return retval;
++}
++
++/**
++ * __blk_queue_free_tags - release tag maintenance info
++ * @q: the request queue for the device
++ *
++ * Notes:
++ * blk_cleanup_queue() will take care of calling this function, if tagging
++ * has been used. So there's no need to call this directly.
++ **/
++void __blk_queue_free_tags(struct request_queue *q)
++{
++ struct blk_queue_tag *bqt = q->queue_tags;
++
++ if (!bqt)
++ return;
++
++ __blk_free_tags(bqt);
++
++ q->queue_tags = NULL;
++ q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
++}
++
++/**
++ * blk_free_tags - release a given set of tag maintenance info
++ * @bqt: the tag map to free
++ *
++ * For an externally managed @bqt, frees the map. Callers of this
++ * function must guarantee to have released all the queues that
++ * might have been using this tag map.
++ */
++void blk_free_tags(struct blk_queue_tag *bqt)
++{
++ if (unlikely(!__blk_free_tags(bqt)))
++ BUG();
++}
++EXPORT_SYMBOL(blk_free_tags);
++
++/**
++ * blk_queue_free_tags - release tag maintenance info
++ * @q: the request queue for the device
++ *
++ * Notes:
++ * This is used to disable tagged queuing on a device, yet leave the
++ * queue functioning.
++ **/
++void blk_queue_free_tags(struct request_queue *q)
++{
++ clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
++}
++
++EXPORT_SYMBOL(blk_queue_free_tags);
++
++static int
++init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
++{
++ struct request **tag_index;
++ unsigned long *tag_map;
++ int nr_ulongs;
++
++ if (q && depth > q->nr_requests * 2) {
++ depth = q->nr_requests * 2;
++ printk(KERN_ERR "%s: adjusted depth to %d\n",
++ __FUNCTION__, depth);
++ }
++
++ tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
++ if (!tag_index)
++ goto fail;
++
++ nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
++ tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
++ if (!tag_map)
++ goto fail;
++
++ tags->real_max_depth = depth;
++ tags->max_depth = depth;
++ tags->tag_index = tag_index;
++ tags->tag_map = tag_map;
++
++ return 0;
++fail:
++ kfree(tag_index);
++ return -ENOMEM;
++}
++
++static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
++ int depth)
++{
++ struct blk_queue_tag *tags;
++
++ tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
++ if (!tags)
++ goto fail;
++
++ if (init_tag_map(q, tags, depth))
++ goto fail;
++
++ tags->busy = 0;
++ atomic_set(&tags->refcnt, 1);
++ return tags;
++fail:
++ kfree(tags);
++ return NULL;
++}
++
++/**
++ * blk_init_tags - initialize the tag info for an external tag map
++ * @depth: the maximum queue depth supported
++ * @tags: the tag to use
++ **/
++struct blk_queue_tag *blk_init_tags(int depth)
++{
++ return __blk_queue_init_tags(NULL, depth);
++}
++EXPORT_SYMBOL(blk_init_tags);
++
++/**
++ * blk_queue_init_tags - initialize the queue tag info
++ * @q: the request queue for the device
++ * @depth: the maximum queue depth supported
++ * @tags: the tag to use
++ **/
++int blk_queue_init_tags(struct request_queue *q, int depth,
++ struct blk_queue_tag *tags)
++{
++ int rc;
++
++ BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
++
++ if (!tags && !q->queue_tags) {
++ tags = __blk_queue_init_tags(q, depth);
++
++ if (!tags)
++ goto fail;
++ } else if (q->queue_tags) {
++ if ((rc = blk_queue_resize_tags(q, depth)))
++ return rc;
++ set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
++ return 0;
++ } else
++ atomic_inc(&tags->refcnt);
++
++ /*
++ * assign it, all done
++ */
++ q->queue_tags = tags;
++ q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
++ INIT_LIST_HEAD(&q->tag_busy_list);
++ return 0;
++fail:
++ kfree(tags);
++ return -ENOMEM;
++}
++
++EXPORT_SYMBOL(blk_queue_init_tags);
++
++/**
++ * blk_queue_resize_tags - change the queueing depth
++ * @q: the request queue for the device
++ * @new_depth: the new max command queueing depth
++ *
++ * Notes:
++ * Must be called with the queue lock held.
++ **/
++int blk_queue_resize_tags(struct request_queue *q, int new_depth)
++{
++ struct blk_queue_tag *bqt = q->queue_tags;
++ struct request **tag_index;
++ unsigned long *tag_map;
++ int max_depth, nr_ulongs;
++
++ if (!bqt)
++ return -ENXIO;
++
++ /*
++ * If we already have a large enough real_max_depth, just
++ * adjust max_depth. *NOTE* as requests with tag values
++ * between new_depth and real_max_depth can be in flight, the tag
++ * map cannot be shrunk blindly here.
++ */
++ if (new_depth <= bqt->real_max_depth) {
++ bqt->max_depth = new_depth;
++ return 0;
++ }
++
++ /*
++ * Currently cannot replace a shared tag map with a new
++ * one, so error out if this is the case
++ */
++ if (atomic_read(&bqt->refcnt) != 1)
++ return -EBUSY;
++
++ /*
++ * save the old state info, so we can copy it back
++ */
++ tag_index = bqt->tag_index;
++ tag_map = bqt->tag_map;
++ max_depth = bqt->real_max_depth;
++
++ if (init_tag_map(q, bqt, new_depth))
++ return -ENOMEM;
++
++ memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
++ nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
++ memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
++
++ kfree(tag_index);
++ kfree(tag_map);
++ return 0;
++}
++
++EXPORT_SYMBOL(blk_queue_resize_tags);
++
++/**
++ * blk_queue_end_tag - end tag operations for a request
++ * @q: the request queue for the device
++ * @rq: the request that has completed
++ *
++ * Description:
++ * Typically called when end_that_request_first() returns 0, meaning
++ * all transfers have been done for a request. It's important to call
++ * this function before end_that_request_last(), as that will put the
++ * request back on the free list thus corrupting the internal tag list.
++ *
++ * Notes:
++ * queue lock must be held.
++ **/
++void blk_queue_end_tag(struct request_queue *q, struct request *rq)
++{
++ struct blk_queue_tag *bqt = q->queue_tags;
++ int tag = rq->tag;
++
++ BUG_ON(tag == -1);
++
++ if (unlikely(tag >= bqt->real_max_depth))
++ /*
++ * This can happen after tag depth has been reduced.
++ * FIXME: how about a warning or info message here?
++ */
++ return;
++
++ list_del_init(&rq->queuelist);
++ rq->cmd_flags &= ~REQ_QUEUED;
++ rq->tag = -1;
++
++ if (unlikely(bqt->tag_index[tag] == NULL))
++ printk(KERN_ERR "%s: tag %d is missing\n",
++ __FUNCTION__, tag);
++
++ bqt->tag_index[tag] = NULL;
++
++ if (unlikely(!test_bit(tag, bqt->tag_map))) {
++ printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
++ __FUNCTION__, tag);
++ return;
++ }
++ /*
++ * The tag_map bit acts as a lock for tag_index[bit], so we need
++ * unlock memory barrier semantics.
++ */
++ clear_bit_unlock(tag, bqt->tag_map);
++ bqt->busy--;
++}
++
++EXPORT_SYMBOL(blk_queue_end_tag);
++
++/**
++ * blk_queue_start_tag - find a free tag and assign it
++ * @q: the request queue for the device
++ * @rq: the block request that needs tagging
++ *
++ * Description:
++ * This can either be used as a stand-alone helper, or possibly be
++ * assigned as the queue &prep_rq_fn (in which case &struct request
++ * automagically gets a tag assigned). Note that this function
++ * assumes that any type of request can be queued! If this is not
++ * true for your device, you must check the request type before
++ * calling this function. The request will also be removed from
++ * the request queue, so it's the driver's responsibility to re-add
++ * it if it should need to be restarted for some reason.
++ *
++ * Notes:
++ * queue lock must be held.
++ **/
++int blk_queue_start_tag(struct request_queue *q, struct request *rq)
++{
++ struct blk_queue_tag *bqt = q->queue_tags;
++ int tag;
++
++ if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
++ printk(KERN_ERR
++ "%s: request %p for device [%s] already tagged %d",
++ __FUNCTION__, rq,
++ rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
++ BUG();
++ }
++
++ /*
++ * Protect against shared tag maps, as we may not have exclusive
++ * access to the tag map.
++ */
++ do {
++ tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
++ if (tag >= bqt->max_depth)
++ return 1;
++
++ } while (test_and_set_bit_lock(tag, bqt->tag_map));
++ /*
++ * We need lock ordering semantics given by test_and_set_bit_lock.
++ * See blk_queue_end_tag for details.
++ */
++
++ rq->cmd_flags |= REQ_QUEUED;
++ rq->tag = tag;
++ bqt->tag_index[tag] = rq;
++ blkdev_dequeue_request(rq);
++ list_add(&rq->queuelist, &q->tag_busy_list);
++ bqt->busy++;
++ return 0;
++}
++
++EXPORT_SYMBOL(blk_queue_start_tag);
++
++/**
++ * blk_queue_invalidate_tags - invalidate all pending tags
++ * @q: the request queue for the device
++ *
++ * Description:
++ * Hardware conditions may dictate a need to stop all pending requests.
++ * In this case, we will safely clear the block side of the tag queue and
++ * re-add all requests to the request queue in the right order.
++ *
++ * Notes:
++ * queue lock must be held.
++ **/
++void blk_queue_invalidate_tags(struct request_queue *q)
++{
++ struct list_head *tmp, *n;
++
++ list_for_each_safe(tmp, n, &q->tag_busy_list)
++ blk_requeue_request(q, list_entry_rq(tmp));
++}
++
++EXPORT_SYMBOL(blk_queue_invalidate_tags);
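
For context, a rough sketch of how a driver typically uses the tagging helpers above; mydev_issue() and mydev_complete() are illustrative placeholders, not part of this patch, and the queue is assumed to have been set up with blk_queue_init_tags() beforehand.

/* Hypothetical request_fn: blk_queue_start_tag() assigns rq->tag and
 * dequeues the request itself, so the driver only issues it. */
static void mydev_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (blk_queue_start_tag(q, rq))
			break;		/* no free tag, retry on next run */
		mydev_issue(rq);	/* hardware identifies it by rq->tag */
	}
}

/* Hypothetical completion path, called with the queue lock held once
 * end_that_request_first() has returned 0: end the tag before
 * end_that_request_last(), as documented above. */
static void mydev_complete(struct request_queue *q, struct request *rq,
			   int uptodate)
{
	blk_queue_end_tag(q, rq);
	end_that_request_last(rq, uptodate);
}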
+diff --git a/block/blk.h b/block/blk.h
+new file mode 100644
+index 0000000..ec898dd
+--- /dev/null
++++ b/block/blk.h
+@@ -0,0 +1,53 @@
++#ifndef BLK_INTERNAL_H
++#define BLK_INTERNAL_H
++
++/* Amount of time in which a process may batch requests */
++#define BLK_BATCH_TIME (HZ/50UL)
++
++/* Number of requests a "batching" process may submit */
++#define BLK_BATCH_REQ 32
++
++extern struct kmem_cache *blk_requestq_cachep;
++extern struct kobj_type blk_queue_ktype;
++
++void rq_init(struct request_queue *q, struct request *rq);
++void init_request_from_bio(struct request *req, struct bio *bio);
++void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
++ struct bio *bio);
++void __blk_queue_free_tags(struct request_queue *q);
++
++void blk_unplug_work(struct work_struct *work);
++void blk_unplug_timeout(unsigned long data);
++
++struct io_context *current_io_context(gfp_t gfp_flags, int node);
++
++int ll_back_merge_fn(struct request_queue *q, struct request *req,
++ struct bio *bio);
++int ll_front_merge_fn(struct request_queue *q, struct request *req,
++ struct bio *bio);
++int attempt_back_merge(struct request_queue *q, struct request *rq);
++int attempt_front_merge(struct request_queue *q, struct request *rq);
++void blk_recalc_rq_segments(struct request *rq);
++void blk_recalc_rq_sectors(struct request *rq, int nsect);
++
++void blk_queue_congestion_threshold(struct request_queue *q);
++
++/*
++ * Return the threshold (number of used requests) at which the queue is
++ * considered to be congested. It includes a little hysteresis to keep the
++ * context switch rate down.
++ */
++static inline int queue_congestion_on_threshold(struct request_queue *q)
++{
++ return q->nr_congestion_on;
++}
++
++/*
++ * The threshold at which a queue is considered to be uncongested
++ */
++static inline int queue_congestion_off_threshold(struct request_queue *q)
++{
++ return q->nr_congestion_off;
++}
++
++#endif
diff --git a/block/blktrace.c b/block/blktrace.c
index 9b4da4a..568588c 100644
--- a/block/blktrace.c
@@ -136944,1020 +141462,4286 @@
-/*
- * Timer running if an idle class queue is waiting for service
- */
--static void cfq_idle_class_timer(unsigned long data)
+-static void cfq_idle_class_timer(unsigned long data)
+-{
+- struct cfq_data *cfqd = (struct cfq_data *) data;
+- unsigned long flags;
+-
+- spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+-
+- /*
+- * race with a non-idle queue, reset timer
+- */
+- if (!start_idle_class_timer(cfqd))
+- cfq_schedule_dispatch(cfqd);
+-
+- spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+-}
+-
+ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
+ {
+ del_timer_sync(&cfqd->idle_slice_timer);
+- del_timer_sync(&cfqd->idle_class_timer);
+ kblockd_flush_work(&cfqd->unplug_work);
+ }
+
+@@ -2126,10 +2117,6 @@ static void *cfq_init_queue(struct request_queue *q)
+ cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
+ cfqd->idle_slice_timer.data = (unsigned long) cfqd;
+
+- init_timer(&cfqd->idle_class_timer);
+- cfqd->idle_class_timer.function = cfq_idle_class_timer;
+- cfqd->idle_class_timer.data = (unsigned long) cfqd;
+-
+ INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
+
+ cfqd->last_end_request = jiffies;
+@@ -2160,7 +2147,7 @@ static int __init cfq_slab_setup(void)
+ if (!cfq_pool)
+ goto fail;
+
+- cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
++ cfq_ioc_pool = KMEM_CACHE(cfq_io_context, SLAB_DESTROY_BY_RCU);
+ if (!cfq_ioc_pool)
+ goto fail;
+
+diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
+index cae0a85..b733732 100644
+--- a/block/compat_ioctl.c
++++ b/block/compat_ioctl.c
+@@ -545,6 +545,7 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
+ struct blk_user_trace_setup buts;
+ struct compat_blk_user_trace_setup cbuts;
+ struct request_queue *q;
++ char b[BDEVNAME_SIZE];
+ int ret;
+
+ q = bdev_get_queue(bdev);
+@@ -554,6 +555,8 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
+ if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
+ return -EFAULT;
+
++ strcpy(b, bdevname(bdev, b));
++
+ buts = (struct blk_user_trace_setup) {
+ .act_mask = cbuts.act_mask,
+ .buf_size = cbuts.buf_size,
+@@ -565,7 +568,7 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
+ memcpy(&buts.name, &cbuts.name, 32);
+
+ mutex_lock(&bdev->bd_mutex);
+- ret = do_blk_trace_setup(q, bdev, &buts);
++ ret = do_blk_trace_setup(q, b, bdev->bd_dev, &buts);
+ mutex_unlock(&bdev->bd_mutex);
+ if (ret)
+ return ret;
+diff --git a/block/elevator.c b/block/elevator.c
+index e452deb..8cd5775 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -185,9 +185,7 @@ static elevator_t *elevator_alloc(struct request_queue *q,
+
+ eq->ops = &e->ops;
+ eq->elevator_type = e;
+- kobject_init(&eq->kobj);
+- kobject_set_name(&eq->kobj, "%s", "iosched");
+- eq->kobj.ktype = &elv_ktype;
++ kobject_init(&eq->kobj, &elv_ktype);
+ mutex_init(&eq->sysfs_lock);
+
+ eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
+@@ -743,7 +741,21 @@ struct request *elv_next_request(struct request_queue *q)
+ q->boundary_rq = NULL;
+ }
+
+- if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
++ if (rq->cmd_flags & REQ_DONTPREP)
++ break;
++
++ if (q->dma_drain_size && rq->data_len) {
++ /*
++ * make sure space for the drain appears; we
++ * know we can do this because max_hw_segments
++ * has been adjusted to be one fewer than the
++ * device can handle
++ */
++ rq->nr_phys_segments++;
++ rq->nr_hw_segments++;
++ }
++
++ if (!q->prep_rq_fn)
+ break;
+
+ ret = q->prep_rq_fn(q, rq);
+@@ -756,6 +768,16 @@ struct request *elv_next_request(struct request_queue *q)
+ * avoid resource deadlock. REQ_STARTED will
+ * prevent other fs requests from passing this one.
+ */
++ if (q->dma_drain_size && rq->data_len &&
++ !(rq->cmd_flags & REQ_DONTPREP)) {
++ /*
++ * remove the space for the drain we added
++ * so that we don't add it again
++ */
++ --rq->nr_phys_segments;
++ --rq->nr_hw_segments;
++ }
++
+ rq = NULL;
+ break;
+ } else if (ret == BLKPREP_KILL) {
+@@ -931,9 +953,7 @@ int elv_register_queue(struct request_queue *q)
+ elevator_t *e = q->elevator;
+ int error;
+
+- e->kobj.parent = &q->kobj;
+-
+- error = kobject_add(&e->kobj);
++ error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
+ if (!error) {
+ struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
+ if (attr) {
+diff --git a/block/genhd.c b/block/genhd.c
+index f2ac914..de2ebb2 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -17,8 +17,10 @@
+ #include <linux/buffer_head.h>
+ #include <linux/mutex.h>
+
+-struct kset block_subsys;
+-static DEFINE_MUTEX(block_subsys_lock);
++static DEFINE_MUTEX(block_class_lock);
++#ifndef CONFIG_SYSFS_DEPRECATED
++struct kobject *block_depr;
++#endif
+
+ /*
+ * Can be deleted altogether. Later.
+@@ -37,19 +39,17 @@ static inline int major_to_index(int major)
+ }
+
+ #ifdef CONFIG_PROC_FS
+-
+ void blkdev_show(struct seq_file *f, off_t offset)
+ {
+ struct blk_major_name *dp;
+
+ if (offset < BLKDEV_MAJOR_HASH_SIZE) {
+- mutex_lock(&block_subsys_lock);
++ mutex_lock(&block_class_lock);
+ for (dp = major_names[offset]; dp; dp = dp->next)
+ seq_printf(f, "%3d %s\n", dp->major, dp->name);
+- mutex_unlock(&block_subsys_lock);
++ mutex_unlock(&block_class_lock);
+ }
+ }
+-
+ #endif /* CONFIG_PROC_FS */
+
+ int register_blkdev(unsigned int major, const char *name)
+@@ -57,7 +57,7 @@ int register_blkdev(unsigned int major, const char *name)
+ struct blk_major_name **n, *p;
+ int index, ret = 0;
+
+- mutex_lock(&block_subsys_lock);
++ mutex_lock(&block_class_lock);
+
+ /* temporary */
+ if (major == 0) {
+@@ -102,7 +102,7 @@ int register_blkdev(unsigned int major, const char *name)
+ kfree(p);
+ }
+ out:
+- mutex_unlock(&block_subsys_lock);
++ mutex_unlock(&block_class_lock);
+ return ret;
+ }
+
+@@ -114,7 +114,7 @@ void unregister_blkdev(unsigned int major, const char *name)
+ struct blk_major_name *p = NULL;
+ int index = major_to_index(major);
+
+- mutex_lock(&block_subsys_lock);
++ mutex_lock(&block_class_lock);
+ for (n = &major_names[index]; *n; n = &(*n)->next)
+ if ((*n)->major == major)
+ break;
+@@ -124,7 +124,7 @@ void unregister_blkdev(unsigned int major, const char *name)
+ p = *n;
+ *n = p->next;
+ }
+- mutex_unlock(&block_subsys_lock);
++ mutex_unlock(&block_class_lock);
+ kfree(p);
+ }
+
+@@ -137,29 +137,30 @@ static struct kobj_map *bdev_map;
+ * range must be nonzero
+ * The hash chain is sorted on range, so that subranges can override.
+ */
+-void blk_register_region(dev_t dev, unsigned long range, struct module *module,
++void blk_register_region(dev_t devt, unsigned long range, struct module *module,
+ struct kobject *(*probe)(dev_t, int *, void *),
+ int (*lock)(dev_t, void *), void *data)
+ {
+- kobj_map(bdev_map, dev, range, module, probe, lock, data);
++ kobj_map(bdev_map, devt, range, module, probe, lock, data);
+ }
+
+ EXPORT_SYMBOL(blk_register_region);
+
+-void blk_unregister_region(dev_t dev, unsigned long range)
++void blk_unregister_region(dev_t devt, unsigned long range)
+ {
+- kobj_unmap(bdev_map, dev, range);
++ kobj_unmap(bdev_map, devt, range);
+ }
+
+ EXPORT_SYMBOL(blk_unregister_region);
+
+-static struct kobject *exact_match(dev_t dev, int *part, void *data)
++static struct kobject *exact_match(dev_t devt, int *part, void *data)
+ {
+ struct gendisk *p = data;
+- return &p->kobj;
++
++ return &p->dev.kobj;
+ }
+
+-static int exact_lock(dev_t dev, void *data)
++static int exact_lock(dev_t devt, void *data)
+ {
+ struct gendisk *p = data;
+
+@@ -194,8 +195,6 @@ void unlink_gendisk(struct gendisk *disk)
+ disk->minors);
+ }
+
+-#define to_disk(obj) container_of(obj,struct gendisk,kobj)
+-
+ /**
+ * get_gendisk - get partitioning information for a given device
+ * @dev: device to get partitioning information for
+@@ -203,10 +202,12 @@ void unlink_gendisk(struct gendisk *disk)
+ * This function gets the structure containing partitioning
+ * information for the given device @dev.
+ */
+-struct gendisk *get_gendisk(dev_t dev, int *part)
++struct gendisk *get_gendisk(dev_t devt, int *part)
+ {
+- struct kobject *kobj = kobj_lookup(bdev_map, dev, part);
+- return kobj ? to_disk(kobj) : NULL;
++ struct kobject *kobj = kobj_lookup(bdev_map, devt, part);
++ struct device *dev = kobj_to_dev(kobj);
++
++ return kobj ? dev_to_disk(dev) : NULL;
+ }
+
+ /*
+@@ -216,13 +217,17 @@ struct gendisk *get_gendisk(dev_t dev, int *part)
+ */
+ void __init printk_all_partitions(void)
+ {
+- int n;
++ struct device *dev;
+ struct gendisk *sgp;
++ char buf[BDEVNAME_SIZE];
++ int n;
+
+- mutex_lock(&block_subsys_lock);
++ mutex_lock(&block_class_lock);
+ /* For each block device... */
+- list_for_each_entry(sgp, &block_subsys.list, kobj.entry) {
+- char buf[BDEVNAME_SIZE];
++ list_for_each_entry(dev, &block_class.devices, node) {
++ if (dev->type != &disk_type)
++ continue;
++ sgp = dev_to_disk(dev);
+ /*
+ * Don't show empty devices or things that have been surpressed
+ */
+@@ -255,38 +260,46 @@ void __init printk_all_partitions(void)
+ sgp->major, n + 1 + sgp->first_minor,
+ (unsigned long long)sgp->part[n]->nr_sects >> 1,
+ disk_name(sgp, n + 1, buf));
+- } /* partition subloop */
+- } /* Block device loop */
++ }
++ }
+
+- mutex_unlock(&block_subsys_lock);
+- return;
++ mutex_unlock(&block_class_lock);
+ }
+
+ #ifdef CONFIG_PROC_FS
+ /* iterator */
+ static void *part_start(struct seq_file *part, loff_t *pos)
+ {
+- struct list_head *p;
+- loff_t l = *pos;
++ loff_t k = *pos;
++ struct device *dev;
+
+- mutex_lock(&block_subsys_lock);
+- list_for_each(p, &block_subsys.list)
+- if (!l--)
+- return list_entry(p, struct gendisk, kobj.entry);
++ mutex_lock(&block_class_lock);
++ list_for_each_entry(dev, &block_class.devices, node) {
++ if (dev->type != &disk_type)
++ continue;
++ if (!k--)
++ return dev_to_disk(dev);
++ }
+ return NULL;
+ }
+
+ static void *part_next(struct seq_file *part, void *v, loff_t *pos)
+ {
+- struct list_head *p = ((struct gendisk *)v)->kobj.entry.next;
++ struct gendisk *gp = v;
++ struct device *dev;
+ ++*pos;
+- return p==&block_subsys.list ? NULL :
+- list_entry(p, struct gendisk, kobj.entry);
++ list_for_each_entry(dev, &gp->dev.node, node) {
++ if (&dev->node == &block_class.devices)
++ return NULL;
++ if (dev->type == &disk_type)
++ return dev_to_disk(dev);
++ }
++ return NULL;
+ }
+
+ static void part_stop(struct seq_file *part, void *v)
+ {
+- mutex_unlock(&block_subsys_lock);
++ mutex_unlock(&block_class_lock);
+ }
+
+ static int show_partition(struct seq_file *part, void *v)
+@@ -295,7 +308,7 @@ static int show_partition(struct seq_file *part, void *v)
+ int n;
+ char buf[BDEVNAME_SIZE];
+
+- if (&sgp->kobj.entry == block_subsys.list.next)
++ if (&sgp->dev.node == block_class.devices.next)
+ seq_puts(part, "major minor #blocks name\n\n");
+
+ /* Don't show non-partitionable removeable devices or empty devices */
+@@ -324,111 +337,82 @@ static int show_partition(struct seq_file *part, void *v)
+ return 0;
+ }
+
+-struct seq_operations partitions_op = {
+- .start =part_start,
+- .next = part_next,
+- .stop = part_stop,
+- .show = show_partition
++const struct seq_operations partitions_op = {
++ .start = part_start,
++ .next = part_next,
++ .stop = part_stop,
++ .show = show_partition
+ };
+ #endif
+
+
+ extern int blk_dev_init(void);
+
+-static struct kobject *base_probe(dev_t dev, int *part, void *data)
++static struct kobject *base_probe(dev_t devt, int *part, void *data)
+ {
+- if (request_module("block-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
++ if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
+ /* Make old-style 2.4 aliases work */
+- request_module("block-major-%d", MAJOR(dev));
++ request_module("block-major-%d", MAJOR(devt));
+ return NULL;
+ }
+
+ static int __init genhd_device_init(void)
+ {
+- int err;
+-
+- bdev_map = kobj_map_init(base_probe, &block_subsys_lock);
++ class_register(&block_class);
++ bdev_map = kobj_map_init(base_probe, &block_class_lock);
+ blk_dev_init();
+- err = subsystem_register(&block_subsys);
+- if (err < 0)
+- printk(KERN_WARNING "%s: subsystem_register error: %d\n",
+- __FUNCTION__, err);
+- return err;
++
++#ifndef CONFIG_SYSFS_DEPRECATED
++ /* create top-level block dir */
++ block_depr = kobject_create_and_add("block", NULL);
++#endif
++ return 0;
+ }
+
+ subsys_initcall(genhd_device_init);
+
+-
+-
+-/*
+- * kobject & sysfs bindings for block devices
+- */
+-static ssize_t disk_attr_show(struct kobject *kobj, struct attribute *attr,
+- char *page)
++static ssize_t disk_range_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
+ {
+- struct gendisk *disk = to_disk(kobj);
+- struct disk_attribute *disk_attr =
+- container_of(attr,struct disk_attribute,attr);
+- ssize_t ret = -EIO;
++ struct gendisk *disk = dev_to_disk(dev);
+
+- if (disk_attr->show)
+- ret = disk_attr->show(disk,page);
+- return ret;
++ return sprintf(buf, "%d\n", disk->minors);
+ }
+
+-static ssize_t disk_attr_store(struct kobject * kobj, struct attribute * attr,
+- const char *page, size_t count)
++static ssize_t disk_removable_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
+ {
+- struct gendisk *disk = to_disk(kobj);
+- struct disk_attribute *disk_attr =
+- container_of(attr,struct disk_attribute,attr);
+- ssize_t ret = 0;
++ struct gendisk *disk = dev_to_disk(dev);
+
+- if (disk_attr->store)
+- ret = disk_attr->store(disk, page, count);
+- return ret;
++ return sprintf(buf, "%d\n",
++ (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
+ }
+
+-static struct sysfs_ops disk_sysfs_ops = {
+- .show = &disk_attr_show,
+- .store = &disk_attr_store,
+-};
+-
+-static ssize_t disk_uevent_store(struct gendisk * disk,
+- const char *buf, size_t count)
+-{
+- kobject_uevent(&disk->kobj, KOBJ_ADD);
+- return count;
+-}
+-static ssize_t disk_dev_read(struct gendisk * disk, char *page)
+-{
+- dev_t base = MKDEV(disk->major, disk->first_minor);
+- return print_dev_t(page, base);
+-}
+-static ssize_t disk_range_read(struct gendisk * disk, char *page)
++static ssize_t disk_size_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
+ {
+- return sprintf(page, "%d\n", disk->minors);
+-}
+-static ssize_t disk_removable_read(struct gendisk * disk, char *page)
+-{
+- return sprintf(page, "%d\n",
+- (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
++ struct gendisk *disk = dev_to_disk(dev);
+
++ return sprintf(buf, "%llu\n", (unsigned long long)get_capacity(disk));
+ }
+-static ssize_t disk_size_read(struct gendisk * disk, char *page)
+-{
+- return sprintf(page, "%llu\n", (unsigned long long)get_capacity(disk));
+-}
+-static ssize_t disk_capability_read(struct gendisk *disk, char *page)
++
++static ssize_t disk_capability_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
+ {
+- return sprintf(page, "%x\n", disk->flags);
++ struct gendisk *disk = dev_to_disk(dev);
++
++ return sprintf(buf, "%x\n", disk->flags);
+ }
+-static ssize_t disk_stats_read(struct gendisk * disk, char *page)
++
++static ssize_t disk_stat_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
+ {
++ struct gendisk *disk = dev_to_disk(dev);
++
+ preempt_disable();
+ disk_round_stats(disk);
+ preempt_enable();
+- return sprintf(page,
++ return sprintf(buf,
+ "%8lu %8lu %8llu %8u "
+ "%8lu %8lu %8llu %8u "
+ "%8u %8u %8u"
+@@ -445,40 +429,21 @@ static ssize_t disk_stats_read(struct gendisk * disk, char *page)
+ jiffies_to_msecs(disk_stat_read(disk, io_ticks)),
+ jiffies_to_msecs(disk_stat_read(disk, time_in_queue)));
+ }
+-static struct disk_attribute disk_attr_uevent = {
+- .attr = {.name = "uevent", .mode = S_IWUSR },
+- .store = disk_uevent_store
+-};
+-static struct disk_attribute disk_attr_dev = {
+- .attr = {.name = "dev", .mode = S_IRUGO },
+- .show = disk_dev_read
+-};
+-static struct disk_attribute disk_attr_range = {
+- .attr = {.name = "range", .mode = S_IRUGO },
+- .show = disk_range_read
+-};
+-static struct disk_attribute disk_attr_removable = {
+- .attr = {.name = "removable", .mode = S_IRUGO },
+- .show = disk_removable_read
+-};
+-static struct disk_attribute disk_attr_size = {
+- .attr = {.name = "size", .mode = S_IRUGO },
+- .show = disk_size_read
+-};
+-static struct disk_attribute disk_attr_capability = {
+- .attr = {.name = "capability", .mode = S_IRUGO },
+- .show = disk_capability_read
+-};
+-static struct disk_attribute disk_attr_stat = {
+- .attr = {.name = "stat", .mode = S_IRUGO },
+- .show = disk_stats_read
+-};
+
+ #ifdef CONFIG_FAIL_MAKE_REQUEST
++static ssize_t disk_fail_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct gendisk *disk = dev_to_disk(dev);
++
++ return sprintf(buf, "%d\n", disk->flags & GENHD_FL_FAIL ? 1 : 0);
++}
+
+-static ssize_t disk_fail_store(struct gendisk * disk,
++static ssize_t disk_fail_store(struct device *dev,
++ struct device_attribute *attr,
+ const char *buf, size_t count)
+ {
++ struct gendisk *disk = dev_to_disk(dev);
+ int i;
+
+ if (count > 0 && sscanf(buf, "%d", &i) > 0) {
+@@ -490,136 +455,100 @@ static ssize_t disk_fail_store(struct gendisk * disk,
+
+ return count;
+ }
+-static ssize_t disk_fail_read(struct gendisk * disk, char *page)
+-{
+- return sprintf(page, "%d\n", disk->flags & GENHD_FL_FAIL ? 1 : 0);
+-}
+-static struct disk_attribute disk_attr_fail = {
+- .attr = {.name = "make-it-fail", .mode = S_IRUGO | S_IWUSR },
+- .store = disk_fail_store,
+- .show = disk_fail_read
+-};
+
+ #endif
+
+-static struct attribute * default_attrs[] = {
+- &disk_attr_uevent.attr,
+- &disk_attr_dev.attr,
+- &disk_attr_range.attr,
+- &disk_attr_removable.attr,
+- &disk_attr_size.attr,
+- &disk_attr_stat.attr,
+- &disk_attr_capability.attr,
++static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
++static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
++static DEVICE_ATTR(size, S_IRUGO, disk_size_show, NULL);
++static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
++static DEVICE_ATTR(stat, S_IRUGO, disk_stat_show, NULL);
++#ifdef CONFIG_FAIL_MAKE_REQUEST
++static struct device_attribute dev_attr_fail =
++ __ATTR(make-it-fail, S_IRUGO|S_IWUSR, disk_fail_show, disk_fail_store);
++#endif
++
++static struct attribute *disk_attrs[] = {
++ &dev_attr_range.attr,
++ &dev_attr_removable.attr,
++ &dev_attr_size.attr,
++ &dev_attr_capability.attr,
++ &dev_attr_stat.attr,
+ #ifdef CONFIG_FAIL_MAKE_REQUEST
+- &disk_attr_fail.attr,
++ &dev_attr_fail.attr,
+ #endif
+- NULL,
++ NULL
++};
++
++static struct attribute_group disk_attr_group = {
++ .attrs = disk_attrs,
+ };
+
+-static void disk_release(struct kobject * kobj)
++static struct attribute_group *disk_attr_groups[] = {
++ &disk_attr_group,
++ NULL
++};
++
++static void disk_release(struct device *dev)
+ {
+- struct gendisk *disk = to_disk(kobj);
++ struct gendisk *disk = dev_to_disk(dev);
++
+ kfree(disk->random);
+ kfree(disk->part);
+ free_disk_stats(disk);
+ kfree(disk);
+ }
+-
+-static struct kobj_type ktype_block = {
+- .release = disk_release,
+- .sysfs_ops = &disk_sysfs_ops,
+- .default_attrs = default_attrs,
++struct class block_class = {
++ .name = "block",
+ };
+
+-extern struct kobj_type ktype_part;
+-
+-static int block_uevent_filter(struct kset *kset, struct kobject *kobj)
+-{
+- struct kobj_type *ktype = get_ktype(kobj);
+-
+- return ((ktype == &ktype_block) || (ktype == &ktype_part));
+-}
+-
+-static int block_uevent(struct kset *kset, struct kobject *kobj,
+- struct kobj_uevent_env *env)
+-{
+- struct kobj_type *ktype = get_ktype(kobj);
+- struct device *physdev;
+- struct gendisk *disk;
+- struct hd_struct *part;
+-
+- if (ktype == &ktype_block) {
+- disk = container_of(kobj, struct gendisk, kobj);
+- add_uevent_var(env, "MINOR=%u", disk->first_minor);
+- } else if (ktype == &ktype_part) {
+- disk = container_of(kobj->parent, struct gendisk, kobj);
+- part = container_of(kobj, struct hd_struct, kobj);
+- add_uevent_var(env, "MINOR=%u",
+- disk->first_minor + part->partno);
+- } else
+- return 0;
+-
+- add_uevent_var(env, "MAJOR=%u", disk->major);
+-
+- /* add physical device, backing this device */
+- physdev = disk->driverfs_dev;
+- if (physdev) {
+- char *path = kobject_get_path(&physdev->kobj, GFP_KERNEL);
+-
+- add_uevent_var(env, "PHYSDEVPATH=%s", path);
+- kfree(path);
+-
+- if (physdev->bus)
+- add_uevent_var(env, "PHYSDEVBUS=%s", physdev->bus->name);
+-
+- if (physdev->driver)
+- add_uevent_var(env, physdev->driver->name);
+- }
+-
+- return 0;
+-}
+-
+-static struct kset_uevent_ops block_uevent_ops = {
+- .filter = block_uevent_filter,
+- .uevent = block_uevent,
++struct device_type disk_type = {
++ .name = "disk",
++ .groups = disk_attr_groups,
++ .release = disk_release,
+ };
+
+-decl_subsys(block, &ktype_block, &block_uevent_ops);
+-
+ /*
+ * aggregate disk stat collector. Uses the same stats that the sysfs
+ * entries do, above, but makes them available through one seq_file.
+- * Watching a few disks may be efficient through sysfs, but watching
+- * all of them will be more efficient through this interface.
+ *
+ * The output looks suspiciously like /proc/partitions with a bunch of
+ * extra fields.
+ */
+
+-/* iterator */
+ static void *diskstats_start(struct seq_file *part, loff_t *pos)
+ {
+ loff_t k = *pos;
+- struct list_head *p;
++ struct device *dev;
+
+- mutex_lock(&block_subsys_lock);
+- list_for_each(p, &block_subsys.list)
++ mutex_lock(&block_class_lock);
++ list_for_each_entry(dev, &block_class.devices, node) {
++ if (dev->type != &disk_type)
++ continue;
+ if (!k--)
+- return list_entry(p, struct gendisk, kobj.entry);
++ return dev_to_disk(dev);
++ }
+ return NULL;
+ }
+
+ static void *diskstats_next(struct seq_file *part, void *v, loff_t *pos)
+ {
+- struct list_head *p = ((struct gendisk *)v)->kobj.entry.next;
++ struct gendisk *gp = v;
++ struct device *dev;
++
+ ++*pos;
+- return p==&block_subsys.list ? NULL :
+- list_entry(p, struct gendisk, kobj.entry);
++ list_for_each_entry(dev, &gp->dev.node, node) {
++ if (&dev->node == &block_class.devices)
++ return NULL;
++ if (dev->type == &disk_type)
++ return dev_to_disk(dev);
++ }
++ return NULL;
+ }
+
+ static void diskstats_stop(struct seq_file *part, void *v)
+ {
+- mutex_unlock(&block_subsys_lock);
++ mutex_unlock(&block_class_lock);
+ }
+
+ static int diskstats_show(struct seq_file *s, void *v)
+@@ -629,7 +558,7 @@ static int diskstats_show(struct seq_file *s, void *v)
+ int n = 0;
+
+ /*
+- if (&sgp->kobj.entry == block_subsys.kset.list.next)
++ if (&gp->dev.kobj.entry == block_class.devices.next)
+ seq_puts(s, "major minor name"
+ " rio rmerge rsect ruse wio wmerge "
+ "wsect wuse running use aveq"
+@@ -666,7 +595,7 @@ static int diskstats_show(struct seq_file *s, void *v)
+ return 0;
+ }
+
+-struct seq_operations diskstats_op = {
++const struct seq_operations diskstats_op = {
+ .start = diskstats_start,
+ .next = diskstats_next,
+ .stop = diskstats_stop,
+@@ -683,7 +612,7 @@ static void media_change_notify_thread(struct work_struct *work)
+ * set enviroment vars to indicate which event this is for
+ * so that user space will know to go check the media status.
+ */
+- kobject_uevent_env(&gd->kobj, KOBJ_CHANGE, envp);
++ kobject_uevent_env(&gd->dev.kobj, KOBJ_CHANGE, envp);
+ put_device(gd->driverfs_dev);
+ }
+
+@@ -694,6 +623,25 @@ void genhd_media_change_notify(struct gendisk *disk)
+ }
+ EXPORT_SYMBOL_GPL(genhd_media_change_notify);
+
++dev_t blk_lookup_devt(const char *name)
++{
++ struct device *dev;
++ dev_t devt = MKDEV(0, 0);
++
++ mutex_lock(&block_class_lock);
++ list_for_each_entry(dev, &block_class.devices, node) {
++ if (strcmp(dev->bus_id, name) == 0) {
++ devt = dev->devt;
++ break;
++ }
++ }
++ mutex_unlock(&block_class_lock);
++
++ return devt;
++}
++
++EXPORT_SYMBOL(blk_lookup_devt);
++
+ struct gendisk *alloc_disk(int minors)
+ {
+ return alloc_disk_node(minors, -1);
+@@ -721,9 +669,10 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
+ }
+ }
+ disk->minors = minors;
+- kobj_set_kset_s(disk,block_subsys);
+- kobject_init(&disk->kobj);
+ rand_initialize_disk(disk);
++ disk->dev.class = &block_class;
++ disk->dev.type = &disk_type;
++ device_initialize(&disk->dev);
+ INIT_WORK(&disk->async_notify,
+ media_change_notify_thread);
+ }
+@@ -743,7 +692,7 @@ struct kobject *get_disk(struct gendisk *disk)
+ owner = disk->fops->owner;
+ if (owner && !try_module_get(owner))
+ return NULL;
+- kobj = kobject_get(&disk->kobj);
++ kobj = kobject_get(&disk->dev.kobj);
+ if (kobj == NULL) {
+ module_put(owner);
+ return NULL;
+@@ -757,7 +706,7 @@ EXPORT_SYMBOL(get_disk);
+ void put_disk(struct gendisk *disk)
+ {
+ if (disk)
+- kobject_put(&disk->kobj);
++ kobject_put(&disk->dev.kobj);
+ }
+
+ EXPORT_SYMBOL(put_disk);
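
As a usage note, a sketch of the struct device based attribute pattern genhd.c now uses; disk_example_show and the "example" attribute name are hypothetical, not part of this patch.

/* Hypothetical read-only disk attribute in the new device-attribute style;
 * it would additionally have to be listed in disk_attrs[] above. */
static ssize_t disk_example_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n", disk->minors);
}

static DEVICE_ATTR(example, S_IRUGO, disk_example_show, NULL);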
+diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
+deleted file mode 100644
+index 8b91994..0000000
+--- a/block/ll_rw_blk.c
++++ /dev/null
+@@ -1,4214 +0,0 @@
+-/*
+- * Copyright (C) 1991, 1992 Linus Torvalds
+- * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
+- * Elevator latency, (C) 2000 Andrea Arcangeli <andrea at suse.de> SuSE
+- * Queue request tables / lock, selectable elevator, Jens Axboe <axboe at suse.de>
+- * kernel-doc documentation started by NeilBrown <neilb at cse.unsw.edu.au> - July2000
+- * bio rewrite, highmem i/o, etc, Jens Axboe <axboe at suse.de> - may 2001
+- */
+-
+-/*
+- * This handles all read/write requests to block devices
+- */
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/backing-dev.h>
+-#include <linux/bio.h>
+-#include <linux/blkdev.h>
+-#include <linux/highmem.h>
+-#include <linux/mm.h>
+-#include <linux/kernel_stat.h>
+-#include <linux/string.h>
+-#include <linux/init.h>
+-#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
+-#include <linux/completion.h>
+-#include <linux/slab.h>
+-#include <linux/swap.h>
+-#include <linux/writeback.h>
+-#include <linux/task_io_accounting_ops.h>
+-#include <linux/interrupt.h>
+-#include <linux/cpu.h>
+-#include <linux/blktrace_api.h>
+-#include <linux/fault-inject.h>
+-#include <linux/scatterlist.h>
+-
+-/*
+- * for max sense size
+- */
+-#include <scsi/scsi_cmnd.h>
+-
+-static void blk_unplug_work(struct work_struct *work);
+-static void blk_unplug_timeout(unsigned long data);
+-static void drive_stat_acct(struct request *rq, int new_io);
+-static void init_request_from_bio(struct request *req, struct bio *bio);
+-static int __make_request(struct request_queue *q, struct bio *bio);
+-static struct io_context *current_io_context(gfp_t gfp_flags, int node);
+-static void blk_recalc_rq_segments(struct request *rq);
+-static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+- struct bio *bio);
+-
+-/*
+- * For the allocated request tables
+- */
+-static struct kmem_cache *request_cachep;
+-
+-/*
+- * For queue allocation
+- */
+-static struct kmem_cache *requestq_cachep;
+-
+-/*
+- * For io context allocations
+- */
+-static struct kmem_cache *iocontext_cachep;
+-
+-/*
+- * Controlling structure to kblockd
+- */
+-static struct workqueue_struct *kblockd_workqueue;
+-
+-unsigned long blk_max_low_pfn, blk_max_pfn;
+-
+-EXPORT_SYMBOL(blk_max_low_pfn);
+-EXPORT_SYMBOL(blk_max_pfn);
+-
+-static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+-
+-/* Amount of time in which a process may batch requests */
+-#define BLK_BATCH_TIME (HZ/50UL)
+-
+-/* Number of requests a "batching" process may submit */
+-#define BLK_BATCH_REQ 32
+-
+-/*
+- * Return the threshold (number of used requests) at which the queue is
+- * considered to be congested. It include a little hysteresis to keep the
+- * context switch rate down.
+- */
+-static inline int queue_congestion_on_threshold(struct request_queue *q)
+-{
+- return q->nr_congestion_on;
+-}
+-
+-/*
+- * The threshold at which a queue is considered to be uncongested
+- */
+-static inline int queue_congestion_off_threshold(struct request_queue *q)
+-{
+- return q->nr_congestion_off;
+-}
+-
+-static void blk_queue_congestion_threshold(struct request_queue *q)
+-{
+- int nr;
+-
+- nr = q->nr_requests - (q->nr_requests / 8) + 1;
+- if (nr > q->nr_requests)
+- nr = q->nr_requests;
+- q->nr_congestion_on = nr;
+-
+- nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
+- if (nr < 1)
+- nr = 1;
+- q->nr_congestion_off = nr;
+-}
+-
+-/**
+- * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
+- * @bdev: device
+- *
+- * Locates the passed device's request queue and returns the address of its
+- * backing_dev_info
+- *
+- * Will return NULL if the request queue cannot be located.
+- */
+-struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
+-{
+- struct backing_dev_info *ret = NULL;
+- struct request_queue *q = bdev_get_queue(bdev);
+-
+- if (q)
+- ret = &q->backing_dev_info;
+- return ret;
+-}
+-EXPORT_SYMBOL(blk_get_backing_dev_info);
+-
+-/**
+- * blk_queue_prep_rq - set a prepare_request function for queue
+- * @q: queue
+- * @pfn: prepare_request function
+- *
+- * It's possible for a queue to register a prepare_request callback which
+- * is invoked before the request is handed to the request_fn. The goal of
+- * the function is to prepare a request for I/O, it can be used to build a
+- * cdb from the request data for instance.
+- *
+- */
+-void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
+-{
+- q->prep_rq_fn = pfn;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_prep_rq);
+-
+-/**
+- * blk_queue_merge_bvec - set a merge_bvec function for queue
+- * @q: queue
+- * @mbfn: merge_bvec_fn
+- *
+- * Usually queues have static limitations on the max sectors or segments that
+- * we can put in a request. Stacking drivers may have some settings that
+- * are dynamic, and thus we have to query the queue whether it is ok to
+- * add a new bio_vec to a bio at a given offset or not. If the block device
+- * has such limitations, it needs to register a merge_bvec_fn to control
+- * the size of bio's sent to it. Note that a block device *must* allow a
+- * single page to be added to an empty bio. The block device driver may want
+- * to use the bio_split() function to deal with these bio's. By default
+- * no merge_bvec_fn is defined for a queue, and only the fixed limits are
+- * honored.
+- */
+-void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
+-{
+- q->merge_bvec_fn = mbfn;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_merge_bvec);
+-
+-void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
+-{
+- q->softirq_done_fn = fn;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_softirq_done);
+-
+-/**
+- * blk_queue_make_request - define an alternate make_request function for a device
+- * @q: the request queue for the device to be affected
+- * @mfn: the alternate make_request function
+- *
+- * Description:
+- * The normal way for &struct bios to be passed to a device
+- * driver is for them to be collected into requests on a request
+- * queue, and then to allow the device driver to select requests
+- * off that queue when it is ready. This works well for many block
+- * devices. However some block devices (typically virtual devices
+- * such as md or lvm) do not benefit from the processing on the
+- * request queue, and are served best by having the requests passed
+- * directly to them. This can be achieved by providing a function
+- * to blk_queue_make_request().
+- *
+- * Caveat:
+- * The driver that does this *must* be able to deal appropriately
+- * with buffers in "highmemory". This can be accomplished by either calling
+- * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
+- * blk_queue_bounce() to create a buffer in normal memory.
+- **/
+-void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
+-{
+- /*
+- * set defaults
+- */
+- q->nr_requests = BLKDEV_MAX_RQ;
+- blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
+- blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
+- q->make_request_fn = mfn;
+- q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+- q->backing_dev_info.state = 0;
+- q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+- blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
+- blk_queue_hardsect_size(q, 512);
+- blk_queue_dma_alignment(q, 511);
+- blk_queue_congestion_threshold(q);
+- q->nr_batching = BLK_BATCH_REQ;
+-
+- q->unplug_thresh = 4; /* hmm */
+- q->unplug_delay = (3 * HZ) / 1000; /* 3 milliseconds */
+- if (q->unplug_delay == 0)
+- q->unplug_delay = 1;
+-
+- INIT_WORK(&q->unplug_work, blk_unplug_work);
+-
+- q->unplug_timer.function = blk_unplug_timeout;
+- q->unplug_timer.data = (unsigned long)q;
+-
+- /*
+- * by default assume old behaviour and bounce for any highmem page
+- */
+- blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
+-}
+-
+-EXPORT_SYMBOL(blk_queue_make_request);
+-
+-static void rq_init(struct request_queue *q, struct request *rq)
+-{
+- INIT_LIST_HEAD(&rq->queuelist);
+- INIT_LIST_HEAD(&rq->donelist);
+-
+- rq->errors = 0;
+- rq->bio = rq->biotail = NULL;
+- INIT_HLIST_NODE(&rq->hash);
+- RB_CLEAR_NODE(&rq->rb_node);
+- rq->ioprio = 0;
+- rq->buffer = NULL;
+- rq->ref_count = 1;
+- rq->q = q;
+- rq->special = NULL;
+- rq->data_len = 0;
+- rq->data = NULL;
+- rq->nr_phys_segments = 0;
+- rq->sense = NULL;
+- rq->end_io = NULL;
+- rq->end_io_data = NULL;
+- rq->completion_data = NULL;
+- rq->next_rq = NULL;
+-}
+-
+-/**
+- * blk_queue_ordered - does this queue support ordered writes
+- * @q: the request queue
+- * @ordered: one of QUEUE_ORDERED_*
+- * @prepare_flush_fn: rq setup helper for cache flush ordered writes
+- *
+- * Description:
+- * For journalled file systems, doing ordered writes on a commit
+- * block instead of explicitly doing wait_on_buffer (which is bad
+- * for performance) can be a big win. Block drivers supporting this
+- * feature should call this function and indicate so.
+- *
+- **/
+-int blk_queue_ordered(struct request_queue *q, unsigned ordered,
+- prepare_flush_fn *prepare_flush_fn)
+-{
+- if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
+- prepare_flush_fn == NULL) {
+- printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
+- return -EINVAL;
+- }
+-
+- if (ordered != QUEUE_ORDERED_NONE &&
+- ordered != QUEUE_ORDERED_DRAIN &&
+- ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
+- ordered != QUEUE_ORDERED_DRAIN_FUA &&
+- ordered != QUEUE_ORDERED_TAG &&
+- ordered != QUEUE_ORDERED_TAG_FLUSH &&
+- ordered != QUEUE_ORDERED_TAG_FUA) {
+- printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
+- return -EINVAL;
+- }
+-
+- q->ordered = ordered;
+- q->next_ordered = ordered;
+- q->prepare_flush_fn = prepare_flush_fn;
+-
+- return 0;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_ordered);
+-
+-/*
+- * Cache flushing for ordered writes handling
+- */
+-inline unsigned blk_ordered_cur_seq(struct request_queue *q)
+-{
+- if (!q->ordseq)
+- return 0;
+- return 1 << ffz(q->ordseq);
+-}
+-
+-unsigned blk_ordered_req_seq(struct request *rq)
+-{
+- struct request_queue *q = rq->q;
+-
+- BUG_ON(q->ordseq == 0);
+-
+- if (rq == &q->pre_flush_rq)
+- return QUEUE_ORDSEQ_PREFLUSH;
+- if (rq == &q->bar_rq)
+- return QUEUE_ORDSEQ_BAR;
+- if (rq == &q->post_flush_rq)
+- return QUEUE_ORDSEQ_POSTFLUSH;
+-
+- /*
+- * !fs requests don't need to follow barrier ordering. Always
+- * put them at the front. This fixes the following deadlock.
+- *
+- * http://thread.gmane.org/gmane.linux.kernel/537473
+- */
+- if (!blk_fs_request(rq))
+- return QUEUE_ORDSEQ_DRAIN;
+-
+- if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
+- (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
+- return QUEUE_ORDSEQ_DRAIN;
+- else
+- return QUEUE_ORDSEQ_DONE;
+-}
+-
+-void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
+-{
+- struct request *rq;
+- int uptodate;
+-
+- if (error && !q->orderr)
+- q->orderr = error;
+-
+- BUG_ON(q->ordseq & seq);
+- q->ordseq |= seq;
+-
+- if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
+- return;
+-
+- /*
+- * Okay, sequence complete.
+- */
+- uptodate = 1;
+- if (q->orderr)
+- uptodate = q->orderr;
+-
+- q->ordseq = 0;
+- rq = q->orig_bar_rq;
+-
+- end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
+- end_that_request_last(rq, uptodate);
+-}
+-
+-static void pre_flush_end_io(struct request *rq, int error)
+-{
+- elv_completed_request(rq->q, rq);
+- blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
+-}
+-
+-static void bar_end_io(struct request *rq, int error)
+-{
+- elv_completed_request(rq->q, rq);
+- blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
+-}
+-
+-static void post_flush_end_io(struct request *rq, int error)
+-{
+- elv_completed_request(rq->q, rq);
+- blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
+-}
+-
+-static void queue_flush(struct request_queue *q, unsigned which)
+-{
+- struct request *rq;
+- rq_end_io_fn *end_io;
+-
+- if (which == QUEUE_ORDERED_PREFLUSH) {
+- rq = &q->pre_flush_rq;
+- end_io = pre_flush_end_io;
+- } else {
+- rq = &q->post_flush_rq;
+- end_io = post_flush_end_io;
+- }
+-
+- rq->cmd_flags = REQ_HARDBARRIER;
+- rq_init(q, rq);
+- rq->elevator_private = NULL;
+- rq->elevator_private2 = NULL;
+- rq->rq_disk = q->bar_rq.rq_disk;
+- rq->end_io = end_io;
+- q->prepare_flush_fn(q, rq);
+-
+- elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+-}
+-
+-static inline struct request *start_ordered(struct request_queue *q,
+- struct request *rq)
+-{
+- q->orderr = 0;
+- q->ordered = q->next_ordered;
+- q->ordseq |= QUEUE_ORDSEQ_STARTED;
+-
+- /*
+- * Prep proxy barrier request.
+- */
+- blkdev_dequeue_request(rq);
+- q->orig_bar_rq = rq;
+- rq = &q->bar_rq;
+- rq->cmd_flags = 0;
+- rq_init(q, rq);
+- if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
+- rq->cmd_flags |= REQ_RW;
+- if (q->ordered & QUEUE_ORDERED_FUA)
+- rq->cmd_flags |= REQ_FUA;
+- rq->elevator_private = NULL;
+- rq->elevator_private2 = NULL;
+- init_request_from_bio(rq, q->orig_bar_rq->bio);
+- rq->end_io = bar_end_io;
+-
+- /*
+- * Queue ordered sequence. As we stack them at the head, we
+- * need to queue in reverse order. Note that we rely on the fact
+- * that no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
+- * request gets in between the ordered sequence. If this request is
+- * an empty barrier, we don't need to do a postflush ever since
+- * there will be no data written between the pre and post flush.
+- * Hence a single flush will suffice.
+- */
+- if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
+- queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
+- else
+- q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
+-
+- elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+-
+- if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
+- queue_flush(q, QUEUE_ORDERED_PREFLUSH);
+- rq = &q->pre_flush_rq;
+- } else
+- q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
+-
+- if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
+- q->ordseq |= QUEUE_ORDSEQ_DRAIN;
+- else
+- rq = NULL;
+-
+- return rq;
+-}
+-
+-int blk_do_ordered(struct request_queue *q, struct request **rqp)
+-{
+- struct request *rq = *rqp;
+- const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+-
+- if (!q->ordseq) {
+- if (!is_barrier)
+- return 1;
+-
+- if (q->next_ordered != QUEUE_ORDERED_NONE) {
+- *rqp = start_ordered(q, rq);
+- return 1;
+- } else {
+- /*
+- * This can happen when the queue switches to
+- * ORDERED_NONE while this request is on it.
+- */
+- blkdev_dequeue_request(rq);
+- end_that_request_first(rq, -EOPNOTSUPP,
+- rq->hard_nr_sectors);
+- end_that_request_last(rq, -EOPNOTSUPP);
+- *rqp = NULL;
+- return 0;
+- }
+- }
+-
+- /*
+- * Ordered sequence in progress
+- */
+-
+- /* Special requests are not subject to ordering rules. */
+- if (!blk_fs_request(rq) &&
+- rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
+- return 1;
+-
+- if (q->ordered & QUEUE_ORDERED_TAG) {
+- /* Ordered by tag. Blocking the next barrier is enough. */
+- if (is_barrier && rq != &q->bar_rq)
+- *rqp = NULL;
+- } else {
+- /* Ordered by draining. Wait for turn. */
+- WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+- if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+- *rqp = NULL;
+- }
+-
+- return 1;
+-}
+-
+-static void req_bio_endio(struct request *rq, struct bio *bio,
+- unsigned int nbytes, int error)
+-{
+- struct request_queue *q = rq->q;
+-
+- if (&q->bar_rq != rq) {
+- if (error)
+- clear_bit(BIO_UPTODATE, &bio->bi_flags);
+- else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+- error = -EIO;
+-
+- if (unlikely(nbytes > bio->bi_size)) {
+- printk("%s: want %u bytes done, only %u left\n",
+- __FUNCTION__, nbytes, bio->bi_size);
+- nbytes = bio->bi_size;
+- }
+-
+- bio->bi_size -= nbytes;
+- bio->bi_sector += (nbytes >> 9);
+- if (bio->bi_size == 0)
+- bio_endio(bio, error);
+- } else {
+-
+- /*
+- * Okay, this is the barrier request in progress, just
+- * record the error;
+- */
+- if (error && !q->orderr)
+- q->orderr = error;
+- }
+-}
+-
+-/**
+- * blk_queue_bounce_limit - set bounce buffer limit for queue
+- * @q: the request queue for the device
+- * @dma_addr: bus address limit
+- *
+- * Description:
+- * Different hardware can have different requirements as to what pages
+- * it can do I/O directly to. A low level driver can call
+- * blk_queue_bounce_limit to have lower memory pages allocated as bounce
+- * buffers for doing I/O to pages residing above @dma_addr.
+- **/
+-void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
+-{
+- unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
+- int dma = 0;
+-
+- q->bounce_gfp = GFP_NOIO;
+-#if BITS_PER_LONG == 64
+- /* Assume anything <= 4GB can be handled by IOMMU.
+- Actually some IOMMUs can handle everything, but I don't
+- know of a way to test this here. */
+- if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+- dma = 1;
+- q->bounce_pfn = max_low_pfn;
+-#else
+- if (bounce_pfn < blk_max_low_pfn)
+- dma = 1;
+- q->bounce_pfn = bounce_pfn;
+-#endif
+- if (dma) {
+- init_emergency_isa_pool();
+- q->bounce_gfp = GFP_NOIO | GFP_DMA;
+- q->bounce_pfn = bounce_pfn;
+- }
+-}
+-
+-EXPORT_SYMBOL(blk_queue_bounce_limit);
+-
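A minimal sketch of the usual caller, assuming a PCI device that can only DMA below 4GB (the helper name is hypothetical):

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>

static void my_set_dma_limit(struct request_queue *q, struct pci_dev *pdev)
{
	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) == 0)
		blk_queue_bounce_limit(q, DMA_32BIT_MASK);
	else
		blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);	/* bounce only highmem */
}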
+-/**
+- * blk_queue_max_sectors - set max sectors for a request for this queue
+- * @q: the request queue for the device
+- * @max_sectors: max sectors in the usual 512b unit
+- *
+- * Description:
+- * Enables a low level driver to set an upper limit on the size of
+- * received requests.
+- **/
+-void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
+-{
+- if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
+- max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+- printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
+- }
+-
+- if (BLK_DEF_MAX_SECTORS > max_sectors)
+- q->max_hw_sectors = q->max_sectors = max_sectors;
+- else {
+- q->max_sectors = BLK_DEF_MAX_SECTORS;
+- q->max_hw_sectors = max_sectors;
+- }
+-}
+-
+-EXPORT_SYMBOL(blk_queue_max_sectors);
+-
+-/**
+- * blk_queue_max_phys_segments - set max phys segments for a request for this queue
+- * @q: the request queue for the device
+- * @max_segments: max number of segments
+- *
+- * Description:
+- * Enables a low level driver to set an upper limit on the number of
+- * physical data segments in a request. This would be the largest sized
+- * scatter list the driver could handle.
+- **/
+-void blk_queue_max_phys_segments(struct request_queue *q,
+- unsigned short max_segments)
+-{
+- if (!max_segments) {
+- max_segments = 1;
+- printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+- }
+-
+- q->max_phys_segments = max_segments;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_max_phys_segments);
+-
+-/**
+- * blk_queue_max_hw_segments - set max hw segments for a request for this queue
+- * @q: the request queue for the device
+- * @max_segments: max number of segments
+- *
+- * Description:
+- * Enables a low level driver to set an upper limit on the number of
+- * hw data segments in a request. This would be the largest number of
+- * address/length pairs the host adapter can actually give at once
+- * to the device.
+- **/
+-void blk_queue_max_hw_segments(struct request_queue *q,
+- unsigned short max_segments)
+-{
+- if (!max_segments) {
+- max_segments = 1;
+- printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+- }
+-
+- q->max_hw_segments = max_segments;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_max_hw_segments);
+-
+-/**
+- * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
+- * @q: the request queue for the device
+- * @max_size: max size of segment in bytes
+- *
+- * Description:
+- * Enables a low level driver to set an upper limit on the size of a
+- * coalesced segment
+- **/
+-void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
+-{
+- if (max_size < PAGE_CACHE_SIZE) {
+- max_size = PAGE_CACHE_SIZE;
+- printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
+- }
+-
+- q->max_segment_size = max_size;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_max_segment_size);
+-
+-/**
+- * blk_queue_hardsect_size - set hardware sector size for the queue
+- * @q: the request queue for the device
+- * @size: the hardware sector size, in bytes
+- *
+- * Description:
+- * This should typically be set to the lowest possible sector size
+- * that the hardware can operate on (possible without reverting to
+- * even internal read-modify-write operations). Usually the default
+- * of 512 covers most hardware.
+- **/
+-void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
+-{
+- q->hardsect_size = size;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_hardsect_size);
+-
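These limit setters are normally called together from the driver's probe path; a sketch with made-up numbers:

static void my_apply_limits(struct request_queue *q)
{
	blk_queue_max_sectors(q, 256);		/* 128 KiB per request */
	blk_queue_max_phys_segments(q, 32);	/* sg entries we can map */
	blk_queue_max_hw_segments(q, 32);	/* sg entries the adapter takes at once */
	blk_queue_max_segment_size(q, 65536);
	blk_queue_hardsect_size(q, 512);
}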
+-/*
+- * Returns the minimum that is _not_ zero, unless both are zero.
+- */
+-#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
+-
+-/**
+- * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
+- * @t: the stacking driver (top)
+- * @b: the underlying device (bottom)
+- **/
+-void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
+-{
+- /* zero is "infinity" */
+- t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
+- t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
+-
+- t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
+- t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
+- t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
+- t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
+- if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
+- clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
+-}
+-
+-EXPORT_SYMBOL(blk_queue_stack_limits);
+-
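Sketch of a stacking driver (RAID/LVM style) folding each member's limits into its own queue; the member array is hypothetical:

static void my_inherit_limits(struct request_queue *top,
			      struct block_device **member, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		blk_queue_stack_limits(top, bdev_get_queue(member[i]));
}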
+-/**
+- * blk_queue_segment_boundary - set boundary rules for segment merging
+- * @q: the request queue for the device
+- * @mask: the memory boundary mask
+- **/
+-void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
+-{
+- if (mask < PAGE_CACHE_SIZE - 1) {
+- mask = PAGE_CACHE_SIZE - 1;
+- printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
+- }
+-
+- q->seg_boundary_mask = mask;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_segment_boundary);
+-
+-/**
+- * blk_queue_dma_alignment - set dma length and memory alignment
+- * @q: the request queue for the device
+- * @mask: alignment mask
+- *
+- * description:
+- * set required memory and length alignment for direct dma transactions.
+- * this is used when building direct io requests for the queue.
+- *
+- **/
+-void blk_queue_dma_alignment(struct request_queue *q, int mask)
+-{
+- q->dma_alignment = mask;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_dma_alignment);
+-
+-/**
+- * blk_queue_find_tag - find a request by its tag and queue
+- * @q: The request queue for the device
+- * @tag: The tag of the request
+- *
+- * Notes:
+- * Should be used when a device returns a tag and you want to match
+- * it with a request.
+- *
+- * no locks need be held.
+- **/
+-struct request *blk_queue_find_tag(struct request_queue *q, int tag)
+-{
+- return blk_map_queue_find_tag(q->queue_tags, tag);
+-}
+-
+-EXPORT_SYMBOL(blk_queue_find_tag);
+-
+-/**
+- * __blk_free_tags - release a given set of tag maintenance info
+- * @bqt: the tag map to free
+- *
+- * Tries to free the specified @bqt@. Returns true if it was
+- * actually freed and false if there are still references using it
+- */
+-static int __blk_free_tags(struct blk_queue_tag *bqt)
+-{
+- int retval;
+-
+- retval = atomic_dec_and_test(&bqt->refcnt);
+- if (retval) {
+- BUG_ON(bqt->busy);
+-
+- kfree(bqt->tag_index);
+- bqt->tag_index = NULL;
+-
+- kfree(bqt->tag_map);
+- bqt->tag_map = NULL;
+-
+- kfree(bqt);
+-
+- }
+-
+- return retval;
+-}
+-
+-/**
+- * __blk_queue_free_tags - release tag maintenance info
+- * @q: the request queue for the device
+- *
+- * Notes:
+- * blk_cleanup_queue() will take care of calling this function, if tagging
+- * has been used. So there's no need to call this directly.
+- **/
+-static void __blk_queue_free_tags(struct request_queue *q)
+-{
+- struct blk_queue_tag *bqt = q->queue_tags;
+-
+- if (!bqt)
+- return;
+-
+- __blk_free_tags(bqt);
+-
+- q->queue_tags = NULL;
+- q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
+-}
+-
+-
+-/**
+- * blk_free_tags - release a given set of tag maintenance info
+- * @bqt: the tag map to free
+- *
+- * For externally managed @bqt@ frees the map. Callers of this
+- * function must guarantee to have released all the queues that
+- * might have been using this tag map.
+- */
+-void blk_free_tags(struct blk_queue_tag *bqt)
+-{
+- if (unlikely(!__blk_free_tags(bqt)))
+- BUG();
+-}
+-EXPORT_SYMBOL(blk_free_tags);
+-
+-/**
+- * blk_queue_free_tags - release tag maintenance info
+- * @q: the request queue for the device
+- *
+- * Notes:
+- * This is used to disable tagged queuing on a device, yet leave the
+- * queue in function.
+- **/
+-void blk_queue_free_tags(struct request_queue *q)
+-{
+- clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+-}
+-
+-EXPORT_SYMBOL(blk_queue_free_tags);
+-
+-static int
+-init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
+-{
+- struct request **tag_index;
+- unsigned long *tag_map;
+- int nr_ulongs;
+-
+- if (q && depth > q->nr_requests * 2) {
+- depth = q->nr_requests * 2;
+- printk(KERN_ERR "%s: adjusted depth to %d\n",
+- __FUNCTION__, depth);
+- }
+-
+- tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
+- if (!tag_index)
+- goto fail;
+-
+- nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
+- tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
+- if (!tag_map)
+- goto fail;
+-
+- tags->real_max_depth = depth;
+- tags->max_depth = depth;
+- tags->tag_index = tag_index;
+- tags->tag_map = tag_map;
+-
+- return 0;
+-fail:
+- kfree(tag_index);
+- return -ENOMEM;
+-}
+-
+-static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
+- int depth)
+-{
+- struct blk_queue_tag *tags;
+-
+- tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
+- if (!tags)
+- goto fail;
+-
+- if (init_tag_map(q, tags, depth))
+- goto fail;
+-
+- tags->busy = 0;
+- atomic_set(&tags->refcnt, 1);
+- return tags;
+-fail:
+- kfree(tags);
+- return NULL;
+-}
+-
+-/**
+- * blk_init_tags - initialize the tag info for an external tag map
+- * @depth: the maximum queue depth supported
+- * @tags: the tag to use
+- **/
+-struct blk_queue_tag *blk_init_tags(int depth)
+-{
+- return __blk_queue_init_tags(NULL, depth);
+-}
+-EXPORT_SYMBOL(blk_init_tags);
+-
+-/**
+- * blk_queue_init_tags - initialize the queue tag info
+- * @q: the request queue for the device
+- * @depth: the maximum queue depth supported
+- * @tags: the tag to use
+- **/
+-int blk_queue_init_tags(struct request_queue *q, int depth,
+- struct blk_queue_tag *tags)
+-{
+- int rc;
+-
+- BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
+-
+- if (!tags && !q->queue_tags) {
+- tags = __blk_queue_init_tags(q, depth);
+-
+- if (!tags)
+- goto fail;
+- } else if (q->queue_tags) {
+- if ((rc = blk_queue_resize_tags(q, depth)))
+- return rc;
+- set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+- return 0;
+- } else
+- atomic_inc(&tags->refcnt);
+-
+- /*
+- * assign it, all done
+- */
+- q->queue_tags = tags;
+- q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+- INIT_LIST_HEAD(&q->tag_busy_list);
+- return 0;
+-fail:
+- kfree(tags);
+- return -ENOMEM;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_init_tags);
+-
+-/**
+- * blk_queue_resize_tags - change the queueing depth
+- * @q: the request queue for the device
+- * @new_depth: the new max command queueing depth
+- *
+- * Notes:
+- * Must be called with the queue lock held.
+- **/
+-int blk_queue_resize_tags(struct request_queue *q, int new_depth)
+-{
+- struct blk_queue_tag *bqt = q->queue_tags;
+- struct request **tag_index;
+- unsigned long *tag_map;
+- int max_depth, nr_ulongs;
+-
+- if (!bqt)
+- return -ENXIO;
+-
+- /*
+- * if we already have a large enough real_max_depth, just
+- * adjust max_depth. *NOTE* as requests with tag value
+- * between new_depth and real_max_depth can be in-flight, tag
+- * map can not be shrunk blindly here.
+- */
+- if (new_depth <= bqt->real_max_depth) {
+- bqt->max_depth = new_depth;
+- return 0;
+- }
+-
+- /*
+- * Currently cannot replace a shared tag map with a new
+- * one, so error out if this is the case
+- */
+- if (atomic_read(&bqt->refcnt) != 1)
+- return -EBUSY;
+-
+- /*
+- * save the old state info, so we can copy it back
+- */
+- tag_index = bqt->tag_index;
+- tag_map = bqt->tag_map;
+- max_depth = bqt->real_max_depth;
+-
+- if (init_tag_map(q, bqt, new_depth))
+- return -ENOMEM;
+-
+- memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
+- nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
+- memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
+-
+- kfree(tag_index);
+- kfree(tag_map);
+- return 0;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_resize_tags);
+-
+-/**
+- * blk_queue_end_tag - end tag operations for a request
+- * @q: the request queue for the device
+- * @rq: the request that has completed
+- *
+- * Description:
+- * Typically called when end_that_request_first() returns 0, meaning
+- * all transfers have been done for a request. It's important to call
+- * this function before end_that_request_last(), as that will put the
+- * request back on the free list thus corrupting the internal tag list.
+- *
+- * Notes:
+- * queue lock must be held.
+- **/
+-void blk_queue_end_tag(struct request_queue *q, struct request *rq)
+-{
+- struct blk_queue_tag *bqt = q->queue_tags;
+- int tag = rq->tag;
+-
+- BUG_ON(tag == -1);
+-
+- if (unlikely(tag >= bqt->real_max_depth))
+- /*
+- * This can happen after tag depth has been reduced.
+- * FIXME: how about a warning or info message here?
+- */
+- return;
+-
+- list_del_init(&rq->queuelist);
+- rq->cmd_flags &= ~REQ_QUEUED;
+- rq->tag = -1;
+-
+- if (unlikely(bqt->tag_index[tag] == NULL))
+- printk(KERN_ERR "%s: tag %d is missing\n",
+- __FUNCTION__, tag);
+-
+- bqt->tag_index[tag] = NULL;
+-
+- if (unlikely(!test_bit(tag, bqt->tag_map))) {
+- printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
+- __FUNCTION__, tag);
+- return;
+- }
+- /*
+- * The tag_map bit acts as a lock for tag_index[bit], so we need
+- * unlock memory barrier semantics.
+- */
+- clear_bit_unlock(tag, bqt->tag_map);
+- bqt->busy--;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_end_tag);
+-
+-/**
+- * blk_queue_start_tag - find a free tag and assign it
+- * @q: the request queue for the device
+- * @rq: the block request that needs tagging
+- *
+- * Description:
+- * This can either be used as a stand-alone helper, or possibly be
+- * assigned as the queue &prep_rq_fn (in which case &struct request
+- * automagically gets a tag assigned). Note that this function
+- * assumes that any type of request can be queued! if this is not
+- * true for your device, you must check the request type before
+- * calling this function. The request will also be removed from
+- * the request queue, so it's the driver's responsibility to re-add
+- * it if it should need to be restarted for some reason.
+- *
+- * Notes:
+- * queue lock must be held.
+- **/
+-int blk_queue_start_tag(struct request_queue *q, struct request *rq)
+-{
+- struct blk_queue_tag *bqt = q->queue_tags;
+- int tag;
+-
+- if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
+- printk(KERN_ERR
+- "%s: request %p for device [%s] already tagged %d",
+- __FUNCTION__, rq,
+- rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
+- BUG();
+- }
+-
+- /*
+- * Protect against shared tag maps, as we may not have exclusive
+- * access to the tag map.
+- */
+- do {
+- tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
+- if (tag >= bqt->max_depth)
+- return 1;
+-
+- } while (test_and_set_bit_lock(tag, bqt->tag_map));
+- /*
+- * We need lock ordering semantics given by test_and_set_bit_lock.
+- * See blk_queue_end_tag for details.
+- */
+-
+- rq->cmd_flags |= REQ_QUEUED;
+- rq->tag = tag;
+- bqt->tag_index[tag] = rq;
+- blkdev_dequeue_request(rq);
+- list_add(&rq->queuelist, &q->tag_busy_list);
+- bqt->busy++;
+- return 0;
+-}
+-
+-EXPORT_SYMBOL(blk_queue_start_tag);
+-
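Rough sketch of the whole tagging pattern as described above; my_hw_issue() and the depth of 32 are invented:

extern void my_hw_issue(struct request *rq, int tag);	/* hypothetical hardware hook */

static int my_enable_tcq(struct request_queue *q)
{
	return blk_queue_init_tags(q, 32, NULL);	/* driver-local 32-deep tag map */
}

static void my_tagged_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (blk_queue_start_tag(q, rq))	/* also dequeues rq */
			break;			/* no free tag, try again later */
		my_hw_issue(rq, rq->tag);
	}
}

/* completion, queue lock held; end the tag before end_that_request_last() */
static void my_complete(struct request_queue *q, struct request *rq, int uptodate)
{
	end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
	blk_queue_end_tag(q, rq);
	end_that_request_last(rq, uptodate);
}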
+-/**
+- * blk_queue_invalidate_tags - invalidate all pending tags
+- * @q: the request queue for the device
+- *
+- * Description:
+- * Hardware conditions may dictate a need to stop all pending requests.
+- * In this case, we will safely clear the block side of the tag queue and
+- * re-add all requests to the request queue in the right order.
+- *
+- * Notes:
+- * queue lock must be held.
+- **/
+-void blk_queue_invalidate_tags(struct request_queue *q)
+-{
+- struct list_head *tmp, *n;
+-
+- list_for_each_safe(tmp, n, &q->tag_busy_list)
+- blk_requeue_request(q, list_entry_rq(tmp));
+-}
+-
+-EXPORT_SYMBOL(blk_queue_invalidate_tags);
+-
+-void blk_dump_rq_flags(struct request *rq, char *msg)
+-{
+- int bit;
+-
+- printk("%s: dev %s: type=%x, flags=%x\n", msg,
+- rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
+- rq->cmd_flags);
+-
+- printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
+- rq->nr_sectors,
+- rq->current_nr_sectors);
+- printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
+-
+- if (blk_pc_request(rq)) {
+- printk("cdb: ");
+- for (bit = 0; bit < sizeof(rq->cmd); bit++)
+- printk("%02x ", rq->cmd[bit]);
+- printk("\n");
+- }
+-}
+-
+-EXPORT_SYMBOL(blk_dump_rq_flags);
+-
+-void blk_recount_segments(struct request_queue *q, struct bio *bio)
+-{
+- struct request rq;
+- struct bio *nxt = bio->bi_next;
+- rq.q = q;
+- rq.bio = rq.biotail = bio;
+- bio->bi_next = NULL;
+- blk_recalc_rq_segments(&rq);
+- bio->bi_next = nxt;
+- bio->bi_phys_segments = rq.nr_phys_segments;
+- bio->bi_hw_segments = rq.nr_hw_segments;
+- bio->bi_flags |= (1 << BIO_SEG_VALID);
+-}
+-EXPORT_SYMBOL(blk_recount_segments);
+-
+-static void blk_recalc_rq_segments(struct request *rq)
+-{
+- int nr_phys_segs;
+- int nr_hw_segs;
+- unsigned int phys_size;
+- unsigned int hw_size;
+- struct bio_vec *bv, *bvprv = NULL;
+- int seg_size;
+- int hw_seg_size;
+- int cluster;
+- struct req_iterator iter;
+- int high, highprv = 1;
+- struct request_queue *q = rq->q;
+-
+- if (!rq->bio)
+- return;
+-
+- cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+- hw_seg_size = seg_size = 0;
+- phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
+- rq_for_each_segment(bv, rq, iter) {
+- /*
+- * the trick here is making sure that a high page is never
+- * considered part of another segment, since that might
+- * change with the bounce page.
+- */
+- high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+- if (high || highprv)
+- goto new_hw_segment;
+- if (cluster) {
+- if (seg_size + bv->bv_len > q->max_segment_size)
+- goto new_segment;
+- if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+- goto new_segment;
+- if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+- goto new_segment;
+- if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
+- goto new_hw_segment;
+-
+- seg_size += bv->bv_len;
+- hw_seg_size += bv->bv_len;
+- bvprv = bv;
+- continue;
+- }
+-new_segment:
+- if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
+- !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
+- hw_seg_size += bv->bv_len;
+- else {
+-new_hw_segment:
+- if (nr_hw_segs == 1 &&
+- hw_seg_size > rq->bio->bi_hw_front_size)
+- rq->bio->bi_hw_front_size = hw_seg_size;
+- hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
+- nr_hw_segs++;
+- }
+-
+- nr_phys_segs++;
+- bvprv = bv;
+- seg_size = bv->bv_len;
+- highprv = high;
+- }
+-
+- if (nr_hw_segs == 1 &&
+- hw_seg_size > rq->bio->bi_hw_front_size)
+- rq->bio->bi_hw_front_size = hw_seg_size;
+- if (hw_seg_size > rq->biotail->bi_hw_back_size)
+- rq->biotail->bi_hw_back_size = hw_seg_size;
+- rq->nr_phys_segments = nr_phys_segs;
+- rq->nr_hw_segments = nr_hw_segs;
+-}
+-
+-static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
+- struct bio *nxt)
+-{
+- if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+- return 0;
+-
+- if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+- return 0;
+- if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+- return 0;
+-
+- /*
+- * bio and nxt are contiguous in memory; check if the queue allows
+- * these two to be merged into one
+- */
+- if (BIO_SEG_BOUNDARY(q, bio, nxt))
+- return 1;
+-
+- return 0;
+-}
+-
+-static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
+- struct bio *nxt)
+-{
+- if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+- blk_recount_segments(q, bio);
+- if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
+- blk_recount_segments(q, nxt);
+- if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
+- BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
+- return 0;
+- if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
+- return 0;
+-
+- return 1;
+-}
+-
+-/*
+- * map a request to scatterlist, return number of sg entries setup. Caller
+- * must make sure sg can hold rq->nr_phys_segments entries
+- */
+-int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+- struct scatterlist *sglist)
+-{
+- struct bio_vec *bvec, *bvprv;
+- struct req_iterator iter;
+- struct scatterlist *sg;
+- int nsegs, cluster;
+-
+- nsegs = 0;
+- cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+-
+- /*
+- * for each bio in rq
+- */
+- bvprv = NULL;
+- sg = NULL;
+- rq_for_each_segment(bvec, rq, iter) {
+- int nbytes = bvec->bv_len;
+-
+- if (bvprv && cluster) {
+- if (sg->length + nbytes > q->max_segment_size)
+- goto new_segment;
+-
+- if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
+- goto new_segment;
+- if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
+- goto new_segment;
+-
+- sg->length += nbytes;
+- } else {
+-new_segment:
+- if (!sg)
+- sg = sglist;
+- else {
+- /*
+- * If the driver previously mapped a shorter
+- * list, we could see a termination bit
+- * prematurely unless it fully inits the sg
+- * table on each mapping. We KNOW that there
+- * must be more entries here or the driver
+- * would be buggy, so force clear the
+- * termination bit to avoid doing a full
+- * sg_init_table() in drivers for each command.
+- */
+- sg->page_link &= ~0x02;
+- sg = sg_next(sg);
+- }
+-
+- sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
+- nsegs++;
+- }
+- bvprv = bvec;
+- } /* segments in rq */
+-
+- if (sg)
+- sg_mark_end(sg);
+-
+- return nsegs;
+-}
+-
+-EXPORT_SYMBOL(blk_rq_map_sg);
+-
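Sketch of the usual caller inside a request handler; MY_MAX_SEGS must match the blk_queue_max_phys_segments() setting and my_hw_program_dma() is invented:

#include <linux/scatterlist.h>

#define MY_MAX_SEGS 32

extern void my_hw_program_dma(struct scatterlist *sg, int nsegs);	/* hypothetical */

static struct scatterlist my_sg[MY_MAX_SEGS];

static void my_map_for_dma(struct request_queue *q, struct request *rq)
{
	int nsegs;

	sg_init_table(my_sg, MY_MAX_SEGS);
	nsegs = blk_rq_map_sg(q, rq, my_sg);	/* fills my_sg and marks the end */
	my_hw_program_dma(my_sg, nsegs);
}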
+-/*
+- * the standard queue merge functions, can be overridden with device
+- * specific ones if so desired
+- */
+-
+-static inline int ll_new_mergeable(struct request_queue *q,
+- struct request *req,
+- struct bio *bio)
+-{
+- int nr_phys_segs = bio_phys_segments(q, bio);
+-
+- if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+- req->cmd_flags |= REQ_NOMERGE;
+- if (req == q->last_merge)
+- q->last_merge = NULL;
+- return 0;
+- }
+-
+- /*
+- * A hw segment is just getting larger, bump just the phys
+- * counter.
+- */
+- req->nr_phys_segments += nr_phys_segs;
+- return 1;
+-}
+-
+-static inline int ll_new_hw_segment(struct request_queue *q,
+- struct request *req,
+- struct bio *bio)
+-{
+- int nr_hw_segs = bio_hw_segments(q, bio);
+- int nr_phys_segs = bio_phys_segments(q, bio);
+-
+- if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
+- || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+- req->cmd_flags |= REQ_NOMERGE;
+- if (req == q->last_merge)
+- q->last_merge = NULL;
+- return 0;
+- }
+-
+- /*
+- * This will form the start of a new hw segment. Bump both
+- * counters.
+- */
+- req->nr_hw_segments += nr_hw_segs;
+- req->nr_phys_segments += nr_phys_segs;
+- return 1;
+-}
+-
+-static int ll_back_merge_fn(struct request_queue *q, struct request *req,
+- struct bio *bio)
+-{
+- unsigned short max_sectors;
+- int len;
+-
+- if (unlikely(blk_pc_request(req)))
+- max_sectors = q->max_hw_sectors;
+- else
+- max_sectors = q->max_sectors;
+-
+- if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+- req->cmd_flags |= REQ_NOMERGE;
+- if (req == q->last_merge)
+- q->last_merge = NULL;
+- return 0;
+- }
+- if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
+- blk_recount_segments(q, req->biotail);
+- if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+- blk_recount_segments(q, bio);
+- len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
+- if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
+- !BIOVEC_VIRT_OVERSIZE(len)) {
+- int mergeable = ll_new_mergeable(q, req, bio);
+-
+- if (mergeable) {
+- if (req->nr_hw_segments == 1)
+- req->bio->bi_hw_front_size = len;
+- if (bio->bi_hw_segments == 1)
+- bio->bi_hw_back_size = len;
+- }
+- return mergeable;
+- }
+-
+- return ll_new_hw_segment(q, req, bio);
+-}
+-
+-static int ll_front_merge_fn(struct request_queue *q, struct request *req,
+- struct bio *bio)
+-{
+- unsigned short max_sectors;
+- int len;
+-
+- if (unlikely(blk_pc_request(req)))
+- max_sectors = q->max_hw_sectors;
+- else
+- max_sectors = q->max_sectors;
+-
+-
+- if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+- req->cmd_flags |= REQ_NOMERGE;
+- if (req == q->last_merge)
+- q->last_merge = NULL;
+- return 0;
+- }
+- len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
+- if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+- blk_recount_segments(q, bio);
+- if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
+- blk_recount_segments(q, req->bio);
+- if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
+- !BIOVEC_VIRT_OVERSIZE(len)) {
+- int mergeable = ll_new_mergeable(q, req, bio);
+-
+- if (mergeable) {
+- if (bio->bi_hw_segments == 1)
+- bio->bi_hw_front_size = len;
+- if (req->nr_hw_segments == 1)
+- req->biotail->bi_hw_back_size = len;
+- }
+- return mergeable;
+- }
+-
+- return ll_new_hw_segment(q, req, bio);
+-}
+-
+-static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
+- struct request *next)
+-{
+- int total_phys_segments;
+- int total_hw_segments;
+-
+- /*
+- * First check if either of the requests is a re-queued
+- * request. Can't merge them if they are.
+- */
+- if (req->special || next->special)
+- return 0;
+-
+- /*
+- * Will it become too large?
+- */
+- if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
+- return 0;
+-
+- total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
+- if (blk_phys_contig_segment(q, req->biotail, next->bio))
+- total_phys_segments--;
+-
+- if (total_phys_segments > q->max_phys_segments)
+- return 0;
+-
+- total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
+- if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
+- int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
+- /*
+- * propagate the combined length to the end of the requests
+- */
+- if (req->nr_hw_segments == 1)
+- req->bio->bi_hw_front_size = len;
+- if (next->nr_hw_segments == 1)
+- next->biotail->bi_hw_back_size = len;
+- total_hw_segments--;
+- }
+-
+- if (total_hw_segments > q->max_hw_segments)
+- return 0;
+-
+- /* Merge is OK... */
+- req->nr_phys_segments = total_phys_segments;
+- req->nr_hw_segments = total_hw_segments;
+- return 1;
+-}
+-
+-/*
+- * "plug" the device if there are no outstanding requests: this will
+- * force the transfer to start only after we have put all the requests
+- * on the list.
+- *
+- * This is called with interrupts off and no requests on the queue and
+- * with the queue lock held.
+- */
+-void blk_plug_device(struct request_queue *q)
+-{
+- WARN_ON(!irqs_disabled());
+-
+- /*
+- * don't plug a stopped queue, it must be paired with blk_start_queue()
+- * which will restart the queueing
+- */
+- if (blk_queue_stopped(q))
+- return;
+-
+- if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+- mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
+- blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+- }
+-}
+-
+-EXPORT_SYMBOL(blk_plug_device);
+-
+-/*
+- * remove the queue from the plugged list, if present. called with
+- * queue lock held and interrupts disabled.
+- */
+-int blk_remove_plug(struct request_queue *q)
+-{
+- WARN_ON(!irqs_disabled());
+-
+- if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+- return 0;
+-
+- del_timer(&q->unplug_timer);
+- return 1;
+-}
+-
+-EXPORT_SYMBOL(blk_remove_plug);
+-
+-/*
+- * remove the plug and let it rip..
+- */
+-void __generic_unplug_device(struct request_queue *q)
+-{
+- if (unlikely(blk_queue_stopped(q)))
+- return;
+-
+- if (!blk_remove_plug(q))
+- return;
+-
+- q->request_fn(q);
+-}
+-EXPORT_SYMBOL(__generic_unplug_device);
+-
+-/**
+- * generic_unplug_device - fire a request queue
+- * @q: The &struct request_queue in question
+- *
+- * Description:
+- * Linux uses plugging to build bigger requests queues before letting
+- * the device have at them. If a queue is plugged, the I/O scheduler
+- * is still adding and merging requests on the queue. Once the queue
+- * gets unplugged, the request_fn defined for the queue is invoked and
+- * transfers started.
+- **/
+-void generic_unplug_device(struct request_queue *q)
+-{
+- spin_lock_irq(q->queue_lock);
+- __generic_unplug_device(q);
+- spin_unlock_irq(q->queue_lock);
+-}
+-EXPORT_SYMBOL(generic_unplug_device);
+-
+-static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
+- struct page *page)
+-{
+- struct request_queue *q = bdi->unplug_io_data;
+-
+- blk_unplug(q);
+-}
+-
+-static void blk_unplug_work(struct work_struct *work)
+-{
+- struct request_queue *q =
+- container_of(work, struct request_queue, unplug_work);
+-
+- blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+- q->rq.count[READ] + q->rq.count[WRITE]);
+-
+- q->unplug_fn(q);
+-}
+-
+-static void blk_unplug_timeout(unsigned long data)
+-{
+- struct request_queue *q = (struct request_queue *)data;
+-
+- blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
+- q->rq.count[READ] + q->rq.count[WRITE]);
+-
+- kblockd_schedule_work(&q->unplug_work);
+-}
+-
+-void blk_unplug(struct request_queue *q)
+-{
+- /*
+- * devices don't necessarily have an ->unplug_fn defined
+- */
+- if (q->unplug_fn) {
+- blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+- q->rq.count[READ] + q->rq.count[WRITE]);
+-
+- q->unplug_fn(q);
+- }
+-}
+-EXPORT_SYMBOL(blk_unplug);
+-
+-/**
+- * blk_start_queue - restart a previously stopped queue
+- * @q: The &struct request_queue in question
+- *
+- * Description:
+- * blk_start_queue() will clear the stop flag on the queue, and call
+- * the request_fn for the queue if it was in a stopped state when
+- * entered. Also see blk_stop_queue(). Queue lock must be held.
+- **/
+-void blk_start_queue(struct request_queue *q)
+-{
+- WARN_ON(!irqs_disabled());
+-
+- clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+-
+- /*
+- * one level of recursion is ok and is much faster than kicking
+- * the unplug handling
+- */
+- if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+- q->request_fn(q);
+- clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+- } else {
+- blk_plug_device(q);
+- kblockd_schedule_work(&q->unplug_work);
+- }
+-}
+-
+-EXPORT_SYMBOL(blk_start_queue);
+-
+-/**
+- * blk_stop_queue - stop a queue
+- * @q: The &struct request_queue in question
+- *
+- * Description:
+- * The Linux block layer assumes that a block driver will consume all
+- * entries on the request queue when the request_fn strategy is called.
+- * Often this will not happen, because of hardware limitations (queue
+- * depth settings). If a device driver gets a 'queue full' response,
+- * or if it simply chooses not to queue more I/O at one point, it can
+- * call this function to prevent the request_fn from being called until
+- * the driver has signalled it's ready to go again. This happens by calling
+- * blk_start_queue() to restart queue operations. Queue lock must be held.
+- **/
+-void blk_stop_queue(struct request_queue *q)
+-{
+- blk_remove_plug(q);
+- set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+-}
+-EXPORT_SYMBOL(blk_stop_queue);
+-
+-/**
+- * blk_sync_queue - cancel any pending callbacks on a queue
+- * @q: the queue
+- *
+- * Description:
+- * The block layer may perform asynchronous callback activity
+- * on a queue, such as calling the unplug function after a timeout.
+- * A block device may call blk_sync_queue to ensure that any
+- * such activity is cancelled, thus allowing it to release resources
+- * that the callbacks might use. The caller must already have made sure
+- * that its ->make_request_fn will not re-add plugging prior to calling
+- * this function.
+- *
+- */
+-void blk_sync_queue(struct request_queue *q)
+-{
+- del_timer_sync(&q->unplug_timer);
+- kblockd_flush_work(&q->unplug_work);
+-}
+-EXPORT_SYMBOL(blk_sync_queue);
+-
+-/**
+- * blk_run_queue - run a single device queue
+- * @q: The queue to run
+- */
+-void blk_run_queue(struct request_queue *q)
+-{
+- unsigned long flags;
+-
+- spin_lock_irqsave(q->queue_lock, flags);
+- blk_remove_plug(q);
+-
+- /*
+- * Only recurse once to avoid overrunning the stack, let the unplug
+- * handling reinvoke the handler shortly if we already got there.
+- */
+- if (!elv_queue_empty(q)) {
+- if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+- q->request_fn(q);
+- clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+- } else {
+- blk_plug_device(q);
+- kblockd_schedule_work(&q->unplug_work);
+- }
+- }
+-
+- spin_unlock_irqrestore(q->queue_lock, flags);
+-}
+-EXPORT_SYMBOL(blk_run_queue);
+-
+-/**
+- * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed
+- * @kobj: the kobj belonging of the request queue to be released
+- *
+- * Description:
+- * blk_cleanup_queue is the pair to blk_init_queue() or
+- * blk_queue_make_request(). It should be called when a request queue is
+- * being released; typically when a block device is being de-registered.
+- * Currently, its primary task is to free all the &struct request
+- * structures that were allocated to the queue and the queue itself.
+- *
+- * Caveat:
+- * Hopefully the low level driver will have finished any
+- * outstanding requests first...
+- **/
+-static void blk_release_queue(struct kobject *kobj)
+-{
+- struct request_queue *q =
+- container_of(kobj, struct request_queue, kobj);
+- struct request_list *rl = &q->rq;
+-
+- blk_sync_queue(q);
+-
+- if (rl->rq_pool)
+- mempool_destroy(rl->rq_pool);
+-
+- if (q->queue_tags)
+- __blk_queue_free_tags(q);
+-
+- blk_trace_shutdown(q);
+-
+- bdi_destroy(&q->backing_dev_info);
+- kmem_cache_free(requestq_cachep, q);
+-}
+-
+-void blk_put_queue(struct request_queue *q)
+-{
+- kobject_put(&q->kobj);
+-}
+-EXPORT_SYMBOL(blk_put_queue);
+-
+-void blk_cleanup_queue(struct request_queue * q)
+-{
+- mutex_lock(&q->sysfs_lock);
+- set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+- mutex_unlock(&q->sysfs_lock);
+-
+- if (q->elevator)
+- elevator_exit(q->elevator);
+-
+- blk_put_queue(q);
+-}
+-
+-EXPORT_SYMBOL(blk_cleanup_queue);
+-
+-static int blk_init_free_list(struct request_queue *q)
+-{
+- struct request_list *rl = &q->rq;
+-
+- rl->count[READ] = rl->count[WRITE] = 0;
+- rl->starved[READ] = rl->starved[WRITE] = 0;
+- rl->elvpriv = 0;
+- init_waitqueue_head(&rl->wait[READ]);
+- init_waitqueue_head(&rl->wait[WRITE]);
+-
+- rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
+- mempool_free_slab, request_cachep, q->node);
+-
+- if (!rl->rq_pool)
+- return -ENOMEM;
+-
+- return 0;
+-}
+-
+-struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
+-{
+- return blk_alloc_queue_node(gfp_mask, -1);
+-}
+-EXPORT_SYMBOL(blk_alloc_queue);
+-
+-static struct kobj_type queue_ktype;
+-
+-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+-{
+- struct request_queue *q;
+- int err;
+-
+- q = kmem_cache_alloc_node(requestq_cachep,
+- gfp_mask | __GFP_ZERO, node_id);
+- if (!q)
+- return NULL;
+-
+- q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
+- q->backing_dev_info.unplug_io_data = q;
+- err = bdi_init(&q->backing_dev_info);
+- if (err) {
+- kmem_cache_free(requestq_cachep, q);
+- return NULL;
+- }
+-
+- init_timer(&q->unplug_timer);
+-
+- kobject_set_name(&q->kobj, "%s", "queue");
+- q->kobj.ktype = &queue_ktype;
+- kobject_init(&q->kobj);
+-
+- mutex_init(&q->sysfs_lock);
+-
+- return q;
+-}
+-EXPORT_SYMBOL(blk_alloc_queue_node);
+-
+-/**
+- * blk_init_queue - prepare a request queue for use with a block device
+- * @rfn: The function to be called to process requests that have been
+- * placed on the queue.
+- * @lock: Request queue spin lock
+- *
+- * Description:
+- * If a block device wishes to use the standard request handling procedures,
+- * which sorts requests and coalesces adjacent requests, then it must
+- * call blk_init_queue(). The function @rfn will be called when there
+- * are requests on the queue that need to be processed. If the device
+- * supports plugging, then @rfn may not be called immediately when requests
+- * are available on the queue, but may be called at some time later instead.
+- * Plugged queues are generally unplugged when a buffer belonging to one
+- * of the requests on the queue is needed, or due to memory pressure.
+- *
+- * @rfn is not required, or even expected, to remove all requests off the
+- * queue, but only as many as it can handle at a time. If it does leave
+- * requests on the queue, it is responsible for arranging that the requests
+- * get dealt with eventually.
+- *
+- * The queue spin lock must be held while manipulating the requests on the
+- * request queue; this lock will be taken also from interrupt context, so irq
+- * disabling is needed for it.
+- *
+- * Function returns a pointer to the initialized request queue, or NULL if
+- * it didn't succeed.
+- *
+- * Note:
+- * blk_init_queue() must be paired with a blk_cleanup_queue() call
+- * when the block device is deactivated (such as at module unload).
+- **/
+-
+-struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
+-{
+- return blk_init_queue_node(rfn, lock, -1);
+-}
+-EXPORT_SYMBOL(blk_init_queue);
+-
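The usual pairing, sketched; my_request_fn is a hypothetical strategy routine, and the queue is torn down with blk_cleanup_queue() as documented above:

#include <linux/module.h>
#include <linux/blkdev.h>

static void my_request_fn(struct request_queue *q)
{
	/* hypothetical: pull requests with elv_next_request() and issue them */
}

static DEFINE_SPINLOCK(my_lock);
static struct request_queue *my_queue;

static int __init my_init(void)
{
	my_queue = blk_init_queue(my_request_fn, &my_lock);
	if (!my_queue)
		return -ENOMEM;
	blk_queue_hardsect_size(my_queue, 512);
	return 0;
}

static void __exit my_exit(void)
{
	blk_cleanup_queue(my_queue);
}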
+-struct request_queue *
+-blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
+-{
+- struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+-
+- if (!q)
+- return NULL;
+-
+- q->node = node_id;
+- if (blk_init_free_list(q)) {
+- kmem_cache_free(requestq_cachep, q);
+- return NULL;
+- }
+-
+- /*
+- * if caller didn't supply a lock, they get per-queue locking with
+- * our embedded lock
+- */
+- if (!lock) {
+- spin_lock_init(&q->__queue_lock);
+- lock = &q->__queue_lock;
+- }
+-
+- q->request_fn = rfn;
+- q->prep_rq_fn = NULL;
+- q->unplug_fn = generic_unplug_device;
+- q->queue_flags = (1 << QUEUE_FLAG_CLUSTER);
+- q->queue_lock = lock;
+-
+- blk_queue_segment_boundary(q, 0xffffffff);
+-
+- blk_queue_make_request(q, __make_request);
+- blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
+-
+- blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
+- blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
+-
+- q->sg_reserved_size = INT_MAX;
+-
+- /*
+- * all done
+- */
+- if (!elevator_init(q, NULL)) {
+- blk_queue_congestion_threshold(q);
+- return q;
+- }
+-
+- blk_put_queue(q);
+- return NULL;
+-}
+-EXPORT_SYMBOL(blk_init_queue_node);
+-
+-int blk_get_queue(struct request_queue *q)
+-{
+- if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+- kobject_get(&q->kobj);
+- return 0;
+- }
+-
+- return 1;
+-}
+-
+-EXPORT_SYMBOL(blk_get_queue);
+-
+-static inline void blk_free_request(struct request_queue *q, struct request *rq)
+-{
+- if (rq->cmd_flags & REQ_ELVPRIV)
+- elv_put_request(q, rq);
+- mempool_free(rq, q->rq.rq_pool);
+-}
+-
+-static struct request *
+-blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
+-{
+- struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+-
+- if (!rq)
+- return NULL;
+-
+- /*
+- * first three bits are identical in rq->cmd_flags and bio->bi_rw,
+- * see bio.h and blkdev.h
+- */
+- rq->cmd_flags = rw | REQ_ALLOCED;
+-
+- if (priv) {
+- if (unlikely(elv_set_request(q, rq, gfp_mask))) {
+- mempool_free(rq, q->rq.rq_pool);
+- return NULL;
+- }
+- rq->cmd_flags |= REQ_ELVPRIV;
+- }
+-
+- return rq;
+-}
+-
+-/*
+- * ioc_batching returns true if the ioc is a valid batching request and
+- * should be given priority access to a request.
+- */
+-static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
+-{
+- if (!ioc)
+- return 0;
+-
+- /*
+- * Make sure the process is able to allocate at least 1 request
+- * even if the batch times out, otherwise we could theoretically
+- * lose wakeups.
+- */
+- return ioc->nr_batch_requests == q->nr_batching ||
+- (ioc->nr_batch_requests > 0
+- && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
+-}
+-
+-/*
+- * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
+- * will cause the process to be a "batcher" on all queues in the system. This
+- * is the behaviour we want though - once it gets a wakeup it should be given
+- * a nice run.
+- */
+-static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
+-{
+- if (!ioc || ioc_batching(q, ioc))
+- return;
+-
+- ioc->nr_batch_requests = q->nr_batching;
+- ioc->last_waited = jiffies;
+-}
+-
+-static void __freed_request(struct request_queue *q, int rw)
+-{
+- struct request_list *rl = &q->rq;
+-
+- if (rl->count[rw] < queue_congestion_off_threshold(q))
+- blk_clear_queue_congested(q, rw);
+-
+- if (rl->count[rw] + 1 <= q->nr_requests) {
+- if (waitqueue_active(&rl->wait[rw]))
+- wake_up(&rl->wait[rw]);
+-
+- blk_clear_queue_full(q, rw);
+- }
+-}
+-
+-/*
+- * A request has just been released. Account for it, update the full and
+- * congestion status, wake up any waiters. Called under q->queue_lock.
+- */
+-static void freed_request(struct request_queue *q, int rw, int priv)
+-{
+- struct request_list *rl = &q->rq;
+-
+- rl->count[rw]--;
+- if (priv)
+- rl->elvpriv--;
+-
+- __freed_request(q, rw);
+-
+- if (unlikely(rl->starved[rw ^ 1]))
+- __freed_request(q, rw ^ 1);
+-}
+-
+-#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
+-/*
+- * Get a free request, queue_lock must be held.
+- * Returns NULL on failure, with queue_lock held.
+- * Returns !NULL on success, with queue_lock *not held*.
+- */
+-static struct request *get_request(struct request_queue *q, int rw_flags,
+- struct bio *bio, gfp_t gfp_mask)
+-{
+- struct request *rq = NULL;
+- struct request_list *rl = &q->rq;
+- struct io_context *ioc = NULL;
+- const int rw = rw_flags & 0x01;
+- int may_queue, priv;
+-
+- may_queue = elv_may_queue(q, rw_flags);
+- if (may_queue == ELV_MQUEUE_NO)
+- goto rq_starved;
+-
+- if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
+- if (rl->count[rw]+1 >= q->nr_requests) {
+- ioc = current_io_context(GFP_ATOMIC, q->node);
+- /*
+- * The queue will fill after this allocation, so set
+- * it as full, and mark this process as "batching".
+- * This process will be allowed to complete a batch of
+- * requests, others will be blocked.
+- */
+- if (!blk_queue_full(q, rw)) {
+- ioc_set_batching(q, ioc);
+- blk_set_queue_full(q, rw);
+- } else {
+- if (may_queue != ELV_MQUEUE_MUST
+- && !ioc_batching(q, ioc)) {
+- /*
+- * The queue is full and the allocating
+- * process is not a "batcher", and not
+- * exempted by the IO scheduler
+- */
+- goto out;
+- }
+- }
+- }
+- blk_set_queue_congested(q, rw);
+- }
+-
+- /*
+- * Only allow batching queuers to allocate up to 50% over the defined
+- * limit of requests, otherwise we could have thousands of requests
+- * allocated with any setting of ->nr_requests
+- */
+- if (rl->count[rw] >= (3 * q->nr_requests / 2))
+- goto out;
+-
+- rl->count[rw]++;
+- rl->starved[rw] = 0;
+-
+- priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+- if (priv)
+- rl->elvpriv++;
+-
+- spin_unlock_irq(q->queue_lock);
+-
+- rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
+- if (unlikely(!rq)) {
+- /*
+- * Allocation failed presumably due to memory. Undo anything
+- * we might have messed up.
+- *
+- * Allocating task should really be put onto the front of the
+- * wait queue, but this is pretty rare.
+- */
+- spin_lock_irq(q->queue_lock);
+- freed_request(q, rw, priv);
+-
+- /*
+- * in the very unlikely event that allocation failed and no
+- * requests for this direction were pending, mark us starved
+- * so that freeing of a request in the other direction will
+- * notice us. another possible fix would be to split the
+- * rq mempool into READ and WRITE
+- */
+-rq_starved:
+- if (unlikely(rl->count[rw] == 0))
+- rl->starved[rw] = 1;
+-
+- goto out;
+- }
+-
+- /*
+- * ioc may be NULL here, and ioc_batching will be false. That's
+- * OK, if the queue is under the request limit then requests need
+- * not count toward the nr_batch_requests limit. There will always
+- * be some limit enforced by BLK_BATCH_TIME.
+- */
+- if (ioc_batching(q, ioc))
+- ioc->nr_batch_requests--;
+-
+- rq_init(q, rq);
+-
+- blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
+-out:
+- return rq;
+-}
+-
+-/*
+- * No available requests for this queue, unplug the device and wait for some
+- * requests to become available.
+- *
+- * Called with q->queue_lock held, and returns with it unlocked.
+- */
+-static struct request *get_request_wait(struct request_queue *q, int rw_flags,
+- struct bio *bio)
+-{
+- const int rw = rw_flags & 0x01;
+- struct request *rq;
+-
+- rq = get_request(q, rw_flags, bio, GFP_NOIO);
+- while (!rq) {
+- DEFINE_WAIT(wait);
+- struct request_list *rl = &q->rq;
+-
+- prepare_to_wait_exclusive(&rl->wait[rw], &wait,
+- TASK_UNINTERRUPTIBLE);
+-
+- rq = get_request(q, rw_flags, bio, GFP_NOIO);
+-
+- if (!rq) {
+- struct io_context *ioc;
+-
+- blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+-
+- __generic_unplug_device(q);
+- spin_unlock_irq(q->queue_lock);
+- io_schedule();
+-
+- /*
+- * After sleeping, we become a "batching" process and
+- * will be able to allocate at least one request, and
+- * up to a big batch of them for a small period of time.
+- * See ioc_batching, ioc_set_batching
+- */
+- ioc = current_io_context(GFP_NOIO, q->node);
+- ioc_set_batching(q, ioc);
+-
+- spin_lock_irq(q->queue_lock);
+- }
+- finish_wait(&rl->wait[rw], &wait);
+- }
+-
+- return rq;
+-}
+-
+-struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
+-{
+- struct request *rq;
+-
+- BUG_ON(rw != READ && rw != WRITE);
+-
+- spin_lock_irq(q->queue_lock);
+- if (gfp_mask & __GFP_WAIT) {
+- rq = get_request_wait(q, rw, NULL);
+- } else {
+- rq = get_request(q, rw, NULL, gfp_mask);
+- if (!rq)
+- spin_unlock_irq(q->queue_lock);
+- }
+- /* q->queue_lock is unlocked at this point */
+-
+- return rq;
+-}
+-EXPORT_SYMBOL(blk_get_request);
+-
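One common use, sketched from the era's conventions rather than from this patch: allocate a request, fill in a packet command, run it synchronously with blk_execute_rq(), then drop it with blk_put_request(); the opcode and timeout are invented:

static int my_send_command(struct request_queue *q, struct gendisk *disk)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);	/* may sleep */
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd[0] = 0x12;				/* hypothetical 6-byte command */
	rq->cmd_len = 6;
	rq->timeout = 30 * HZ;
	err = blk_execute_rq(q, disk, rq, 0);		/* insert and wait */
	blk_put_request(rq);
	return err;
}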
+-/**
+- * blk_start_queueing - initiate dispatch of requests to device
+- * @q: request queue to kick into gear
+- *
+- * This is basically a helper to remove the need to know whether a queue
+- * is plugged or not if someone just wants to initiate dispatch of requests
+- * for this queue.
+- *
+- * The queue lock must be held with interrupts disabled.
+- */
+-void blk_start_queueing(struct request_queue *q)
-{
-- struct cfq_data *cfqd = (struct cfq_data *) data;
+- if (!blk_queue_plugged(q))
+- q->request_fn(q);
+- else
+- __generic_unplug_device(q);
+-}
+-EXPORT_SYMBOL(blk_start_queueing);
+-
+-/**
+- * blk_requeue_request - put a request back on queue
+- * @q: request queue where request should be inserted
+- * @rq: request to be inserted
+- *
+- * Description:
+- * Drivers often keep queueing requests until the hardware cannot accept
+- * more. When that condition happens, we need to put the request back
+- * on the queue. Must be called with queue lock held.
+- */
+-void blk_requeue_request(struct request_queue *q, struct request *rq)
+-{
+- blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+-
+- if (blk_rq_tagged(rq))
+- blk_queue_end_tag(q, rq);
+-
+- elv_requeue_request(q, rq);
+-}
+-
+-EXPORT_SYMBOL(blk_requeue_request);
+-
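Sketch of the documented pattern: back off when the hardware is full and let the completion path restart the queue with blk_start_queue(); the my_hw_* helpers are invented:

extern int my_hw_queue_full(void);			/* hypothetical */
extern void my_hw_send(struct request *rq);		/* hypothetical */

static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		blkdev_dequeue_request(rq);
		if (my_hw_queue_full()) {
			blk_requeue_request(q, rq);	/* keep ordering intact */
			blk_stop_queue(q);		/* resumed via blk_start_queue() */
			break;
		}
		my_hw_send(rq);
	}
}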
+-/**
+- * blk_insert_request - insert a special request into a request queue
+- * @q: request queue where request should be inserted
+- * @rq: request to be inserted
+- * @at_head: insert request at head or tail of queue
+- * @data: private data
+- *
+- * Description:
+- * Many block devices need to execute commands asynchronously, so they don't
+- * block the whole kernel from preemption during request execution. This is
+- * accomplished normally by inserting artificial requests tagged as
+- * REQ_SPECIAL into the corresponding request queue, and letting them be
+- * scheduled for actual execution by the request queue.
+- *
+- * We have the option of inserting at the head or the tail of the queue.
+- * Typically we use the tail for new ioctls and so forth. We use the head
+- * of the queue for things like a QUEUE_FULL message from a device, or a
+- * host that is unable to accept a particular command.
+- */
+-void blk_insert_request(struct request_queue *q, struct request *rq,
+- int at_head, void *data)
+-{
+- int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
- unsigned long flags;
-
-- spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+- /*
+- * tell I/O scheduler that this isn't a regular read/write (ie it
+- * must not attempt merges on this) and that it acts as a soft
+- * barrier
+- */
+- rq->cmd_type = REQ_TYPE_SPECIAL;
+- rq->cmd_flags |= REQ_SOFTBARRIER;
+-
+- rq->special = data;
+-
+- spin_lock_irqsave(q->queue_lock, flags);
-
- /*
-- * race with a non-idle queue, reset timer
+- * If command is tagged, release the tag
- */
-- if (!start_idle_class_timer(cfqd))
-- cfq_schedule_dispatch(cfqd);
+- if (blk_rq_tagged(rq))
+- blk_queue_end_tag(q, rq);
-
-- spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+- drive_stat_acct(rq, 1);
+- __elv_add_request(q, rq, where, 0);
+- blk_start_queueing(q);
+- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
- static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
- {
- del_timer_sync(&cfqd->idle_slice_timer);
-- del_timer_sync(&cfqd->idle_class_timer);
- kblockd_flush_work(&cfqd->unplug_work);
- }
-
-@@ -2126,10 +2117,6 @@ static void *cfq_init_queue(struct request_queue *q)
- cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
- cfqd->idle_slice_timer.data = (unsigned long) cfqd;
-
-- init_timer(&cfqd->idle_class_timer);
-- cfqd->idle_class_timer.function = cfq_idle_class_timer;
-- cfqd->idle_class_timer.data = (unsigned long) cfqd;
+-EXPORT_SYMBOL(blk_insert_request);
-
- INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
-
- cfqd->last_end_request = jiffies;
-@@ -2160,7 +2147,7 @@ static int __init cfq_slab_setup(void)
- if (!cfq_pool)
- goto fail;
-
-- cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
-+ cfq_ioc_pool = KMEM_CACHE(cfq_io_context, SLAB_DESTROY_BY_RCU);
- if (!cfq_ioc_pool)
- goto fail;
-
-diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
-index cae0a85..b733732 100644
---- a/block/compat_ioctl.c
-+++ b/block/compat_ioctl.c
-@@ -545,6 +545,7 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
- struct blk_user_trace_setup buts;
- struct compat_blk_user_trace_setup cbuts;
- struct request_queue *q;
-+ char b[BDEVNAME_SIZE];
- int ret;
-
- q = bdev_get_queue(bdev);
-@@ -554,6 +555,8 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
- if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
- return -EFAULT;
-
-+ strcpy(b, bdevname(bdev, b));
-+
- buts = (struct blk_user_trace_setup) {
- .act_mask = cbuts.act_mask,
- .buf_size = cbuts.buf_size,
-@@ -565,7 +568,7 @@ static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
- memcpy(&buts.name, &cbuts.name, 32);
-
- mutex_lock(&bdev->bd_mutex);
-- ret = do_blk_trace_setup(q, bdev, &buts);
-+ ret = do_blk_trace_setup(q, b, bdev->bd_dev, &buts);
- mutex_unlock(&bdev->bd_mutex);
- if (ret)
- return ret;
-diff --git a/block/elevator.c b/block/elevator.c
-index e452deb..8cd5775 100644
---- a/block/elevator.c
-+++ b/block/elevator.c
-@@ -185,9 +185,7 @@ static elevator_t *elevator_alloc(struct request_queue *q,
-
- eq->ops = &e->ops;
- eq->elevator_type = e;
-- kobject_init(&eq->kobj);
-- kobject_set_name(&eq->kobj, "%s", "iosched");
-- eq->kobj.ktype = &elv_ktype;
-+ kobject_init(&eq->kobj, &elv_ktype);
- mutex_init(&eq->sysfs_lock);
-
- eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
-@@ -743,7 +741,21 @@ struct request *elv_next_request(struct request_queue *q)
- q->boundary_rq = NULL;
- }
-
-- if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
-+ if (rq->cmd_flags & REQ_DONTPREP)
-+ break;
-+
-+ if (q->dma_drain_size && rq->data_len) {
-+ /*
-+ * make sure space for the drain appears we
-+ * know we can do this because max_hw_segments
-+ * has been adjusted to be one fewer than the
-+ * device can handle
-+ */
-+ rq->nr_phys_segments++;
-+ rq->nr_hw_segments++;
-+ }
-+
-+ if (!q->prep_rq_fn)
- break;
-
- ret = q->prep_rq_fn(q, rq);
-@@ -756,6 +768,16 @@ struct request *elv_next_request(struct request_queue *q)
- * avoid resource deadlock. REQ_STARTED will
- * prevent other fs requests from passing this one.
- */
-+ if (q->dma_drain_size && rq->data_len &&
-+ !(rq->cmd_flags & REQ_DONTPREP)) {
-+ /*
-+ * remove the space for the drain we added
-+ * so that we don't add it again
-+ */
-+ --rq->nr_phys_segments;
-+ --rq->nr_hw_segments;
-+ }
-+
- rq = NULL;
- break;
- } else if (ret == BLKPREP_KILL) {
-@@ -931,9 +953,7 @@ int elv_register_queue(struct request_queue *q)
- elevator_t *e = q->elevator;
- int error;
-
-- e->kobj.parent = &q->kobj;
+-static int __blk_rq_unmap_user(struct bio *bio)
+-{
+- int ret = 0;
-
-- error = kobject_add(&e->kobj);
-+ error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
- if (!error) {
- struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
- if (attr) {
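The elevator.c hunks above track the reworked kobject API in this merge window: kobject_init() now takes the ktype up front and kobject_add() takes the parent plus a printf-style name, replacing the old kobject_set_name()/explicit-ktype sequence. A minimal sketch of the same two-step pattern for a hypothetical object (struct myobj and myobj_ktype are invented for illustration):

#include <linux/kobject.h>
#include <linux/slab.h>

struct myobj {
	struct kobject kobj;
	int value;
};

static void myobj_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct myobj, kobj));
}

static struct kobj_type myobj_ktype = {
	.release = myobj_release,
};

/* initialise with the ktype, then add with an explicit parent and name */
static struct myobj *myobj_create(struct kobject *parent)
{
	struct myobj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	kobject_init(&obj->kobj, &myobj_ktype);
	if (kobject_add(&obj->kobj, parent, "%s", "myobj")) {
		kobject_put(&obj->kobj);	/* release() frees obj */
		return NULL;
	}
	return obj;
}

Once kobject_init() has run, kobject_put() is the correct error path even when kobject_add() fails, since the release callback then frees the object.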
-diff --git a/block/genhd.c b/block/genhd.c
-index f2ac914..5e4ab4b 100644
---- a/block/genhd.c
-+++ b/block/genhd.c
-@@ -17,8 +17,10 @@
- #include <linux/buffer_head.h>
- #include <linux/mutex.h>
-
--struct kset block_subsys;
--static DEFINE_MUTEX(block_subsys_lock);
-+static DEFINE_MUTEX(block_class_lock);
-+#ifndef CONFIG_SYSFS_DEPRECATED
-+struct kobject *block_depr;
-+#endif
-
- /*
- * Can be deleted altogether. Later.
-@@ -37,19 +39,17 @@ static inline int major_to_index(int major)
- }
-
- #ifdef CONFIG_PROC_FS
+- if (bio) {
+- if (bio_flagged(bio, BIO_USER_MAPPED))
+- bio_unmap_user(bio);
+- else
+- ret = bio_uncopy_user(bio);
+- }
-
- void blkdev_show(struct seq_file *f, off_t offset)
- {
- struct blk_major_name *dp;
-
- if (offset < BLKDEV_MAJOR_HASH_SIZE) {
-- mutex_lock(&block_subsys_lock);
-+ mutex_lock(&block_class_lock);
- for (dp = major_names[offset]; dp; dp = dp->next)
- seq_printf(f, "%3d %s\n", dp->major, dp->name);
-- mutex_unlock(&block_subsys_lock);
-+ mutex_unlock(&block_class_lock);
- }
- }
+- return ret;
+-}
-
- #endif /* CONFIG_PROC_FS */
-
- int register_blkdev(unsigned int major, const char *name)
-@@ -57,7 +57,7 @@ int register_blkdev(unsigned int major, const char *name)
- struct blk_major_name **n, *p;
- int index, ret = 0;
-
-- mutex_lock(&block_subsys_lock);
-+ mutex_lock(&block_class_lock);
-
- /* temporary */
- if (major == 0) {
-@@ -102,7 +102,7 @@ int register_blkdev(unsigned int major, const char *name)
- kfree(p);
- }
- out:
-- mutex_unlock(&block_subsys_lock);
-+ mutex_unlock(&block_class_lock);
- return ret;
- }
-
-@@ -114,7 +114,7 @@ void unregister_blkdev(unsigned int major, const char *name)
- struct blk_major_name *p = NULL;
- int index = major_to_index(major);
-
-- mutex_lock(&block_subsys_lock);
-+ mutex_lock(&block_class_lock);
- for (n = &major_names[index]; *n; n = &(*n)->next)
- if ((*n)->major == major)
- break;
-@@ -124,7 +124,7 @@ void unregister_blkdev(unsigned int major, const char *name)
- p = *n;
- *n = p->next;
- }
-- mutex_unlock(&block_subsys_lock);
-+ mutex_unlock(&block_class_lock);
- kfree(p);
- }
-
-@@ -137,29 +137,30 @@ static struct kobj_map *bdev_map;
- * range must be nonzero
- * The hash chain is sorted on range, so that subranges can override.
- */
--void blk_register_region(dev_t dev, unsigned long range, struct module *module,
-+void blk_register_region(dev_t devt, unsigned long range, struct module *module,
- struct kobject *(*probe)(dev_t, int *, void *),
- int (*lock)(dev_t, void *), void *data)
- {
-- kobj_map(bdev_map, dev, range, module, probe, lock, data);
-+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
- }
-
- EXPORT_SYMBOL(blk_register_region);
-
--void blk_unregister_region(dev_t dev, unsigned long range)
-+void blk_unregister_region(dev_t devt, unsigned long range)
- {
-- kobj_unmap(bdev_map, dev, range);
-+ kobj_unmap(bdev_map, devt, range);
- }
-
- EXPORT_SYMBOL(blk_unregister_region);
-
--static struct kobject *exact_match(dev_t dev, int *part, void *data)
-+static struct kobject *exact_match(dev_t devt, int *part, void *data)
- {
- struct gendisk *p = data;
-- return &p->kobj;
-+
-+ return &p->dev.kobj;
- }
-
--static int exact_lock(dev_t dev, void *data)
-+static int exact_lock(dev_t devt, void *data)
- {
- struct gendisk *p = data;
-
-@@ -194,8 +195,6 @@ void unlink_gendisk(struct gendisk *disk)
- disk->minors);
- }
-
--#define to_disk(obj) container_of(obj,struct gendisk,kobj)
+-int blk_rq_append_bio(struct request_queue *q, struct request *rq,
+- struct bio *bio)
+-{
+- if (!rq->bio)
+- blk_rq_bio_prep(q, rq, bio);
+- else if (!ll_back_merge_fn(q, rq, bio))
+- return -EINVAL;
+- else {
+- rq->biotail->bi_next = bio;
+- rq->biotail = bio;
-
- /**
- * get_gendisk - get partitioning information for a given device
- * @dev: device to get partitioning information for
-@@ -203,10 +202,12 @@ void unlink_gendisk(struct gendisk *disk)
- * This function gets the structure containing partitioning
- * information for the given device @dev.
- */
--struct gendisk *get_gendisk(dev_t dev, int *part)
-+struct gendisk *get_gendisk(dev_t devt, int *part)
- {
-- struct kobject *kobj = kobj_lookup(bdev_map, dev, part);
-- return kobj ? to_disk(kobj) : NULL;
-+ struct kobject *kobj = kobj_lookup(bdev_map, devt, part);
-+ struct device *dev = kobj_to_dev(kobj);
-+
-+ return kobj ? dev_to_disk(dev) : NULL;
- }
-
- /*
-@@ -216,13 +217,17 @@ struct gendisk *get_gendisk(dev_t dev, int *part)
- */
- void __init printk_all_partitions(void)
- {
-- int n;
-+ struct device *dev;
- struct gendisk *sgp;
-+ char buf[BDEVNAME_SIZE];
-+ int n;
-
-- mutex_lock(&block_subsys_lock);
-+ mutex_lock(&block_class_lock);
- /* For each block device... */
-- list_for_each_entry(sgp, &block_subsys.list, kobj.entry) {
-- char buf[BDEVNAME_SIZE];
-+ list_for_each_entry(dev, &block_class.devices, node) {
-+ if (dev->type != &disk_type)
-+ continue;
-+ sgp = dev_to_disk(dev);
- /*
-	 * Don't show empty devices or things that have been suppressed
- */
-@@ -255,38 +260,46 @@ void __init printk_all_partitions(void)
- sgp->major, n + 1 + sgp->first_minor,
- (unsigned long long)sgp->part[n]->nr_sects >> 1,
- disk_name(sgp, n + 1, buf));
-- } /* partition subloop */
-- } /* Block device loop */
-+ }
-+ }
-
-- mutex_unlock(&block_subsys_lock);
-- return;
-+ mutex_unlock(&block_class_lock);
- }
-
- #ifdef CONFIG_PROC_FS
- /* iterator */
- static void *part_start(struct seq_file *part, loff_t *pos)
- {
-- struct list_head *p;
-- loff_t l = *pos;
-+ loff_t k = *pos;
-+ struct device *dev;
-
-- mutex_lock(&block_subsys_lock);
-- list_for_each(p, &block_subsys.list)
-- if (!l--)
-- return list_entry(p, struct gendisk, kobj.entry);
-+ mutex_lock(&block_class_lock);
-+ list_for_each_entry(dev, &block_class.devices, node) {
-+ if (dev->type != &disk_type)
-+ continue;
-+ if (!k--)
-+ return dev_to_disk(dev);
-+ }
- return NULL;
- }
-
- static void *part_next(struct seq_file *part, void *v, loff_t *pos)
- {
-- struct list_head *p = ((struct gendisk *)v)->kobj.entry.next;
-+ struct gendisk *gp = v;
-+ struct device *dev;
- ++*pos;
-- return p==&block_subsys.list ? NULL :
-- list_entry(p, struct gendisk, kobj.entry);
-+ list_for_each_entry(dev, &gp->dev.node, node) {
-+ if (&dev->node == &block_class.devices)
-+ return NULL;
-+ if (dev->type == &disk_type)
-+ return dev_to_disk(dev);
-+ }
-+ return NULL;
- }
-
- static void part_stop(struct seq_file *part, void *v)
- {
-- mutex_unlock(&block_subsys_lock);
-+ mutex_unlock(&block_class_lock);
- }
-
- static int show_partition(struct seq_file *part, void *v)
-@@ -295,7 +308,7 @@ static int show_partition(struct seq_file *part, void *v)
- int n;
- char buf[BDEVNAME_SIZE];
-
-- if (&sgp->kobj.entry == block_subsys.list.next)
-+ if (&sgp->dev.node == block_class.devices.next)
- seq_puts(part, "major minor #blocks name\n\n");
-
-	/* Don't show non-partitionable removable devices or empty devices */
-@@ -325,110 +338,81 @@ static int show_partition(struct seq_file *part, void *v)
- }
-
- struct seq_operations partitions_op = {
-- .start =part_start,
-- .next = part_next,
-- .stop = part_stop,
-- .show = show_partition
-+ .start = part_start,
-+ .next = part_next,
-+ .stop = part_stop,
-+ .show = show_partition
- };
- #endif
-
-
- extern int blk_dev_init(void);
-
--static struct kobject *base_probe(dev_t dev, int *part, void *data)
-+static struct kobject *base_probe(dev_t devt, int *part, void *data)
- {
-- if (request_module("block-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
-+ if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
- /* Make old-style 2.4 aliases work */
-- request_module("block-major-%d", MAJOR(dev));
-+ request_module("block-major-%d", MAJOR(devt));
- return NULL;
- }
-
- static int __init genhd_device_init(void)
- {
-- int err;
+- rq->data_len += bio->bi_size;
+- }
+- return 0;
+-}
+-EXPORT_SYMBOL(blk_rq_append_bio);
-
-- bdev_map = kobj_map_init(base_probe, &block_subsys_lock);
-+ class_register(&block_class);
-+ bdev_map = kobj_map_init(base_probe, &block_class_lock);
- blk_dev_init();
-- err = subsystem_register(&block_subsys);
-- if (err < 0)
-- printk(KERN_WARNING "%s: subsystem_register error: %d\n",
-- __FUNCTION__, err);
-- return err;
-+
-+#ifndef CONFIG_SYSFS_DEPRECATED
-+ /* create top-level block dir */
-+ block_depr = kobject_create_and_add("block", NULL);
-+#endif
-+ return 0;
- }
-
- subsys_initcall(genhd_device_init);
-
+-static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
+- void __user *ubuf, unsigned int len)
+-{
+- unsigned long uaddr;
+- struct bio *bio, *orig_bio;
+- int reading, ret;
-
+- reading = rq_data_dir(rq) == READ;
-
--/*
-- * kobject & sysfs bindings for block devices
+- /*
+- * if alignment requirement is satisfied, map in user pages for
+- * direct dma. else, set up kernel bounce buffers
+- */
+- uaddr = (unsigned long) ubuf;
+- if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+- bio = bio_map_user(q, NULL, uaddr, len, reading);
+- else
+- bio = bio_copy_user(q, uaddr, len, reading);
+-
+- if (IS_ERR(bio))
+- return PTR_ERR(bio);
+-
+- orig_bio = bio;
+- blk_queue_bounce(q, &bio);
+-
+- /*
+- * We link the bounce buffer in and could have to traverse it
+- * later so we have to get a ref to prevent it from being freed
+- */
+- bio_get(bio);
+-
+- ret = blk_rq_append_bio(q, rq, bio);
+- if (!ret)
+- return bio->bi_size;
+-
+-	/* if it was bounced we must call the end io function */
+- bio_endio(bio, 0);
+- __blk_rq_unmap_user(orig_bio);
+- bio_put(bio);
+- return ret;
+-}
+-
+-/**
+- * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
+- * @q: request queue where request should be inserted
+- * @rq: request structure to fill
+- * @ubuf: the user buffer
+- * @len: length of user data
+- *
+- * Description:
+- * Data will be mapped directly for zero copy io, if possible. Otherwise
+- * a kernel bounce buffer is used.
+- *
+- * A matching blk_rq_unmap_user() must be issued at the end of io, while
+- * still in process context.
+- *
+- * Note: The mapped bio may need to be bounced through blk_queue_bounce()
+- * before being submitted to the device, as pages mapped may be out of
+- * reach. It's the caller's responsibility to make sure this happens. The
+- * original bio must be passed back in to blk_rq_unmap_user() for proper
+- * unmapping.
- */
--static ssize_t disk_attr_show(struct kobject *kobj, struct attribute *attr,
-- char *page)
-+static ssize_t disk_range_show(struct device *dev,
-+ struct device_attribute *attr, char *buf)
- {
-- struct gendisk *disk = to_disk(kobj);
-- struct disk_attribute *disk_attr =
-- container_of(attr,struct disk_attribute,attr);
-- ssize_t ret = -EIO;
-+ struct gendisk *disk = dev_to_disk(dev);
-
-- if (disk_attr->show)
-- ret = disk_attr->show(disk,page);
+-int blk_rq_map_user(struct request_queue *q, struct request *rq,
+- void __user *ubuf, unsigned long len)
+-{
+- unsigned long bytes_read = 0;
+- struct bio *bio = NULL;
+- int ret;
+-
+- if (len > (q->max_hw_sectors << 9))
+- return -EINVAL;
+- if (!len || !ubuf)
+- return -EINVAL;
+-
+- while (bytes_read != len) {
+- unsigned long map_len, end, start;
+-
+- map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
+- end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
+- >> PAGE_SHIFT;
+- start = (unsigned long)ubuf >> PAGE_SHIFT;
+-
+- /*
+- * A bad offset could cause us to require BIO_MAX_PAGES + 1
+- * pages. If this happens we just lower the requested
+- * mapping len by a page so that we can fit
+- */
+- if (end - start > BIO_MAX_PAGES)
+- map_len -= PAGE_SIZE;
+-
+- ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+- if (ret < 0)
+- goto unmap_rq;
+- if (!bio)
+- bio = rq->bio;
+- bytes_read += ret;
+- ubuf += ret;
+- }
+-
+- rq->buffer = rq->data = NULL;
+- return 0;
+-unmap_rq:
+- blk_rq_unmap_user(bio);
- return ret;
-+ return sprintf(buf, "%d\n", disk->minors);
- }
-
--static ssize_t disk_attr_store(struct kobject * kobj, struct attribute * attr,
-- const char *page, size_t count)
-+static ssize_t disk_removable_show(struct device *dev,
-+ struct device_attribute *attr, char *buf)
- {
-- struct gendisk *disk = to_disk(kobj);
-- struct disk_attribute *disk_attr =
-- container_of(attr,struct disk_attribute,attr);
-- ssize_t ret = 0;
-+ struct gendisk *disk = dev_to_disk(dev);
-
-- if (disk_attr->store)
-- ret = disk_attr->store(disk, page, count);
+-}
+-
+-EXPORT_SYMBOL(blk_rq_map_user);
+-
+-/**
+- * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
+- * @q: request queue where request should be inserted
+- * @rq: request to map data to
+- * @iov: pointer to the iovec
+- * @iov_count: number of elements in the iovec
+- * @len: I/O byte count
+- *
+- * Description:
+- * Data will be mapped directly for zero copy io, if possible. Otherwise
+- * a kernel bounce buffer is used.
+- *
+- * A matching blk_rq_unmap_user() must be issued at the end of io, while
+- * still in process context.
+- *
+- * Note: The mapped bio may need to be bounced through blk_queue_bounce()
+- * before being submitted to the device, as pages mapped may be out of
+- * reach. It's the callers responsibility to make sure this happens. The
+- * reach. It's the caller's responsibility to make sure this happens. The
+- * unmapping.
+- */
+-int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
+- struct sg_iovec *iov, int iov_count, unsigned int len)
+-{
+- struct bio *bio;
+-
+- if (!iov || iov_count <= 0)
+- return -EINVAL;
+-
+- /* we don't allow misaligned data like bio_map_user() does. If the
+- * user is using sg, they're expected to know the alignment constraints
+- * and respect them accordingly */
+- bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
+- if (IS_ERR(bio))
+- return PTR_ERR(bio);
+-
+- if (bio->bi_size != len) {
+- bio_endio(bio, 0);
+- bio_unmap_user(bio);
+- return -EINVAL;
+- }
+-
+- bio_get(bio);
+- blk_rq_bio_prep(q, rq, bio);
+- rq->buffer = rq->data = NULL;
+- return 0;
+-}
+-
+-EXPORT_SYMBOL(blk_rq_map_user_iov);
+-
+-/**
+- * blk_rq_unmap_user - unmap a request with user data
+- * @bio: start of bio list
+- *
+- * Description:
+- * Unmap a rq previously mapped by blk_rq_map_user(). The caller must
+- * supply the original rq->bio from the blk_rq_map_user() return, since
+- * the io completion may have changed rq->bio.
+- */
+-int blk_rq_unmap_user(struct bio *bio)
+-{
+- struct bio *mapped_bio;
+- int ret = 0, ret2;
+-
+- while (bio) {
+- mapped_bio = bio;
+- if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
+- mapped_bio = bio->bi_private;
+-
+- ret2 = __blk_rq_unmap_user(mapped_bio);
+- if (ret2 && !ret)
+- ret = ret2;
+-
+- mapped_bio = bio;
+- bio = bio->bi_next;
+- bio_put(mapped_bio);
+- }
+-
- return ret;
-+ return sprintf(buf, "%d\n",
-+ (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
- }
-
--static struct sysfs_ops disk_sysfs_ops = {
-- .show = &disk_attr_show,
-- .store = &disk_attr_store,
--};
+-}
-
--static ssize_t disk_uevent_store(struct gendisk * disk,
-- const char *buf, size_t count)
+-EXPORT_SYMBOL(blk_rq_unmap_user);
+-
+-/**
+- * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
+- * @q: request queue where request should be inserted
+- * @rq: request to fill
+- * @kbuf: the kernel buffer
+- * @len: length of user data
+- * @gfp_mask: memory allocation flags
+- */
+-int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
+- unsigned int len, gfp_t gfp_mask)
-{
-- kobject_uevent(&disk->kobj, KOBJ_ADD);
-- return count;
+- struct bio *bio;
+-
+- if (len > (q->max_hw_sectors << 9))
+- return -EINVAL;
+- if (!len || !kbuf)
+- return -EINVAL;
+-
+- bio = bio_map_kern(q, kbuf, len, gfp_mask);
+- if (IS_ERR(bio))
+- return PTR_ERR(bio);
+-
+- if (rq_data_dir(rq) == WRITE)
+- bio->bi_rw |= (1 << BIO_RW);
+-
+- blk_rq_bio_prep(q, rq, bio);
+- blk_queue_bounce(q, &rq->bio);
+- rq->buffer = rq->data = NULL;
+- return 0;
-}
--static ssize_t disk_dev_read(struct gendisk * disk, char *page)
+-
+-EXPORT_SYMBOL(blk_rq_map_kern);
+-
+-/**
+- * blk_execute_rq_nowait - insert a request into queue for execution
+- * @q: queue to insert the request in
+- * @bd_disk: matching gendisk
+- * @rq: request to insert
+- * @at_head: insert request at head or tail of queue
+- * @done: I/O completion handler
+- *
+- * Description:
+- * Insert a fully prepared request at the back of the io scheduler queue
+- * for execution. Don't wait for completion.
+- */
+-void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
+- struct request *rq, int at_head,
+- rq_end_io_fn *done)
+-{
+- int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+-
+- rq->rq_disk = bd_disk;
+- rq->cmd_flags |= REQ_NOMERGE;
+- rq->end_io = done;
+- WARN_ON(irqs_disabled());
+- spin_lock_irq(q->queue_lock);
+- __elv_add_request(q, rq, where, 1);
+- __generic_unplug_device(q);
+- spin_unlock_irq(q->queue_lock);
+-}
+-EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
+-
+-/**
+- * blk_execute_rq - insert a request into queue for execution
+- * @q: queue to insert the request in
+- * @bd_disk: matching gendisk
+- * @rq: request to insert
+- * @at_head: insert request at head or tail of queue
+- *
+- * Description:
+- * Insert a fully prepared request at the back of the io scheduler queue
+- * for execution and wait for completion.
+- */
+-int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
+- struct request *rq, int at_head)
-{
-- dev_t base = MKDEV(disk->major, disk->first_minor);
-- return print_dev_t(page, base);
+- DECLARE_COMPLETION_ONSTACK(wait);
+- char sense[SCSI_SENSE_BUFFERSIZE];
+- int err = 0;
+-
+- /*
+- * we need an extra reference to the request, so we can look at
+- * it after io completion
+- */
+- rq->ref_count++;
+-
+- if (!rq->sense) {
+- memset(sense, 0, sizeof(sense));
+- rq->sense = sense;
+- rq->sense_len = 0;
+- }
+-
+- rq->end_io_data = &wait;
+- blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
+- wait_for_completion(&wait);
+-
+- if (rq->errors)
+- err = -EIO;
+-
+- return err;
-}
--static ssize_t disk_range_read(struct gendisk * disk, char *page)
-+static ssize_t disk_size_show(struct device *dev,
-+ struct device_attribute *attr, char *buf)
- {
-- return sprintf(page, "%d\n", disk->minors);
+-
+-EXPORT_SYMBOL(blk_execute_rq);
+-
+-static void bio_end_empty_barrier(struct bio *bio, int err)
+-{
+- if (err)
+- clear_bit(BIO_UPTODATE, &bio->bi_flags);
+-
+- complete(bio->bi_private);
-}
--static ssize_t disk_removable_read(struct gendisk * disk, char *page)
+-
+-/**
+- * blkdev_issue_flush - queue a flush
+- * @bdev: blockdev to issue flush for
+- * @error_sector: error sector
+- *
+- * Description:
+- * Issue a flush for the block device in question. Caller can supply
+- * room for storing the error offset in case of a flush error, if they
+- * wish to. Caller must run wait_for_completion() on its own.
+- */
+-int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
+-{
+- DECLARE_COMPLETION_ONSTACK(wait);
+- struct request_queue *q;
+- struct bio *bio;
+- int ret;
+-
+- if (bdev->bd_disk == NULL)
+- return -ENXIO;
+-
+- q = bdev_get_queue(bdev);
+- if (!q)
+- return -ENXIO;
+-
+- bio = bio_alloc(GFP_KERNEL, 0);
+- if (!bio)
+- return -ENOMEM;
+-
+- bio->bi_end_io = bio_end_empty_barrier;
+- bio->bi_private = &wait;
+- bio->bi_bdev = bdev;
+- submit_bio(1 << BIO_RW_BARRIER, bio);
+-
+- wait_for_completion(&wait);
+-
+- /*
+- * The driver must store the error location in ->bi_sector, if
+- * it supports it. For non-stacked drivers, this should be copied
+- * from rq->sector.
+- */
+- if (error_sector)
+- *error_sector = bio->bi_sector;
+-
+- ret = 0;
+- if (!bio_flagged(bio, BIO_UPTODATE))
+- ret = -EIO;
+-
+- bio_put(bio);
+- return ret;
+-}
+-
+-EXPORT_SYMBOL(blkdev_issue_flush);
+-
+-static void drive_stat_acct(struct request *rq, int new_io)
-{
-- return sprintf(page, "%d\n",
-- (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
-+ struct gendisk *disk = dev_to_disk(dev);
-
-+ return sprintf(buf, "%llu\n", (unsigned long long)get_capacity(disk));
- }
--static ssize_t disk_size_read(struct gendisk * disk, char *page)
+- int rw = rq_data_dir(rq);
+-
+- if (!blk_fs_request(rq) || !rq->rq_disk)
+- return;
+-
+- if (!new_io) {
+- __disk_stat_inc(rq->rq_disk, merges[rw]);
+- } else {
+- disk_round_stats(rq->rq_disk);
+- rq->rq_disk->in_flight++;
+- }
+-}
+-
+-/*
+- * add-request adds a request to the linked list.
+- * queue lock is held and interrupts disabled, as we muck with the
+- * request queue list.
+- */
+-static inline void add_request(struct request_queue * q, struct request * req)
-{
-- return sprintf(page, "%llu\n", (unsigned long long)get_capacity(disk));
+- drive_stat_acct(req, 1);
+-
+- /*
+- * elevator indicated where it wants this request to be
+- * inserted at elevator_merge time
+- */
+- __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
-}
--static ssize_t disk_capability_read(struct gendisk *disk, char *page)
-+
-+static ssize_t disk_capability_show(struct device *dev,
-+ struct device_attribute *attr, char *buf)
- {
-- return sprintf(page, "%x\n", disk->flags);
-+ struct gendisk *disk = dev_to_disk(dev);
-+
-+ return sprintf(buf, "%x\n", disk->flags);
- }
--static ssize_t disk_stats_read(struct gendisk * disk, char *page)
-+
-+static ssize_t disk_stat_show(struct device *dev,
-+ struct device_attribute *attr, char *buf)
- {
-+ struct gendisk *disk = dev_to_disk(dev);
-+
- preempt_disable();
- disk_round_stats(disk);
- preempt_enable();
-- return sprintf(page,
-+ return sprintf(buf,
- "%8lu %8lu %8llu %8u "
- "%8lu %8lu %8llu %8u "
- "%8u %8u %8u"
-@@ -445,40 +429,21 @@ static ssize_t disk_stats_read(struct gendisk * disk, char *page)
- jiffies_to_msecs(disk_stat_read(disk, io_ticks)),
- jiffies_to_msecs(disk_stat_read(disk, time_in_queue)));
- }
--static struct disk_attribute disk_attr_uevent = {
-- .attr = {.name = "uevent", .mode = S_IWUSR },
-- .store = disk_uevent_store
--};
--static struct disk_attribute disk_attr_dev = {
-- .attr = {.name = "dev", .mode = S_IRUGO },
-- .show = disk_dev_read
--};
--static struct disk_attribute disk_attr_range = {
-- .attr = {.name = "range", .mode = S_IRUGO },
-- .show = disk_range_read
--};
--static struct disk_attribute disk_attr_removable = {
-- .attr = {.name = "removable", .mode = S_IRUGO },
-- .show = disk_removable_read
--};
--static struct disk_attribute disk_attr_size = {
-- .attr = {.name = "size", .mode = S_IRUGO },
-- .show = disk_size_read
--};
--static struct disk_attribute disk_attr_capability = {
-- .attr = {.name = "capability", .mode = S_IRUGO },
-- .show = disk_capability_read
--};
--static struct disk_attribute disk_attr_stat = {
-- .attr = {.name = "stat", .mode = S_IRUGO },
-- .show = disk_stats_read
--};
-
- #ifdef CONFIG_FAIL_MAKE_REQUEST
-+static ssize_t disk_fail_show(struct device *dev,
-+ struct device_attribute *attr, char *buf)
-+{
-+ struct gendisk *disk = dev_to_disk(dev);
-+
-+ return sprintf(buf, "%d\n", disk->flags & GENHD_FL_FAIL ? 1 : 0);
-+}
-
--static ssize_t disk_fail_store(struct gendisk * disk,
-+static ssize_t disk_fail_store(struct device *dev,
-+ struct device_attribute *attr,
- const char *buf, size_t count)
- {
-+ struct gendisk *disk = dev_to_disk(dev);
- int i;
-
- if (count > 0 && sscanf(buf, "%d", &i) > 0) {
-@@ -490,136 +455,100 @@ static ssize_t disk_fail_store(struct gendisk * disk,
-
- return count;
- }
--static ssize_t disk_fail_read(struct gendisk * disk, char *page)
+-
+-/*
+- * disk_round_stats() - Round off the performance stats on a struct
+- * disk_stats.
+- *
+- * The average IO queue length and utilisation statistics are maintained
+- * by observing the current state of the queue length and the amount of
+- * time it has been in this state for.
+- *
+- * Normally, that accounting is done on IO completion, but that can result
+- * in more than a second's worth of IO being accounted for within any one
+- * second, leading to >100% utilisation. To deal with that, we call this
+- * function to do a round-off before returning the results when reading
+- * /proc/diskstats. This accounts immediately for all queue usage up to
+- * the current jiffies and restarts the counters again.
+- */
+-void disk_round_stats(struct gendisk *disk)
-{
-- return sprintf(page, "%d\n", disk->flags & GENHD_FL_FAIL ? 1 : 0);
+- unsigned long now = jiffies;
+-
+- if (now == disk->stamp)
+- return;
+-
+- if (disk->in_flight) {
+- __disk_stat_add(disk, time_in_queue,
+- disk->in_flight * (now - disk->stamp));
+- __disk_stat_add(disk, io_ticks, (now - disk->stamp));
+- }
+- disk->stamp = now;
-}
--static struct disk_attribute disk_attr_fail = {
-- .attr = {.name = "make-it-fail", .mode = S_IRUGO | S_IWUSR },
-- .store = disk_fail_store,
-- .show = disk_fail_read
--};
-
- #endif
-
--static struct attribute * default_attrs[] = {
-- &disk_attr_uevent.attr,
-- &disk_attr_dev.attr,
-- &disk_attr_range.attr,
-- &disk_attr_removable.attr,
-- &disk_attr_size.attr,
-- &disk_attr_stat.attr,
-- &disk_attr_capability.attr,
-+static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
-+static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
-+static DEVICE_ATTR(size, S_IRUGO, disk_size_show, NULL);
-+static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
-+static DEVICE_ATTR(stat, S_IRUGO, disk_stat_show, NULL);
-+#ifdef CONFIG_FAIL_MAKE_REQUEST
-+static struct device_attribute dev_attr_fail =
-+ __ATTR(make-it-fail, S_IRUGO|S_IWUSR, disk_fail_show, disk_fail_store);
-+#endif
-+
-+static struct attribute *disk_attrs[] = {
-+ &dev_attr_range.attr,
-+ &dev_attr_removable.attr,
-+ &dev_attr_size.attr,
-+ &dev_attr_capability.attr,
-+ &dev_attr_stat.attr,
- #ifdef CONFIG_FAIL_MAKE_REQUEST
-- &disk_attr_fail.attr,
-+ &dev_attr_fail.attr,
- #endif
-- NULL,
-+ NULL
-+};
-+
-+static struct attribute_group disk_attr_group = {
-+ .attrs = disk_attrs,
- };
-
--static void disk_release(struct kobject * kobj)
-+static struct attribute_group *disk_attr_groups[] = {
-+ &disk_attr_group,
-+ NULL
-+};
-+
-+static void disk_release(struct device *dev)
- {
-- struct gendisk *disk = to_disk(kobj);
-+ struct gendisk *disk = dev_to_disk(dev);
-+
- kfree(disk->random);
- kfree(disk->part);
- free_disk_stats(disk);
- kfree(disk);
- }
-
--static struct kobj_type ktype_block = {
-- .release = disk_release,
-- .sysfs_ops = &disk_sysfs_ops,
-- .default_attrs = default_attrs,
-+struct class block_class = {
-+ .name = "block",
- };
-
--extern struct kobj_type ktype_part;
+-EXPORT_SYMBOL_GPL(disk_round_stats);
-
--static int block_uevent_filter(struct kset *kset, struct kobject *kobj)
+-/*
+- * queue lock must be held
+- */
+-void __blk_put_request(struct request_queue *q, struct request *req)
-{
-- struct kobj_type *ktype = get_ktype(kobj);
+- if (unlikely(!q))
+- return;
+- if (unlikely(--req->ref_count))
+- return;
-
-- return ((ktype == &ktype_block) || (ktype == &ktype_part));
+- elv_completed_request(q, req);
+-
+- /*
+- * Request may not have originated from ll_rw_blk. if not,
+- * it didn't come out of our reserved rq pools
+- */
+- if (req->cmd_flags & REQ_ALLOCED) {
+- int rw = rq_data_dir(req);
+- int priv = req->cmd_flags & REQ_ELVPRIV;
+-
+- BUG_ON(!list_empty(&req->queuelist));
+- BUG_ON(!hlist_unhashed(&req->hash));
+-
+- blk_free_request(q, req);
+- freed_request(q, rw, priv);
+- }
-}
-
--static int block_uevent(struct kset *kset, struct kobject *kobj,
-- struct kobj_uevent_env *env)
+-EXPORT_SYMBOL_GPL(__blk_put_request);
+-
+-void blk_put_request(struct request *req)
-{
-- struct kobj_type *ktype = get_ktype(kobj);
-- struct device *physdev;
-- struct gendisk *disk;
-- struct hd_struct *part;
+- unsigned long flags;
+- struct request_queue *q = req->q;
-
-- if (ktype == &ktype_block) {
-- disk = container_of(kobj, struct gendisk, kobj);
-- add_uevent_var(env, "MINOR=%u", disk->first_minor);
-- } else if (ktype == &ktype_part) {
-- disk = container_of(kobj->parent, struct gendisk, kobj);
-- part = container_of(kobj, struct hd_struct, kobj);
-- add_uevent_var(env, "MINOR=%u",
-- disk->first_minor + part->partno);
-- } else
+- /*
+- * Gee, IDE calls in w/ NULL q. Fix IDE and remove the
+- * following if (q) test.
+- */
+- if (q) {
+- spin_lock_irqsave(q->queue_lock, flags);
+- __blk_put_request(q, req);
+- spin_unlock_irqrestore(q->queue_lock, flags);
+- }
+-}
+-
+-EXPORT_SYMBOL(blk_put_request);
+-
+-/**
+- * blk_end_sync_rq - executes a completion event on a request
+- * @rq: request to complete
+- * @error: end io status of the request
+- */
+-void blk_end_sync_rq(struct request *rq, int error)
+-{
+- struct completion *waiting = rq->end_io_data;
+-
+- rq->end_io_data = NULL;
+- __blk_put_request(rq->q, rq);
+-
+- /*
+- * complete last, if this is a stack request the process (and thus
+- * the rq pointer) could be invalid right after this complete()
+- */
+- complete(waiting);
+-}
+-EXPORT_SYMBOL(blk_end_sync_rq);
+-
+-/*
+- * Has to be called with the request spinlock acquired
+- */
+-static int attempt_merge(struct request_queue *q, struct request *req,
+- struct request *next)
+-{
+- if (!rq_mergeable(req) || !rq_mergeable(next))
- return 0;
-
-- add_uevent_var(env, "MAJOR=%u", disk->major);
+- /*
+- * not contiguous
+- */
+- if (req->sector + req->nr_sectors != next->sector)
+- return 0;
-
-- /* add physical device, backing this device */
-- physdev = disk->driverfs_dev;
-- if (physdev) {
-- char *path = kobject_get_path(&physdev->kobj, GFP_KERNEL);
+- if (rq_data_dir(req) != rq_data_dir(next)
+- || req->rq_disk != next->rq_disk
+- || next->special)
+- return 0;
-
-- add_uevent_var(env, "PHYSDEVPATH=%s", path);
-- kfree(path);
+- /*
+- * If we are allowed to merge, then append bio list
+- * from next to rq and release next. merge_requests_fn
+- * will have updated segment counts, update sector
+- * counts here.
+- */
+- if (!ll_merge_requests_fn(q, req, next))
+- return 0;
-
-- if (physdev->bus)
-- add_uevent_var(env, "PHYSDEVBUS=%s", physdev->bus->name);
+- /*
+- * At this point we have either done a back merge
+- * or front merge. We need the smaller start_time of
+- * the merged requests to be the current request
+- * for accounting purposes.
+- */
+- if (time_after(req->start_time, next->start_time))
+- req->start_time = next->start_time;
-
-- if (physdev->driver)
-- add_uevent_var(env, physdev->driver->name);
+- req->biotail->bi_next = next->bio;
+- req->biotail = next->biotail;
+-
+- req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
+-
+- elv_merge_requests(q, req, next);
+-
+- if (req->rq_disk) {
+- disk_round_stats(req->rq_disk);
+- req->rq_disk->in_flight--;
- }
-
+- req->ioprio = ioprio_best(req->ioprio, next->ioprio);
+-
+- __blk_put_request(q, next);
+- return 1;
+-}
+-
+-static inline int attempt_back_merge(struct request_queue *q,
+- struct request *rq)
+-{
+- struct request *next = elv_latter_request(q, rq);
+-
+- if (next)
+- return attempt_merge(q, rq, next);
+-
- return 0;
-}
-
--static struct kset_uevent_ops block_uevent_ops = {
-- .filter = block_uevent_filter,
-- .uevent = block_uevent,
-+struct device_type disk_type = {
-+ .name = "disk",
-+ .groups = disk_attr_groups,
-+ .release = disk_release,
- };
-
--decl_subsys(block, &ktype_block, &block_uevent_ops);
+-static inline int attempt_front_merge(struct request_queue *q,
+- struct request *rq)
+-{
+- struct request *prev = elv_former_request(q, rq);
-
- /*
- * aggregate disk stat collector. Uses the same stats that the sysfs
- * entries do, above, but makes them available through one seq_file.
-- * Watching a few disks may be efficient through sysfs, but watching
-- * all of them will be more efficient through this interface.
- *
- * The output looks suspiciously like /proc/partitions with a bunch of
- * extra fields.
- */
-
--/* iterator */
- static void *diskstats_start(struct seq_file *part, loff_t *pos)
- {
- loff_t k = *pos;
-- struct list_head *p;
-+ struct device *dev;
-
-- mutex_lock(&block_subsys_lock);
-- list_for_each(p, &block_subsys.list)
-+ mutex_lock(&block_class_lock);
-+ list_for_each_entry(dev, &block_class.devices, node) {
-+ if (dev->type != &disk_type)
-+ continue;
- if (!k--)
-- return list_entry(p, struct gendisk, kobj.entry);
-+ return dev_to_disk(dev);
-+ }
- return NULL;
- }
-
- static void *diskstats_next(struct seq_file *part, void *v, loff_t *pos)
- {
-- struct list_head *p = ((struct gendisk *)v)->kobj.entry.next;
-+ struct gendisk *gp = v;
-+ struct device *dev;
-+
- ++*pos;
-- return p==&block_subsys.list ? NULL :
-- list_entry(p, struct gendisk, kobj.entry);
-+ list_for_each_entry(dev, &gp->dev.node, node) {
-+ if (&dev->node == &block_class.devices)
-+ return NULL;
-+ if (dev->type == &disk_type)
-+ return dev_to_disk(dev);
-+ }
-+ return NULL;
- }
-
- static void diskstats_stop(struct seq_file *part, void *v)
- {
-- mutex_unlock(&block_subsys_lock);
-+ mutex_unlock(&block_class_lock);
- }
-
- static int diskstats_show(struct seq_file *s, void *v)
-@@ -629,7 +558,7 @@ static int diskstats_show(struct seq_file *s, void *v)
- int n = 0;
-
- /*
-- if (&sgp->kobj.entry == block_subsys.kset.list.next)
-+ if (&gp->dev.kobj.entry == block_class.devices.next)
- seq_puts(s, "major minor name"
- " rio rmerge rsect ruse wio wmerge "
- "wsect wuse running use aveq"
-@@ -683,7 +612,7 @@ static void media_change_notify_thread(struct work_struct *work)
-	 * set environment vars to indicate which event this is for
- * so that user space will know to go check the media status.
- */
-- kobject_uevent_env(&gd->kobj, KOBJ_CHANGE, envp);
-+ kobject_uevent_env(&gd->dev.kobj, KOBJ_CHANGE, envp);
- put_device(gd->driverfs_dev);
- }
-
-@@ -694,6 +623,25 @@ void genhd_media_change_notify(struct gendisk *disk)
- }
- EXPORT_SYMBOL_GPL(genhd_media_change_notify);
-
-+dev_t blk_lookup_devt(const char *name)
-+{
-+ struct device *dev;
-+ dev_t devt = MKDEV(0, 0);
-+
-+ mutex_lock(&block_class_lock);
-+ list_for_each_entry(dev, &block_class.devices, node) {
-+ if (strcmp(dev->bus_id, name) == 0) {
-+ devt = dev->devt;
-+ break;
-+ }
-+ }
-+ mutex_unlock(&block_class_lock);
-+
-+ return devt;
-+}
-+
-+EXPORT_SYMBOL(blk_lookup_devt);
-+
- struct gendisk *alloc_disk(int minors)
- {
- return alloc_disk_node(minors, -1);
-@@ -721,9 +669,10 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
- }
- }
- disk->minors = minors;
-- kobj_set_kset_s(disk,block_subsys);
-- kobject_init(&disk->kobj);
- rand_initialize_disk(disk);
-+ disk->dev.class = &block_class;
-+ disk->dev.type = &disk_type;
-+ device_initialize(&disk->dev);
- INIT_WORK(&disk->async_notify,
- media_change_notify_thread);
- }
-@@ -743,7 +692,7 @@ struct kobject *get_disk(struct gendisk *disk)
- owner = disk->fops->owner;
- if (owner && !try_module_get(owner))
- return NULL;
-- kobj = kobject_get(&disk->kobj);
-+ kobj = kobject_get(&disk->dev.kobj);
- if (kobj == NULL) {
- module_put(owner);
- return NULL;
-@@ -757,7 +706,7 @@ EXPORT_SYMBOL(get_disk);
- void put_disk(struct gendisk *disk)
- {
- if (disk)
-- kobject_put(&disk->kobj);
-+ kobject_put(&disk->dev.kobj);
- }
-
- EXPORT_SYMBOL(put_disk);
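The genhd.c portion above retires the hand-rolled disk kobject, sysfs_ops and uevent filter in favour of the driver-core device model: each gendisk now embeds a struct device bound to block_class and disk_type, and the per-disk sysfs files become ordinary DEVICE_ATTR() entries whose callbacks receive a struct device. A sketch of the resulting callback shape, using a hypothetical attribute name so it is not mistaken for one of the attributes in the patch:

#include <linux/device.h>
#include <linux/genhd.h>

/* hypothetical read-only attribute following the converted pattern */
static ssize_t disk_example_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n", disk->minors);
}

static DEVICE_ATTR(example, S_IRUGO, disk_example_show, NULL);

The uevent variables, the dev attribute and the parent link to the physical device now come from the driver core rather than from the removed block_uevent() helpers.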
-diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
-index 8b91994..1932a56 100644
---- a/block/ll_rw_blk.c
-+++ b/block/ll_rw_blk.c
-@@ -347,7 +347,6 @@ unsigned blk_ordered_req_seq(struct request *rq)
- void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
- {
- struct request *rq;
-- int uptodate;
-
- if (error && !q->orderr)
- q->orderr = error;
-@@ -361,15 +360,11 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
- /*
- * Okay, sequence complete.
- */
-- uptodate = 1;
-- if (q->orderr)
-- uptodate = q->orderr;
+- if (prev)
+- return attempt_merge(q, prev, rq);
+-
+- return 0;
+-}
+-
+-static void init_request_from_bio(struct request *req, struct bio *bio)
+-{
+- req->cmd_type = REQ_TYPE_FS;
+-
+- /*
+- * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
+- */
+- if (bio_rw_ahead(bio) || bio_failfast(bio))
+- req->cmd_flags |= REQ_FAILFAST;
+-
+- /*
+- * REQ_BARRIER implies no merging, but let's make it explicit
+- */
+- if (unlikely(bio_barrier(bio)))
+- req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+-
+- if (bio_sync(bio))
+- req->cmd_flags |= REQ_RW_SYNC;
+- if (bio_rw_meta(bio))
+- req->cmd_flags |= REQ_RW_META;
+-
+- req->errors = 0;
+- req->hard_sector = req->sector = bio->bi_sector;
+- req->ioprio = bio_prio(bio);
+- req->start_time = jiffies;
+- blk_rq_bio_prep(req->q, req, bio);
+-}
+-
+-static int __make_request(struct request_queue *q, struct bio *bio)
+-{
+- struct request *req;
+- int el_ret, nr_sectors, barrier, err;
+- const unsigned short prio = bio_prio(bio);
+- const int sync = bio_sync(bio);
+- int rw_flags;
+-
+- nr_sectors = bio_sectors(bio);
+-
+- /*
+- * low level driver can indicate that it wants pages above a
+- * certain limit bounced to low memory (ie for highmem, or even
+- * ISA dma in theory)
+- */
+- blk_queue_bounce(q, &bio);
+-
+- barrier = bio_barrier(bio);
+- if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
+- err = -EOPNOTSUPP;
+- goto end_io;
+- }
+-
+- spin_lock_irq(q->queue_lock);
+-
+- if (unlikely(barrier) || elv_queue_empty(q))
+- goto get_rq;
+-
+- el_ret = elv_merge(q, &req, bio);
+- switch (el_ret) {
+- case ELEVATOR_BACK_MERGE:
+- BUG_ON(!rq_mergeable(req));
+-
+- if (!ll_back_merge_fn(q, req, bio))
+- break;
+-
+- blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+-
+- req->biotail->bi_next = bio;
+- req->biotail = bio;
+- req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+- req->ioprio = ioprio_best(req->ioprio, prio);
+- drive_stat_acct(req, 0);
+- if (!attempt_back_merge(q, req))
+- elv_merged_request(q, req, el_ret);
+- goto out;
+-
+- case ELEVATOR_FRONT_MERGE:
+- BUG_ON(!rq_mergeable(req));
+-
+- if (!ll_front_merge_fn(q, req, bio))
+- break;
+-
+- blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+-
+- bio->bi_next = req->bio;
+- req->bio = bio;
+-
+- /*
+- * may not be valid. if the low level driver said
+- * it didn't need a bounce buffer then it better
+- * not touch req->buffer either...
+- */
+- req->buffer = bio_data(bio);
+- req->current_nr_sectors = bio_cur_sectors(bio);
+- req->hard_cur_sectors = req->current_nr_sectors;
+- req->sector = req->hard_sector = bio->bi_sector;
+- req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+- req->ioprio = ioprio_best(req->ioprio, prio);
+- drive_stat_acct(req, 0);
+- if (!attempt_front_merge(q, req))
+- elv_merged_request(q, req, el_ret);
+- goto out;
+-
+- /* ELV_NO_MERGE: elevator says don't/can't merge. */
+- default:
+- ;
+- }
+-
+-get_rq:
+- /*
+- * This sync check and mask will be re-done in init_request_from_bio(),
+- * but we need to set it earlier to expose the sync flag to the
+- * rq allocator and io schedulers.
+- */
+- rw_flags = bio_data_dir(bio);
+- if (sync)
+- rw_flags |= REQ_RW_SYNC;
+-
+- /*
+- * Grab a free request. This might sleep but cannot fail.
+- * Returns with the queue unlocked.
+- */
+- req = get_request_wait(q, rw_flags, bio);
+-
+- /*
+- * After dropping the lock and possibly sleeping here, our request
+- * may now be mergeable after it had proven unmergeable (above).
+- * We don't worry about that case for efficiency. It won't happen
+- * often, and the elevators are able to handle it.
+- */
+- init_request_from_bio(req, bio);
+-
+- spin_lock_irq(q->queue_lock);
+- if (elv_queue_empty(q))
+- blk_plug_device(q);
+- add_request(q, req);
+-out:
+- if (sync)
+- __generic_unplug_device(q);
+-
+- spin_unlock_irq(q->queue_lock);
+- return 0;
+-
+-end_io:
+- bio_endio(bio, err);
+- return 0;
+-}
+-
+-/*
+- * If bio->bi_dev is a partition, remap the location
+- */
+-static inline void blk_partition_remap(struct bio *bio)
+-{
+- struct block_device *bdev = bio->bi_bdev;
+-
+- if (bio_sectors(bio) && bdev != bdev->bd_contains) {
+- struct hd_struct *p = bdev->bd_part;
+- const int rw = bio_data_dir(bio);
+-
+- p->sectors[rw] += bio_sectors(bio);
+- p->ios[rw]++;
+-
+- bio->bi_sector += p->start_sect;
+- bio->bi_bdev = bdev->bd_contains;
+-
+- blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
+- bdev->bd_dev, bio->bi_sector,
+- bio->bi_sector - p->start_sect);
+- }
+-}
+-
+-static void handle_bad_sector(struct bio *bio)
+-{
+- char b[BDEVNAME_SIZE];
+-
+- printk(KERN_INFO "attempt to access beyond end of device\n");
+- printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
+- bdevname(bio->bi_bdev, b),
+- bio->bi_rw,
+- (unsigned long long)bio->bi_sector + bio_sectors(bio),
+- (long long)(bio->bi_bdev->bd_inode->i_size >> 9));
+-
+- set_bit(BIO_EOF, &bio->bi_flags);
+-}
+-
+-#ifdef CONFIG_FAIL_MAKE_REQUEST
+-
+-static DECLARE_FAULT_ATTR(fail_make_request);
+-
+-static int __init setup_fail_make_request(char *str)
+-{
+- return setup_fault_attr(&fail_make_request, str);
+-}
+-__setup("fail_make_request=", setup_fail_make_request);
+-
+-static int should_fail_request(struct bio *bio)
+-{
+- if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
+- (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
+- return should_fail(&fail_make_request, bio->bi_size);
+-
+- return 0;
+-}
+-
+-static int __init fail_make_request_debugfs(void)
+-{
+- return init_fault_attr_dentries(&fail_make_request,
+- "fail_make_request");
+-}
+-
+-late_initcall(fail_make_request_debugfs);
+-
+-#else /* CONFIG_FAIL_MAKE_REQUEST */
+-
+-static inline int should_fail_request(struct bio *bio)
+-{
+- return 0;
+-}
+-
+-#endif /* CONFIG_FAIL_MAKE_REQUEST */
+-
+-/*
+- * Check whether this bio extends beyond the end of the device.
+- */
+-static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
+-{
+- sector_t maxsector;
+-
+- if (!nr_sectors)
+- return 0;
+-
+- /* Test device or partition size, when known. */
+- maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+- if (maxsector) {
+- sector_t sector = bio->bi_sector;
+-
+- if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+- /*
+- * This may well happen - the kernel calls bread()
+- * without checking the size of the device, e.g., when
+- * mounting a device.
+- */
+- handle_bad_sector(bio);
+- return 1;
+- }
+- }
+-
+- return 0;
+-}
+-
+-/**
+- * generic_make_request: hand a buffer to its device driver for I/O
+- * @bio: The bio describing the location in memory and on the device.
+- *
+- * generic_make_request() is used to make I/O requests of block
+- * devices. It is passed a &struct bio, which describes the I/O that needs
+- * to be done.
+- *
+- * generic_make_request() does not return any status. The
+- * success/failure status of the request, along with notification of
+- * completion, is delivered asynchronously through the bio->bi_end_io
+- * function described (one day) else where.
+- * function described (one day) elsewhere.
+- * The caller of generic_make_request must make sure that bi_io_vec
+- * are set to describe the memory buffer, and that bi_dev and bi_sector are
+- * set to describe the device address, and the
+- * bi_end_io and optionally bi_private are set to describe how
+- * completion notification should be signaled.
+- *
+- * generic_make_request and the drivers it calls may use bi_next if this
+- * bio happens to be merged with someone else, and may change bi_dev and
+- * bi_sector for remaps as it sees fit. So the values of these fields
+- * should NOT be depended on after the call to generic_make_request.
+- */
+-static inline void __generic_make_request(struct bio *bio)
+-{
+- struct request_queue *q;
+- sector_t old_sector;
+- int ret, nr_sectors = bio_sectors(bio);
+- dev_t old_dev;
+- int err = -EIO;
+-
+- might_sleep();
+-
+- if (bio_check_eod(bio, nr_sectors))
+- goto end_io;
+-
+- /*
+- * Resolve the mapping until finished. (drivers are
+- * still free to implement/resolve their own stacking
+- * by explicitly returning 0)
+- *
+- * NOTE: we don't repeat the blk_size check for each new device.
+- * Stacking drivers are expected to know what they are doing.
+- */
+- old_sector = -1;
+- old_dev = 0;
+- do {
+- char b[BDEVNAME_SIZE];
+-
+- q = bdev_get_queue(bio->bi_bdev);
+- if (!q) {
+- printk(KERN_ERR
+- "generic_make_request: Trying to access "
+- "nonexistent block-device %s (%Lu)\n",
+- bdevname(bio->bi_bdev, b),
+- (long long) bio->bi_sector);
+-end_io:
+- bio_endio(bio, err);
+- break;
+- }
+-
+- if (unlikely(nr_sectors > q->max_hw_sectors)) {
+- printk("bio too big device %s (%u > %u)\n",
+- bdevname(bio->bi_bdev, b),
+- bio_sectors(bio),
+- q->max_hw_sectors);
+- goto end_io;
+- }
+-
+- if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+- goto end_io;
+-
+- if (should_fail_request(bio))
+- goto end_io;
+-
+- /*
+- * If this device has partitions, remap block n
+- * of partition p to block n+start(p) of the disk.
+- */
+- blk_partition_remap(bio);
+-
+- if (old_sector != -1)
+- blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
+- old_sector);
+-
+- blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+-
+- old_sector = bio->bi_sector;
+- old_dev = bio->bi_bdev->bd_dev;
+-
+- if (bio_check_eod(bio, nr_sectors))
+- goto end_io;
+- if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
+- err = -EOPNOTSUPP;
+- goto end_io;
+- }
+-
+- ret = q->make_request_fn(q, bio);
+- } while (ret);
+-}
+-
+-/*
+- * We only want one ->make_request_fn to be active at a time,
+- * else stack usage with stacked devices could be a problem.
+- * So use current->bio_{list,tail} to keep a list of requests
+- * submitted by a make_request_fn function.
+- * current->bio_tail is also used as a flag to say if
+- * generic_make_request is currently active in this task or not.
+- * If it is NULL, then no make_request is active. If it is non-NULL,
+- * then a make_request is active, and new requests should be added
+- * at the tail
+- */
+-void generic_make_request(struct bio *bio)
+-{
+- if (current->bio_tail) {
+- /* make_request is active */
+- *(current->bio_tail) = bio;
+- bio->bi_next = NULL;
+- current->bio_tail = &bio->bi_next;
+- return;
+- }
+- /* following loop may be a bit non-obvious, and so deserves some
+- * explanation.
+- * Before entering the loop, bio->bi_next is NULL (as all callers
+- * ensure that) so we have a list with a single bio.
+- * We pretend that we have just taken it off a longer list, so
+- * we assign bio_list to the next (which is NULL) and bio_tail
+- * to &bio_list, thus initialising the bio_list of new bios to be
+- * added. __generic_make_request may indeed add some more bios
+- * through a recursive call to generic_make_request. If it
+- * did, we find a non-NULL value in bio_list and re-enter the loop
+- * from the top. In this case we really did just take the bio
+- * off the top of the list (no pretending) and so fix up bio_list and
+- * bio_tail or bi_next, and call into __generic_make_request again.
+- *
+- * The loop was structured like this to make only one call to
+- * __generic_make_request (which is important as it is large and
+- * inlined) and to keep the structure simple.
+- */
+- BUG_ON(bio->bi_next);
+- do {
+- current->bio_list = bio->bi_next;
+- if (bio->bi_next == NULL)
+-			current->bio_tail = &current->bio_list;
+- else
+- bio->bi_next = NULL;
+- __generic_make_request(bio);
+- bio = current->bio_list;
+- } while (bio);
+- current->bio_tail = NULL; /* deactivate */
+-}
+-
+-EXPORT_SYMBOL(generic_make_request);
+-
+-/**
+- * submit_bio: submit a bio to the block device layer for I/O
+- * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
+- * @bio: The &struct bio which describes the I/O
+- *
+- * submit_bio() is very similar in purpose to generic_make_request(), and
+- * uses that function to do most of the work. Both are fairly rough
+- * interfaces, @bio must be presetup and ready for I/O.
+- *
+- */
+-void submit_bio(int rw, struct bio *bio)
+-{
+- int count = bio_sectors(bio);
+-
+- bio->bi_rw |= rw;
+-
+- /*
+- * If it's a regular read/write or a barrier with data attached,
+- * go through the normal accounting stuff before submission.
+- */
+- if (!bio_empty_barrier(bio)) {
+-
+- BIO_BUG_ON(!bio->bi_size);
+- BIO_BUG_ON(!bio->bi_io_vec);
+-
+- if (rw & WRITE) {
+- count_vm_events(PGPGOUT, count);
+- } else {
+- task_io_account_read(bio->bi_size);
+- count_vm_events(PGPGIN, count);
+- }
+-
+- if (unlikely(block_dump)) {
+- char b[BDEVNAME_SIZE];
+- printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
+- current->comm, task_pid_nr(current),
+- (rw & WRITE) ? "WRITE" : "READ",
+- (unsigned long long)bio->bi_sector,
+- bdevname(bio->bi_bdev,b));
+- }
+- }
+-
+- generic_make_request(bio);
+-}
+-
+-EXPORT_SYMBOL(submit_bio);
+-
+-static void blk_recalc_rq_sectors(struct request *rq, int nsect)
+-{
+- if (blk_fs_request(rq)) {
+- rq->hard_sector += nsect;
+- rq->hard_nr_sectors -= nsect;
+-
+- /*
+- * Move the I/O submission pointers ahead if required.
+- */
+- if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
+- (rq->sector <= rq->hard_sector)) {
+- rq->sector = rq->hard_sector;
+- rq->nr_sectors = rq->hard_nr_sectors;
+- rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
+- rq->current_nr_sectors = rq->hard_cur_sectors;
+- rq->buffer = bio_data(rq->bio);
+- }
+-
+- /*
+- * if total number of sectors is less than the first segment
+- * size, something has gone terribly wrong
+- */
+- if (rq->nr_sectors < rq->current_nr_sectors) {
+- printk("blk: request botched\n");
+- rq->nr_sectors = rq->current_nr_sectors;
+- }
+- }
+-}
-
- q->ordseq = 0;
- rq = q->orig_bar_rq;
-
-- end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
-- end_that_request_last(rq, uptodate);
-+ if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
-+ BUG();
- }
-
- static void pre_flush_end_io(struct request *rq, int error)
-@@ -486,9 +481,9 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
- * ORDERED_NONE while this request is on it.
- */
- blkdev_dequeue_request(rq);
-- end_that_request_first(rq, -EOPNOTSUPP,
-- rq->hard_nr_sectors);
-- end_that_request_last(rq, -EOPNOTSUPP);
-+ if (__blk_end_request(rq, -EOPNOTSUPP,
-+ blk_rq_bytes(rq)))
-+ BUG();
- *rqp = NULL;
- return 0;
- }
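The two hunks above move the barrier-completion paths from the old end_that_request_first()/end_that_request_last() pair to __blk_end_request(), which takes an error code and a byte count and returns non-zero while the request still has bytes outstanding. A hedged sketch of a driver-side completion step using the new helper (the function name and the way the byte count is obtained are illustrative):

#include <linux/blkdev.h>

/* illustrative completion step: finish 'bytes' of 'rq' */
static void example_complete(struct request *rq, int error, unsigned int bytes)
{
	/*
	 * __blk_end_request() expects the queue lock to be held; it
	 * returns 0 once the whole request has been completed.
	 */
	if (__blk_end_request(rq, error, bytes))
		return;		/* more of the request still pending */

	/* the request is fully completed here; nothing further to end */
}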
-@@ -726,6 +721,45 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
- EXPORT_SYMBOL(blk_queue_stack_limits);
-
- /**
-+ * blk_queue_dma_drain - Set up a drain buffer for excess dma.
-+ *
-+ * @q: the request queue for the device
-+ * @buf: physically contiguous buffer
-+ * @size: size of the buffer in bytes
-+ *
-+ * Some devices have excess DMA problems and can't simply discard (or
-+ * zero fill) the unwanted piece of the transfer. They have to have a
-+ * real area of memory to transfer it into. The use case for this is
-+ * ATAPI devices in DMA mode. If the packet command causes a transfer
-+ * bigger than the transfer size some HBAs will lock up if there
-+ * aren't DMA elements to contain the excess transfer. What this API
-+ * does is adjust the queue so that the buf is always appended
-+ * silently to the scatterlist.
-+ *
-+ * Note: This routine adjusts max_hw_segments to make room for
-+ * appending the drain buffer. If you call
-+ * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
-+ * calling this routine, you must set the limit to one fewer than your
-+ * device can support otherwise there won't be room for the drain
-+ * buffer.
-+ */
-+int blk_queue_dma_drain(struct request_queue *q, void *buf,
-+ unsigned int size)
-+{
-+ if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
-+ return -EINVAL;
-+ /* make room for appending the drain */
-+ --q->max_hw_segments;
-+ --q->max_phys_segments;
-+ q->dma_drain_buffer = buf;
-+ q->dma_drain_size = size;
-+
-+ return 0;
-+}
-+
-+EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
-+
-+/**
- * blk_queue_segment_boundary - set boundary rules for segment merging
- * @q: the request queue for the device
- * @mask: the memory boundary mask
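The hunk above adds blk_queue_dma_drain(), which steals one segment from the queue limits and has the block layer silently append a driver-supplied buffer to every scatterlist, so ATAPI-style hardware always has somewhere to put an over-length transfer. A sketch of how a low-level driver might wire this up at initialisation time; the buffer size, allocation and function name are assumptions, not taken from the patch:

#include <linux/blkdev.h>
#include <linux/slab.h>

#define EXAMPLE_DRAIN_SIZE	512	/* assumed: covers the device's worst-case overshoot */

static int example_init_queue(struct request_queue *q)
{
	/* kmalloc() provides the physically contiguous buffer the API asks for */
	void *drain = kmalloc(EXAMPLE_DRAIN_SIZE, GFP_KERNEL);

	if (!drain)
		return -ENOMEM;

	/*
	 * Per the note in the kerneldoc above: if max_hw_segments or
	 * max_phys_segments are set after this call, they must be one
	 * fewer than the hardware limit, because the drain takes a slot.
	 */
	if (blk_queue_dma_drain(q, drain, EXAMPLE_DRAIN_SIZE)) {
		kfree(drain);
		return -EINVAL;
	}
	return 0;
}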
-@@ -760,6 +794,30 @@ void blk_queue_dma_alignment(struct request_queue *q, int mask)
- EXPORT_SYMBOL(blk_queue_dma_alignment);
-
- /**
-+ * blk_queue_update_dma_alignment - update dma length and memory alignment
-+ * @q: the request queue for the device
-+ * @mask: alignment mask
-+ *
-+ * description:
-+ * update required memory and length alignment for direct dma transactions.
-+ * If the requested alignment is larger than the current alignment, then
-+ * the current queue alignment is updated to the new value, otherwise it
-+ * is left alone. The design of this is to allow multiple objects
-+ * (driver, device, transport etc) to set their respective
-+ * alignments without having them interfere.
-+ *
-+ **/
-+void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
-+{
-+ BUG_ON(mask > PAGE_SIZE);
-+
-+ if (mask > q->dma_alignment)
-+ q->dma_alignment = mask;
-+}
-+
-+EXPORT_SYMBOL(blk_queue_update_dma_alignment);
-+
-+/**
- * blk_queue_find_tag - find a request by its tag and queue
- * @q: The request queue for the device
- * @tag: The tag of the request
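blk_queue_update_dma_alignment(), added in the previous hunk, only ever raises the queue's alignment mask, so the driver, the transport and the device can each state a requirement without undoing a stricter one set earlier. A tiny sketch of the intended call pattern; the mask values are illustrative:

#include <linux/blkdev.h>

static void example_set_alignment(struct request_queue *q)
{
	/* the transport wants 4-byte alignment ... */
	blk_queue_update_dma_alignment(q, 3);
	/* ... the device later asks for 512-byte alignment; the mask grows */
	blk_queue_update_dma_alignment(q, 511);
	/* a later call with a smaller mask (3 again) would leave 511 in place */
}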
-@@ -1355,6 +1413,16 @@ new_segment:
- bvprv = bvec;
- } /* segments in rq */
-
-+ if (q->dma_drain_size) {
-+ sg->page_link &= ~0x02;
-+ sg = sg_next(sg);
-+ sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
-+ q->dma_drain_size,
-+ ((unsigned long)q->dma_drain_buffer) &
-+ (PAGE_SIZE - 1));
-+ nsegs++;
-+ }
-+
- if (sg)
- sg_mark_end(sg);
-
-@@ -1862,9 +1930,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
-
- init_timer(&q->unplug_timer);
-
-- kobject_set_name(&q->kobj, "%s", "queue");
-- q->kobj.ktype = &queue_ktype;
-- kobject_init(&q->kobj);
-+ kobject_init(&q->kobj, &queue_ktype);
-
- mutex_init(&q->sysfs_lock);
-
-@@ -3415,29 +3481,36 @@ static void blk_recalc_rq_sectors(struct request *rq, int nsect)
- }
- }
-
-static int __end_that_request_first(struct request *req, int uptodate,
-+/**
-+ * __end_that_request_first - end I/O on a request
-+ * @req: the request being processed
-+ * @error: 0 for success, < 0 for error
-+ * @nr_bytes: number of bytes to complete
-+ *
-+ * Description:
-+ * Ends I/O on a number of bytes attached to @req, and sets it up
-+ * for the next range of segments (if any) in the cluster.
-+ *
-+ * Return:
-+ * 0 - we are done with this request, call end_that_request_last()
-+ * 1 - still buffers pending for this request
-+ **/
-+static int __end_that_request_first(struct request *req, int error,
- int nr_bytes)
- {
+- int nr_bytes)
+-{
- int total_bytes, bio_nbytes, error, next_idx = 0;
-+ int total_bytes, bio_nbytes, next_idx = 0;
- struct bio *bio;
-
- blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
-
- /*
+- struct bio *bio;
+-
+- blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+-
+- /*
- * extend uptodate bool to allow < 0 value to be direct io error
- */
- error = 0;
@@ -137965,21 +145749,106 @@
- error = !uptodate ? -EIO : uptodate;
-
- /*
- * for a REQ_BLOCK_PC request, we want to carry any eventual
- * sense key with us all the way through
- */
- if (!blk_pc_request(req))
- req->errors = 0;
-
+- * for a REQ_BLOCK_PC request, we want to carry any eventual
+- * sense key with us all the way through
+- */
+- if (!blk_pc_request(req))
+- req->errors = 0;
+-
- if (!uptodate) {
-+ if (error) {
- if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
- printk("end_request: I/O error, dev %s, sector %llu\n",
- req->rq_disk ? req->rq_disk->disk_name : "?",
-@@ -3531,49 +3604,6 @@ static int __end_that_request_first(struct request *req, int uptodate,
- return 1;
- }
-
+- if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
+- printk("end_request: I/O error, dev %s, sector %llu\n",
+- req->rq_disk ? req->rq_disk->disk_name : "?",
+- (unsigned long long)req->sector);
+- }
+-
+- if (blk_fs_request(req) && req->rq_disk) {
+- const int rw = rq_data_dir(req);
+-
+- disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
+- }
+-
+- total_bytes = bio_nbytes = 0;
+- while ((bio = req->bio) != NULL) {
+- int nbytes;
+-
+- /*
+- * For an empty barrier request, the low level driver must
+- * store a potential error location in ->sector. We pass
+- * that back up in ->bi_sector.
+- */
+- if (blk_empty_barrier(req))
+- bio->bi_sector = req->sector;
+-
+- if (nr_bytes >= bio->bi_size) {
+- req->bio = bio->bi_next;
+- nbytes = bio->bi_size;
+- req_bio_endio(req, bio, nbytes, error);
+- next_idx = 0;
+- bio_nbytes = 0;
+- } else {
+- int idx = bio->bi_idx + next_idx;
+-
+- if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
+- blk_dump_rq_flags(req, "__end_that");
+- printk("%s: bio idx %d >= vcnt %d\n",
+- __FUNCTION__,
+- bio->bi_idx, bio->bi_vcnt);
+- break;
+- }
+-
+- nbytes = bio_iovec_idx(bio, idx)->bv_len;
+- BIO_BUG_ON(nbytes > bio->bi_size);
+-
+- /*
+- * not a complete bvec done
+- */
+- if (unlikely(nbytes > nr_bytes)) {
+- bio_nbytes += nr_bytes;
+- total_bytes += nr_bytes;
+- break;
+- }
+-
+- /*
+- * advance to the next vector
+- */
+- next_idx++;
+- bio_nbytes += nbytes;
+- }
+-
+- total_bytes += nbytes;
+- nr_bytes -= nbytes;
+-
+- if ((bio = req->bio)) {
+- /*
+- * end more in this run, or just return 'not-done'
+- */
+- if (unlikely(nr_bytes <= 0))
+- break;
+- }
+- }
+-
+- /*
+- * completely done
+- */
+- if (!req->bio)
+- return 0;
+-
+- /*
+- * if the request wasn't completed, update state
+- */
+- if (bio_nbytes) {
+- req_bio_endio(req, bio, bio_nbytes, error);
+- bio->bi_idx += next_idx;
+- bio_iovec(bio)->bv_offset += nr_bytes;
+- bio_iovec(bio)->bv_len -= nr_bytes;
+- }
+-
+- blk_recalc_rq_sectors(req, total_bytes >> 9);
+- blk_recalc_rq_segments(req);
+- return 1;
+-}
+-
-/**
- * end_that_request_first - end I/O on a request
- * @req: the request being processed
@@ -138023,405 +145892,333 @@
-
-EXPORT_SYMBOL(end_that_request_chunk);
-
- /*
- * splice the completion data to a local structure and hand off to
- * process_completion_queue() to complete the requests
-@@ -3653,17 +3683,15 @@ EXPORT_SYMBOL(blk_complete_request);
- /*
- * queue lock must be held
- */
+-/*
+- * splice the completion data to a local structure and hand off to
+- * process_completion_queue() to complete the requests
+- */
+-static void blk_done_softirq(struct softirq_action *h)
+-{
+- struct list_head *cpu_list, local_list;
+-
+- local_irq_disable();
+- cpu_list = &__get_cpu_var(blk_cpu_done);
+- list_replace_init(cpu_list, &local_list);
+- local_irq_enable();
+-
+- while (!list_empty(&local_list)) {
+- struct request *rq = list_entry(local_list.next, struct request, donelist);
+-
+- list_del_init(&rq->donelist);
+- rq->q->softirq_done_fn(rq);
+- }
+-}
+-
+-static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
+- void *hcpu)
+-{
+- /*
+- * If a CPU goes away, splice its entries to the current CPU
+- * and trigger a run of the softirq
+- */
+- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+- int cpu = (unsigned long) hcpu;
+-
+- local_irq_disable();
+- list_splice_init(&per_cpu(blk_cpu_done, cpu),
+- &__get_cpu_var(blk_cpu_done));
+- raise_softirq_irqoff(BLOCK_SOFTIRQ);
+- local_irq_enable();
+- }
+-
+- return NOTIFY_OK;
+-}
+-
+-
+-static struct notifier_block blk_cpu_notifier __cpuinitdata = {
+- .notifier_call = blk_cpu_notify,
+-};
+-
+-/**
+- * blk_complete_request - end I/O on a request
+- * @req: the request being processed
+- *
+- * Description:
+- * Ends all I/O on a request. It does not handle partial completions,
+- * unless the driver actually implements this in its completion callback
+- * through requeueing. The actual completion happens out-of-order,
+- * through a softirq handler. The user must have registered a completion
+- * callback through blk_queue_softirq_done().
+- **/
+-
+-void blk_complete_request(struct request *req)
+-{
+- struct list_head *cpu_list;
+- unsigned long flags;
+-
+- BUG_ON(!req->q->softirq_done_fn);
+-
+- local_irq_save(flags);
+-
+- cpu_list = &__get_cpu_var(blk_cpu_done);
+- list_add_tail(&req->donelist, cpu_list);
+- raise_softirq_irqoff(BLOCK_SOFTIRQ);
+-
+- local_irq_restore(flags);
+-}
+-
+-EXPORT_SYMBOL(blk_complete_request);
+-
+-/*
+- * queue lock must be held
+- */
-void end_that_request_last(struct request *req, int uptodate)
-+static void end_that_request_last(struct request *req, int error)
- {
- struct gendisk *disk = req->rq_disk;
+-{
+- struct gendisk *disk = req->rq_disk;
- int error;
-
+-
- /*
- * extend uptodate bool to allow < 0 value to be direct io error
- */
- error = 0;
- if (end_io_error(uptodate))
- error = !uptodate ? -EIO : uptodate;
-+ if (blk_rq_tagged(req))
-+ blk_queue_end_tag(req->q, req);
-+
-+ if (blk_queued_rq(req))
-+ blkdev_dequeue_request(req);
-
- if (unlikely(laptop_mode) && blk_fs_request(req))
- laptop_io_completion();
-@@ -3682,32 +3710,54 @@ void end_that_request_last(struct request *req, int uptodate)
- disk_round_stats(disk);
- disk->in_flight--;
- }
-+
- if (req->end_io)
- req->end_io(req, error);
+-
+- if (unlikely(laptop_mode) && blk_fs_request(req))
+- laptop_io_completion();
+-
+- /*
+- * Account IO completion. bar_rq isn't accounted as a normal
+- * IO on queueing nor completion. Accounting the containing
+- * request is enough.
+- */
+- if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
+- unsigned long duration = jiffies - req->start_time;
+- const int rw = rq_data_dir(req);
+-
+- __disk_stat_inc(disk, ios[rw]);
+- __disk_stat_add(disk, ticks[rw], duration);
+- disk_round_stats(disk);
+- disk->in_flight--;
+- }
+- if (req->end_io)
+- req->end_io(req, error);
- else
-+ else {
-+ if (blk_bidi_rq(req))
-+ __blk_put_request(req->next_rq->q, req->next_rq);
-+
- __blk_put_request(req->q, req);
-+ }
- }
-
+- __blk_put_request(req->q, req);
+-}
+-
-EXPORT_SYMBOL(end_that_request_last);
-
- static inline void __end_request(struct request *rq, int uptodate,
+-static inline void __end_request(struct request *rq, int uptodate,
- unsigned int nr_bytes, int dequeue)
-+ unsigned int nr_bytes)
- {
+-{
- if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
- if (dequeue)
- blkdev_dequeue_request(rq);
- add_disk_randomness(rq->rq_disk);
- end_that_request_last(rq, uptodate);
- }
-+ int error = 0;
-+
-+ if (uptodate <= 0)
-+ error = uptodate ? uptodate : -EIO;
-+
-+ __blk_end_request(rq, error, nr_bytes);
- }
-
+-}
+-
-static unsigned int rq_byte_size(struct request *rq)
-+/**
-+ * blk_rq_bytes - Returns bytes left to complete in the entire request
-+ **/
-+unsigned int blk_rq_bytes(struct request *rq)
- {
- if (blk_fs_request(rq))
- return rq->hard_nr_sectors << 9;
-
- return rq->data_len;
- }
-+EXPORT_SYMBOL_GPL(blk_rq_bytes);
-+
-+/**
-+ * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
-+ **/
-+unsigned int blk_rq_cur_bytes(struct request *rq)
-+{
-+ if (blk_fs_request(rq))
-+ return rq->current_nr_sectors << 9;
-+
-+ if (rq->bio)
-+ return rq->bio->bi_size;
-+
-+ return rq->data_len;
-+}
-+EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
-
- /**
- * end_queued_request - end all I/O on a queued request
-@@ -3722,7 +3772,7 @@ static unsigned int rq_byte_size(struct request *rq)
- **/
- void end_queued_request(struct request *rq, int uptodate)
- {
+-{
+- if (blk_fs_request(rq))
+- return rq->hard_nr_sectors << 9;
+-
+- return rq->data_len;
+-}
+-
+-/**
+- * end_queued_request - end all I/O on a queued request
+- * @rq: the request being processed
+- * @uptodate: error value or 0/1 uptodate flag
+- *
+- * Description:
+- * Ends all I/O on a request, and removes it from the block layer queues.
+- * Not suitable for normal IO completion, unless the driver still has
+- * the request attached to the block layer.
+- *
+- **/
+-void end_queued_request(struct request *rq, int uptodate)
+-{
- __end_request(rq, uptodate, rq_byte_size(rq), 1);
-+ __end_request(rq, uptodate, blk_rq_bytes(rq));
- }
- EXPORT_SYMBOL(end_queued_request);
-
-@@ -3739,7 +3789,7 @@ EXPORT_SYMBOL(end_queued_request);
- **/
- void end_dequeued_request(struct request *rq, int uptodate)
- {
+-}
+-EXPORT_SYMBOL(end_queued_request);
+-
+-/**
+- * end_dequeued_request - end all I/O on a dequeued request
+- * @rq: the request being processed
+- * @uptodate: error value or 0/1 uptodate flag
+- *
+- * Description:
+- * Ends all I/O on a request. The request must already have been
+- * dequeued using blkdev_dequeue_request(), as is normally the case
+- * for most drivers.
+- *
+- **/
+-void end_dequeued_request(struct request *rq, int uptodate)
+-{
- __end_request(rq, uptodate, rq_byte_size(rq), 0);
-+ __end_request(rq, uptodate, blk_rq_bytes(rq));
- }
- EXPORT_SYMBOL(end_dequeued_request);
-
-@@ -3765,10 +3815,159 @@ EXPORT_SYMBOL(end_dequeued_request);
- **/
- void end_request(struct request *req, int uptodate)
- {
+-}
+-EXPORT_SYMBOL(end_dequeued_request);
+-
+-
+-/**
+- * end_request - end I/O on the current segment of the request
+- * @req: the request being processed
+- * @uptodate: error value or 0/1 uptodate flag
+- *
+- * Description:
+- * Ends I/O on the current segment of a request. If that is the only
+- * remaining segment, the request is also completed and freed.
+- *
+- * This is a remnant of how older block drivers handled IO completions.
+- * Modern drivers typically end IO on the full request in one go, unless
+- * they have a residual value to account for. For that case this function
+- * isn't really useful, unless the residual just happens to be the
+- * full current segment. In other words, don't use this function in new
+- * code. Either use end_request_completely(), or the
+- * end_that_request_chunk() (along with end_that_request_last()) for
+- * partial completions.
+- *
+- **/
+-void end_request(struct request *req, int uptodate)
+-{
- __end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
-+ __end_request(req, uptodate, req->hard_cur_sectors << 9);
- }
- EXPORT_SYMBOL(end_request);
-
-+/**
-+ * blk_end_io - Generic end_io function to complete a request.
-+ * @rq: the request being processed
-+ * @error: 0 for success, < 0 for error
-+ * @nr_bytes: number of bytes to complete @rq
-+ * @bidi_bytes: number of bytes to complete @rq->next_rq
-+ * @drv_callback: function called between completion of bios in the request
-+ * and completion of the request.
-+ * If the callback returns non 0, this helper returns without
-+ * completion of the request.
-+ *
-+ * Description:
-+ * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
-+ * If @rq has leftover, sets it up for the next range of segments.
-+ *
-+ * Return:
-+ * 0 - we are done with this request
-+ * 1 - this request is not freed yet, it still has pending buffers.
-+ **/
-+static int blk_end_io(struct request *rq, int error, int nr_bytes,
-+ int bidi_bytes, int (drv_callback)(struct request *))
-+{
-+ struct request_queue *q = rq->q;
-+ unsigned long flags = 0UL;
-+
-+ if (blk_fs_request(rq) || blk_pc_request(rq)) {
-+ if (__end_that_request_first(rq, error, nr_bytes))
-+ return 1;
-+
-+ /* Bidi request must be completed as a whole */
-+ if (blk_bidi_rq(rq) &&
-+ __end_that_request_first(rq->next_rq, error, bidi_bytes))
-+ return 1;
-+ }
-+
-+ /* Special feature for tricky drivers */
-+ if (drv_callback && drv_callback(rq))
-+ return 1;
-+
-+ add_disk_randomness(rq->rq_disk);
-+
-+ spin_lock_irqsave(q->queue_lock, flags);
-+ end_that_request_last(rq, error);
-+ spin_unlock_irqrestore(q->queue_lock, flags);
-+
-+ return 0;
-+}
-+
-+/**
-+ * blk_end_request - Helper function for drivers to complete the request.
-+ * @rq: the request being processed
-+ * @error: 0 for success, < 0 for error
-+ * @nr_bytes: number of bytes to complete
-+ *
-+ * Description:
-+ * Ends I/O on a number of bytes attached to @rq.
-+ * If @rq has leftover, sets it up for the next range of segments.
-+ *
-+ * Return:
-+ * 0 - we are done with this request
-+ * 1 - still buffers pending for this request
-+ **/
-+int blk_end_request(struct request *rq, int error, int nr_bytes)
-+{
-+ return blk_end_io(rq, error, nr_bytes, 0, NULL);
-+}
-+EXPORT_SYMBOL_GPL(blk_end_request);
-+
-+/**
-+ * __blk_end_request - Helper function for drivers to complete the request.
-+ * @rq: the request being processed
-+ * @error: 0 for success, < 0 for error
-+ * @nr_bytes: number of bytes to complete
-+ *
-+ * Description:
-+ * Must be called with queue lock held unlike blk_end_request().
-+ *
-+ * Return:
-+ * 0 - we are done with this request
-+ * 1 - still buffers pending for this request
-+ **/
-+int __blk_end_request(struct request *rq, int error, int nr_bytes)
-+{
-+ if (blk_fs_request(rq) || blk_pc_request(rq)) {
-+ if (__end_that_request_first(rq, error, nr_bytes))
-+ return 1;
-+ }
-+
-+ add_disk_randomness(rq->rq_disk);
-+
-+ end_that_request_last(rq, error);
-+
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(__blk_end_request);
-+
-+/**
-+ * blk_end_bidi_request - Helper function for drivers to complete bidi request.
-+ * @rq: the bidi request being processed
-+ * @error: 0 for success, < 0 for error
-+ * @nr_bytes: number of bytes to complete @rq
-+ * @bidi_bytes: number of bytes to complete @rq->next_rq
-+ *
-+ * Description:
-+ * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
-+ *
-+ * Return:
-+ * 0 - we are done with this request
-+ * 1 - still buffers pending for this request
-+ **/
-+int blk_end_bidi_request(struct request *rq, int error, int nr_bytes,
-+ int bidi_bytes)
-+{
-+ return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
-+}
-+EXPORT_SYMBOL_GPL(blk_end_bidi_request);
-+
-+/**
-+ * blk_end_request_callback - Special helper function for tricky drivers
-+ * @rq: the request being processed
-+ * @error: 0 for success, < 0 for error
-+ * @nr_bytes: number of bytes to complete
-+ * @drv_callback: function called between completion of bios in the request
-+ * and completion of the request.
-+ * If the callback returns non 0, this helper returns without
-+ * completion of the request.
-+ *
-+ * Description:
-+ * Ends I/O on a number of bytes attached to @rq.
-+ * If @rq has leftover, sets it up for the next range of segments.
-+ *
-+ * This special helper function is used only for existing tricky drivers.
-+ * (e.g. cdrom_newpc_intr() of ide-cd)
-+ * This interface will be removed when such drivers are rewritten.
-+ * Don't use this interface in other places anymore.
-+ *
-+ * Return:
-+ * 0 - we are done with this request
-+ * 1 - this request is not freed yet.
-+ * this request still has pending buffers or
-+ * the driver doesn't want to finish this request yet.
-+ **/
-+int blk_end_request_callback(struct request *rq, int error, int nr_bytes,
-+ int (drv_callback)(struct request *))
-+{
-+ return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
-+}
-+EXPORT_SYMBOL_GPL(blk_end_request_callback);
-+
- static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
- struct bio *bio)
- {
-@@ -3831,55 +4030,100 @@ int __init blk_dev_init(void)
- return 0;
- }
-
-+static void cfq_dtor(struct io_context *ioc)
-+{
-+ struct cfq_io_context *cic[1];
-+ int r;
-+
-+ /*
-+ * We don't have a specific key to lookup with, so use the gang
-+ * lookup to just retrieve the first item stored. The cfq exit
-+ * function will iterate the full tree, so any member will do.
-+ */
-+ r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
-+ if (r > 0)
-+ cic[0]->dtor(ioc);
-+}
-+
- /*
+-}
+-EXPORT_SYMBOL(end_request);
+-
+-static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+- struct bio *bio)
+-{
+- /* first two bits are identical in rq->cmd_flags and bio->bi_rw */
+- rq->cmd_flags |= (bio->bi_rw & 3);
+-
+- rq->nr_phys_segments = bio_phys_segments(q, bio);
+- rq->nr_hw_segments = bio_hw_segments(q, bio);
+- rq->current_nr_sectors = bio_cur_sectors(bio);
+- rq->hard_cur_sectors = rq->current_nr_sectors;
+- rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
+- rq->buffer = bio_data(bio);
+- rq->data_len = bio->bi_size;
+-
+- rq->bio = rq->biotail = bio;
+-
+- if (bio->bi_bdev)
+- rq->rq_disk = bio->bi_bdev->bd_disk;
+-}
+-
+-int kblockd_schedule_work(struct work_struct *work)
+-{
+- return queue_work(kblockd_workqueue, work);
+-}
+-
+-EXPORT_SYMBOL(kblockd_schedule_work);
+-
+-void kblockd_flush_work(struct work_struct *work)
+-{
+- cancel_work_sync(work);
+-}
+-EXPORT_SYMBOL(kblockd_flush_work);
+-
+-int __init blk_dev_init(void)
+-{
+- int i;
+-
+- kblockd_workqueue = create_workqueue("kblockd");
+- if (!kblockd_workqueue)
+- panic("Failed to create kblockd\n");
+-
+- request_cachep = kmem_cache_create("blkdev_requests",
+- sizeof(struct request), 0, SLAB_PANIC, NULL);
+-
+- requestq_cachep = kmem_cache_create("blkdev_queue",
+- sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
+-
+- iocontext_cachep = kmem_cache_create("blkdev_ioc",
+- sizeof(struct io_context), 0, SLAB_PANIC, NULL);
+-
+- for_each_possible_cpu(i)
+- INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+-
+- open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
+- register_hotcpu_notifier(&blk_cpu_notifier);
+-
+- blk_max_low_pfn = max_low_pfn - 1;
+- blk_max_pfn = max_pfn - 1;
+-
+- return 0;
+-}
+-
+-/*
- * IO Context helper functions
-+ * IO Context helper functions. put_io_context() returns 1 if there are no
-+ * more users of this io context, 0 otherwise.
- */
+- */
-void put_io_context(struct io_context *ioc)
-+int put_io_context(struct io_context *ioc)
- {
- if (ioc == NULL)
+-{
+- if (ioc == NULL)
- return;
-+ return 1;
-
- BUG_ON(atomic_read(&ioc->refcount) == 0);
-
- if (atomic_dec_and_test(&ioc->refcount)) {
+-
+- BUG_ON(atomic_read(&ioc->refcount) == 0);
+-
+- if (atomic_dec_and_test(&ioc->refcount)) {
- struct cfq_io_context *cic;
-
- rcu_read_lock();
- if (ioc->aic && ioc->aic->dtor)
- ioc->aic->dtor(ioc->aic);
+- rcu_read_lock();
+- if (ioc->aic && ioc->aic->dtor)
+- ioc->aic->dtor(ioc->aic);
- if (ioc->cic_root.rb_node != NULL) {
- struct rb_node *n = rb_first(&ioc->cic_root);
-
- cic = rb_entry(n, struct cfq_io_context, rb_node);
- cic->dtor(ioc);
- }
- rcu_read_unlock();
-+ cfq_dtor(ioc);
-
- kmem_cache_free(iocontext_cachep, ioc);
-+ return 1;
- }
-+ return 0;
- }
- EXPORT_SYMBOL(put_io_context);
-
-+static void cfq_exit(struct io_context *ioc)
-+{
-+ struct cfq_io_context *cic[1];
-+ int r;
-+
-+ rcu_read_lock();
-+ /*
-+ * See comment for cfq_dtor()
-+ */
-+ r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
-+ rcu_read_unlock();
-+
-+ if (r > 0)
-+ cic[0]->exit(ioc);
-+}
-+
- /* Called by the exitting task */
- void exit_io_context(void)
- {
- struct io_context *ioc;
+- rcu_read_unlock();
+-
+- kmem_cache_free(iocontext_cachep, ioc);
+- }
+-}
+-EXPORT_SYMBOL(put_io_context);
+-
+-/* Called by the exitting task */
+-void exit_io_context(void)
+-{
+- struct io_context *ioc;
- struct cfq_io_context *cic;
-
- task_lock(current);
- ioc = current->io_context;
- current->io_context = NULL;
- task_unlock(current);
-
+-
+- task_lock(current);
+- ioc = current->io_context;
+- current->io_context = NULL;
+- task_unlock(current);
+-
- ioc->task = NULL;
- if (ioc->aic && ioc->aic->exit)
- ioc->aic->exit(ioc->aic);
- if (ioc->cic_root.rb_node != NULL) {
- cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node);
- cic->exit(ioc);
-+ if (atomic_dec_and_test(&ioc->nr_tasks)) {
-+ if (ioc->aic && ioc->aic->exit)
-+ ioc->aic->exit(ioc->aic);
-+ cfq_exit(ioc);
-+
-+ put_io_context(ioc);
- }
-+}
-+
-+struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
-+{
-+ struct io_context *ret;
-
+- }
+-
- put_io_context(ioc);
-+ ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
-+ if (ret) {
-+ atomic_set(&ret->refcount, 1);
-+ atomic_set(&ret->nr_tasks, 1);
-+ spin_lock_init(&ret->lock);
-+ ret->ioprio_changed = 0;
-+ ret->ioprio = 0;
-+ ret->last_waited = jiffies; /* doesn't matter... */
-+ ret->nr_batch_requests = 0; /* because this is 0 */
-+ ret->aic = NULL;
-+ INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
-+ ret->ioc_data = NULL;
-+ }
-+
-+ return ret;
- }
-
- /*
-@@ -3899,16 +4143,8 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node)
- if (likely(ret))
- return ret;
-
+-}
+-
+-/*
+- * If the current task has no IO context then create one and initialise it.
+- * Otherwise, return its existing IO context.
+- *
+- * This returned IO context doesn't have a specifically elevated refcount,
+- * but since the current task itself holds a reference, the context can be
+- * used in general code, so long as it stays within `current` context.
+- */
+-static struct io_context *current_io_context(gfp_t gfp_flags, int node)
+-{
+- struct task_struct *tsk = current;
+- struct io_context *ret;
+-
+- ret = tsk->io_context;
+- if (likely(ret))
+- return ret;
+-
- ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
-+ ret = alloc_io_context(gfp_flags, node);
- if (ret) {
+- if (ret) {
- atomic_set(&ret->refcount, 1);
- ret->task = current;
- ret->ioprio_changed = 0;
@@ -138430,52 +146227,309 @@
- ret->aic = NULL;
- ret->cic_root.rb_node = NULL;
- ret->ioc_data = NULL;
- /* make sure set_task_ioprio() sees the settings above */
- smp_wmb();
- tsk->io_context = ret;
-@@ -3925,10 +4161,18 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node)
- */
- struct io_context *get_io_context(gfp_t gfp_flags, int node)
- {
+- /* make sure set_task_ioprio() sees the settings above */
+- smp_wmb();
+- tsk->io_context = ret;
+- }
+-
+- return ret;
+-}
+-
+-/*
+- * If the current task has no IO context then create one and initialise it.
+- * If it does have a context, take a ref on it.
+- *
+- * This is always called in the context of the task which submitted the I/O.
+- */
+-struct io_context *get_io_context(gfp_t gfp_flags, int node)
+-{
- struct io_context *ret;
- ret = current_io_context(gfp_flags, node);
- if (likely(ret))
- atomic_inc(&ret->refcount);
-+ struct io_context *ret = NULL;
-+
-+ /*
-+ * Check for unlikely race with exiting task. ioc ref count is
-+ * zero when ioc is being detached.
-+ */
-+ do {
-+ ret = current_io_context(gfp_flags, node);
-+ if (unlikely(!ret))
-+ break;
-+ } while (!atomic_inc_not_zero(&ret->refcount));
-+
- return ret;
- }
- EXPORT_SYMBOL(get_io_context);
-@@ -4182,9 +4426,8 @@ int blk_register_queue(struct gendisk *disk)
- if (!q || !q->request_fn)
- return -ENXIO;
-
+- return ret;
+-}
+-EXPORT_SYMBOL(get_io_context);
+-
+-void copy_io_context(struct io_context **pdst, struct io_context **psrc)
+-{
+- struct io_context *src = *psrc;
+- struct io_context *dst = *pdst;
+-
+- if (src) {
+- BUG_ON(atomic_read(&src->refcount) == 0);
+- atomic_inc(&src->refcount);
+- put_io_context(dst);
+- *pdst = src;
+- }
+-}
+-EXPORT_SYMBOL(copy_io_context);
+-
+-void swap_io_context(struct io_context **ioc1, struct io_context **ioc2)
+-{
+- struct io_context *temp;
+- temp = *ioc1;
+- *ioc1 = *ioc2;
+- *ioc2 = temp;
+-}
+-EXPORT_SYMBOL(swap_io_context);
+-
+-/*
+- * sysfs parts below
+- */
+-struct queue_sysfs_entry {
+- struct attribute attr;
+- ssize_t (*show)(struct request_queue *, char *);
+- ssize_t (*store)(struct request_queue *, const char *, size_t);
+-};
+-
+-static ssize_t
+-queue_var_show(unsigned int var, char *page)
+-{
+- return sprintf(page, "%d\n", var);
+-}
+-
+-static ssize_t
+-queue_var_store(unsigned long *var, const char *page, size_t count)
+-{
+- char *p = (char *) page;
+-
+- *var = simple_strtoul(p, &p, 10);
+- return count;
+-}
+-
+-static ssize_t queue_requests_show(struct request_queue *q, char *page)
+-{
+- return queue_var_show(q->nr_requests, (page));
+-}
+-
+-static ssize_t
+-queue_requests_store(struct request_queue *q, const char *page, size_t count)
+-{
+- struct request_list *rl = &q->rq;
+- unsigned long nr;
+- int ret = queue_var_store(&nr, page, count);
+- if (nr < BLKDEV_MIN_RQ)
+- nr = BLKDEV_MIN_RQ;
+-
+- spin_lock_irq(q->queue_lock);
+- q->nr_requests = nr;
+- blk_queue_congestion_threshold(q);
+-
+- if (rl->count[READ] >= queue_congestion_on_threshold(q))
+- blk_set_queue_congested(q, READ);
+- else if (rl->count[READ] < queue_congestion_off_threshold(q))
+- blk_clear_queue_congested(q, READ);
+-
+- if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
+- blk_set_queue_congested(q, WRITE);
+- else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
+- blk_clear_queue_congested(q, WRITE);
+-
+- if (rl->count[READ] >= q->nr_requests) {
+- blk_set_queue_full(q, READ);
+- } else if (rl->count[READ]+1 <= q->nr_requests) {
+- blk_clear_queue_full(q, READ);
+- wake_up(&rl->wait[READ]);
+- }
+-
+- if (rl->count[WRITE] >= q->nr_requests) {
+- blk_set_queue_full(q, WRITE);
+- } else if (rl->count[WRITE]+1 <= q->nr_requests) {
+- blk_clear_queue_full(q, WRITE);
+- wake_up(&rl->wait[WRITE]);
+- }
+- spin_unlock_irq(q->queue_lock);
+- return ret;
+-}
+-
+-static ssize_t queue_ra_show(struct request_queue *q, char *page)
+-{
+- int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
+-
+- return queue_var_show(ra_kb, (page));
+-}
+-
+-static ssize_t
+-queue_ra_store(struct request_queue *q, const char *page, size_t count)
+-{
+- unsigned long ra_kb;
+- ssize_t ret = queue_var_store(&ra_kb, page, count);
+-
+- spin_lock_irq(q->queue_lock);
+- q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+- spin_unlock_irq(q->queue_lock);
+-
+- return ret;
+-}
+-
+-static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
+-{
+- int max_sectors_kb = q->max_sectors >> 1;
+-
+- return queue_var_show(max_sectors_kb, (page));
+-}
+-
+-static ssize_t
+-queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
+-{
+- unsigned long max_sectors_kb,
+- max_hw_sectors_kb = q->max_hw_sectors >> 1,
+- page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
+- ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
+-
+- if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
+- return -EINVAL;
+- /*
+- * Take the queue lock to update the readahead and max_sectors
+- * values synchronously:
+- */
+- spin_lock_irq(q->queue_lock);
+- q->max_sectors = max_sectors_kb << 1;
+- spin_unlock_irq(q->queue_lock);
+-
+- return ret;
+-}
+-
+-static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
+-{
+- int max_hw_sectors_kb = q->max_hw_sectors >> 1;
+-
+- return queue_var_show(max_hw_sectors_kb, (page));
+-}
+-
+-
+-static struct queue_sysfs_entry queue_requests_entry = {
+- .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
+- .show = queue_requests_show,
+- .store = queue_requests_store,
+-};
+-
+-static struct queue_sysfs_entry queue_ra_entry = {
+- .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
+- .show = queue_ra_show,
+- .store = queue_ra_store,
+-};
+-
+-static struct queue_sysfs_entry queue_max_sectors_entry = {
+- .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
+- .show = queue_max_sectors_show,
+- .store = queue_max_sectors_store,
+-};
+-
+-static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
+- .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
+- .show = queue_max_hw_sectors_show,
+-};
+-
+-static struct queue_sysfs_entry queue_iosched_entry = {
+- .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
+- .show = elv_iosched_show,
+- .store = elv_iosched_store,
+-};
+-
+-static struct attribute *default_attrs[] = {
+- &queue_requests_entry.attr,
+- &queue_ra_entry.attr,
+- &queue_max_hw_sectors_entry.attr,
+- &queue_max_sectors_entry.attr,
+- &queue_iosched_entry.attr,
+- NULL,
+-};
+-
+-#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
+-
+-static ssize_t
+-queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+-{
+- struct queue_sysfs_entry *entry = to_queue(attr);
+- struct request_queue *q =
+- container_of(kobj, struct request_queue, kobj);
+- ssize_t res;
+-
+- if (!entry->show)
+- return -EIO;
+- mutex_lock(&q->sysfs_lock);
+- if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+- mutex_unlock(&q->sysfs_lock);
+- return -ENOENT;
+- }
+- res = entry->show(q, page);
+- mutex_unlock(&q->sysfs_lock);
+- return res;
+-}
+-
+-static ssize_t
+-queue_attr_store(struct kobject *kobj, struct attribute *attr,
+- const char *page, size_t length)
+-{
+- struct queue_sysfs_entry *entry = to_queue(attr);
+- struct request_queue *q = container_of(kobj, struct request_queue, kobj);
+-
+- ssize_t res;
+-
+- if (!entry->store)
+- return -EIO;
+- mutex_lock(&q->sysfs_lock);
+- if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+- mutex_unlock(&q->sysfs_lock);
+- return -ENOENT;
+- }
+- res = entry->store(q, page, length);
+- mutex_unlock(&q->sysfs_lock);
+- return res;
+-}
+-
+-static struct sysfs_ops queue_sysfs_ops = {
+- .show = queue_attr_show,
+- .store = queue_attr_store,
+-};
+-
+-static struct kobj_type queue_ktype = {
+- .sysfs_ops = &queue_sysfs_ops,
+- .default_attrs = default_attrs,
+- .release = blk_release_queue,
+-};
+-
+-int blk_register_queue(struct gendisk *disk)
+-{
+- int ret;
+-
+- struct request_queue *q = disk->queue;
+-
+- if (!q || !q->request_fn)
+- return -ENXIO;
+-
- q->kobj.parent = kobject_get(&disk->kobj);
-
- ret = kobject_add(&q->kobj);
-+ ret = kobject_add(&q->kobj, kobject_get(&disk->dev.kobj),
-+ "%s", "queue");
- if (ret < 0)
- return ret;
-
-@@ -4209,6 +4452,6 @@ void blk_unregister_queue(struct gendisk *disk)
-
- kobject_uevent(&q->kobj, KOBJ_REMOVE);
- kobject_del(&q->kobj);
+- if (ret < 0)
+- return ret;
+-
+- kobject_uevent(&q->kobj, KOBJ_ADD);
+-
+- ret = elv_register_queue(q);
+- if (ret) {
+- kobject_uevent(&q->kobj, KOBJ_REMOVE);
+- kobject_del(&q->kobj);
+- return ret;
+- }
+-
+- return 0;
+-}
+-
+-void blk_unregister_queue(struct gendisk *disk)
+-{
+- struct request_queue *q = disk->queue;
+-
+- if (q && q->request_fn) {
+- elv_unregister_queue(q);
+-
+- kobject_uevent(&q->kobj, KOBJ_REMOVE);
+- kobject_del(&q->kobj);
- kobject_put(&disk->kobj);
-+ kobject_put(&disk->dev.kobj);
- }
- }
+- }
+-}
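
[Editor's note, not part of the patch: the ll_rw_blk.c hunks above drop the old exported two-step completion helpers (end_that_request_first()/end_that_request_chunk()/end_that_request_last()) and instead export blk_end_request(), __blk_end_request() and blk_end_bidi_request(), switching from the old uptodate flag to an (error, nr_bytes) convention where error is 0 for success or a negative errno. A minimal sketch of what the conversion looks like in a driver; mydrv_end_io_old()/mydrv_end_io_new() are hypothetical names, only the block-layer helpers come from the hunks above:

	/* before this delta: two-step completion with an uptodate flag,
	 * mirroring the removed __end_request() helper shown above.
	 */
	static void mydrv_end_io_old(struct request *rq, int uptodate,
				     unsigned int nr_bytes)
	{
		if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
			blkdev_dequeue_request(rq);
			add_disk_randomness(rq->rq_disk);
			end_that_request_last(rq, uptodate);
		}
	}

	/* after: a single call.  blk_end_request() takes the queue lock
	 * itself and returns 0 once the request is fully completed (and
	 * freed), 1 while buffers are still pending.  Use
	 * __blk_end_request() instead when the queue lock is already held.
	 */
	static void mydrv_end_io_new(struct request *rq, int error,
				     unsigned int nr_bytes)
	{
		if (blk_end_request(rq, error, nr_bytes))
			return;	/* still buffers pending for this request */
		/* dequeue, accounting and freeing are handled internally */
	}

The xsysace.c hunk further down shows the same conversion in-tree, calling __blk_end_request() with blk_rq_cur_bytes().]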
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 083d2e1..c3166a1 100644
--- a/crypto/Kconfig
@@ -163854,7 +171908,7 @@
unregister_chrdev(AOE_MAJOR, "aoechr");
}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
-index 509b649..ef50068 100644
+index 509b649..855ce8e 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1187,17 +1187,6 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
@@ -163900,9 +171954,11 @@
cmd_free(h, cmd, 1);
cciss_check_queues(h);
spin_unlock_irqrestore(&h->lock, flags);
-@@ -2544,7 +2526,6 @@ after_error_processing:
+@@ -2542,9 +2524,7 @@ after_error_processing:
+ resend_cciss_cmd(h, cmd);
+ return;
}
- cmd->rq->data_len = 0;
+- cmd->rq->data_len = 0;
cmd->rq->completion_data = cmd;
- blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
blk_complete_request(cmd->rq);
@@ -164628,19 +172684,37 @@
default:
BUG();
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
-index 82effce..2c81465 100644
+index 82effce..78ebfff 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
-@@ -703,7 +703,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
+@@ -483,7 +483,6 @@ static void ace_fsm_dostate(struct ace_device *ace)
+ u32 status;
+ u16 val;
+ int count;
+- int i;
+
+ #if defined(DEBUG)
+ dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
+@@ -688,7 +687,6 @@ static void ace_fsm_dostate(struct ace_device *ace)
+ }
+
+ /* Transfer the next buffer */
+- i = 16;
+ if (ace->fsm_task == ACE_TASK_WRITE)
+ ace->reg_ops->dataout(ace);
+ else
+@@ -702,8 +700,8 @@ static void ace_fsm_dostate(struct ace_device *ace)
+ }
/* bio finished; is there another one? */
- i = ace->req->current_nr_sectors;
+- i = ace->req->current_nr_sectors;
- if (end_that_request_first(ace->req, 1, i)) {
-+ if (__blk_end_request(ace->req, 0, i)) {
++ if (__blk_end_request(ace->req, 0,
++ blk_rq_cur_bytes(ace->req))) {
/* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
* ace->req->hard_nr_sectors,
* ace->req->current_nr_sectors);
-@@ -718,9 +718,6 @@ static void ace_fsm_dostate(struct ace_device *ace)
+@@ -718,9 +716,6 @@ static void ace_fsm_dostate(struct ace_device *ace)
break;
case ACE_FSM_STATE_REQ_COMPLETE:
@@ -254093,7 +262167,7 @@
dev->name);
dev->stats.rx_dropped++;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
-index 9af05a2..af40ff4 100644
+index 9af05a2..5a2d1dd 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -212,7 +212,7 @@ config MII
@@ -254158,6 +262232,15 @@
help
Say Y here if you have an Seeq based Ethernet network card. This is
used in many Silicon Graphics machines.
+@@ -1962,7 +1992,7 @@ config E1000_DISABLE_PACKET_SPLIT
+
+ config E1000E
+ tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
+- depends on PCI
++ depends on PCI && EXPERIMENTAL
+ ---help---
+ This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
+ ethernet family of adapters. For PCI or PCI-X e1000 adapters,
@@ -1989,6 +2019,28 @@ config IP1000
To compile this driver as a module, choose M here: the module
will be called ipg. This is recommended.
@@ -284459,54 +292542,10 @@
/* Number of entries in the Multicast Table Array (MTA). */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
-index 76c0fa6..7f5b2ae 100644
+index 76c0fa6..3111af6 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
-@@ -73,14 +73,6 @@ static struct pci_device_id e1000_pci_tbl[] = {
- INTEL_E1000_ETHERNET_DEVICE(0x1026),
- INTEL_E1000_ETHERNET_DEVICE(0x1027),
- INTEL_E1000_ETHERNET_DEVICE(0x1028),
-- INTEL_E1000_ETHERNET_DEVICE(0x1049),
-- INTEL_E1000_ETHERNET_DEVICE(0x104A),
-- INTEL_E1000_ETHERNET_DEVICE(0x104B),
-- INTEL_E1000_ETHERNET_DEVICE(0x104C),
-- INTEL_E1000_ETHERNET_DEVICE(0x104D),
-- INTEL_E1000_ETHERNET_DEVICE(0x105E),
-- INTEL_E1000_ETHERNET_DEVICE(0x105F),
-- INTEL_E1000_ETHERNET_DEVICE(0x1060),
- INTEL_E1000_ETHERNET_DEVICE(0x1075),
- INTEL_E1000_ETHERNET_DEVICE(0x1076),
- INTEL_E1000_ETHERNET_DEVICE(0x1077),
-@@ -89,28 +81,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
- INTEL_E1000_ETHERNET_DEVICE(0x107A),
- INTEL_E1000_ETHERNET_DEVICE(0x107B),
- INTEL_E1000_ETHERNET_DEVICE(0x107C),
-- INTEL_E1000_ETHERNET_DEVICE(0x107D),
-- INTEL_E1000_ETHERNET_DEVICE(0x107E),
-- INTEL_E1000_ETHERNET_DEVICE(0x107F),
- INTEL_E1000_ETHERNET_DEVICE(0x108A),
-- INTEL_E1000_ETHERNET_DEVICE(0x108B),
-- INTEL_E1000_ETHERNET_DEVICE(0x108C),
-- INTEL_E1000_ETHERNET_DEVICE(0x1096),
-- INTEL_E1000_ETHERNET_DEVICE(0x1098),
- INTEL_E1000_ETHERNET_DEVICE(0x1099),
-- INTEL_E1000_ETHERNET_DEVICE(0x109A),
-- INTEL_E1000_ETHERNET_DEVICE(0x10A4),
-- INTEL_E1000_ETHERNET_DEVICE(0x10A5),
- INTEL_E1000_ETHERNET_DEVICE(0x10B5),
-- INTEL_E1000_ETHERNET_DEVICE(0x10B9),
-- INTEL_E1000_ETHERNET_DEVICE(0x10BA),
-- INTEL_E1000_ETHERNET_DEVICE(0x10BB),
-- INTEL_E1000_ETHERNET_DEVICE(0x10BC),
-- INTEL_E1000_ETHERNET_DEVICE(0x10C4),
-- INTEL_E1000_ETHERNET_DEVICE(0x10C5),
-- INTEL_E1000_ETHERNET_DEVICE(0x10D5),
-- INTEL_E1000_ETHERNET_DEVICE(0x10D9),
-- INTEL_E1000_ETHERNET_DEVICE(0x10DA),
- /* required last entry */
- {0,}
- };
-@@ -153,7 +126,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+@@ -153,7 +153,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring);
@@ -284515,7 +292554,7 @@
static void e1000_update_phy_info(unsigned long data);
static void e1000_watchdog(unsigned long data);
static void e1000_82547_tx_fifo_stall(unsigned long data);
-@@ -299,14 +272,14 @@ module_exit(e1000_exit_module);
+@@ -299,14 +299,14 @@ module_exit(e1000_exit_module);
static int e1000_request_irq(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -284532,7 +292571,7 @@
irq_flags = 0;
}
}
-@@ -514,7 +487,7 @@ static void e1000_configure(struct e1000_adapter *adapter)
+@@ -514,7 +514,7 @@ static void e1000_configure(struct e1000_adapter *adapter)
struct net_device *netdev = adapter->netdev;
int i;
@@ -284541,7 +292580,7 @@
e1000_restore_vlan(adapter);
e1000_init_manageability(adapter);
-@@ -845,6 +818,64 @@ e1000_reset(struct e1000_adapter *adapter)
+@@ -845,6 +845,64 @@ e1000_reset(struct e1000_adapter *adapter)
}
/**
@@ -284606,7 +292645,7 @@
* e1000_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in e1000_pci_tbl
-@@ -927,7 +958,7 @@ e1000_probe(struct pci_dev *pdev,
+@@ -927,7 +985,7 @@ e1000_probe(struct pci_dev *pdev,
netdev->stop = &e1000_close;
netdev->hard_start_xmit = &e1000_xmit_frame;
netdev->get_stats = &e1000_get_stats;
@@ -284615,7 +292654,7 @@
netdev->set_mac_address = &e1000_set_mac;
netdev->change_mtu = &e1000_change_mtu;
netdev->do_ioctl = &e1000_ioctl;
-@@ -995,7 +1026,6 @@ e1000_probe(struct pci_dev *pdev,
+@@ -995,7 +1053,6 @@ e1000_probe(struct pci_dev *pdev,
adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
/* initialize eeprom parameters */
@@ -284623,7 +292662,7 @@
if (e1000_init_eeprom_params(&adapter->hw)) {
E1000_ERR("EEPROM initialization failed\n");
goto err_eeprom;
-@@ -1007,23 +1037,29 @@ e1000_probe(struct pci_dev *pdev,
+@@ -1007,23 +1064,29 @@ e1000_probe(struct pci_dev *pdev,
e1000_reset_hw(&adapter->hw);
/* make sure the EEPROM is good */
@@ -284663,7 +292702,7 @@
e1000_get_bus_info(&adapter->hw);
-@@ -2410,21 +2446,22 @@ e1000_set_mac(struct net_device *netdev, void *p)
+@@ -2410,21 +2473,22 @@ e1000_set_mac(struct net_device *netdev, void *p)
}
/**
@@ -284692,7 +292731,7 @@
uint32_t rctl;
uint32_t hash_value;
int i, rar_entries = E1000_RAR_ENTRIES;
-@@ -2447,9 +2484,16 @@ e1000_set_multi(struct net_device *netdev)
+@@ -2447,9 +2511,16 @@ e1000_set_multi(struct net_device *netdev)
rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
} else if (netdev->flags & IFF_ALLMULTI) {
rctl |= E1000_RCTL_MPE;
@@ -284711,7 +292750,7 @@
}
E1000_WRITE_REG(hw, RCTL, rctl);
-@@ -2459,7 +2503,10 @@ e1000_set_multi(struct net_device *netdev)
+@@ -2459,7 +2530,10 @@ e1000_set_multi(struct net_device *netdev)
if (hw->mac_type == e1000_82542_rev2_0)
e1000_enter_82542_rst(adapter);
@@ -284723,7 +292762,7 @@
* RAR 0 is used for the station MAC adddress
* if there are not 14 addresses, go ahead and clear the filters
* -- with 82571 controllers only 0-13 entries are filled here
-@@ -2467,8 +2514,11 @@ e1000_set_multi(struct net_device *netdev)
+@@ -2467,8 +2541,11 @@ e1000_set_multi(struct net_device *netdev)
mc_ptr = netdev->mc_list;
for (i = 1; i < rar_entries; i++) {
@@ -284737,7 +292776,7 @@
mc_ptr = mc_ptr->next;
} else {
E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
-@@ -2477,6 +2527,7 @@ e1000_set_multi(struct net_device *netdev)
+@@ -2477,6 +2554,7 @@ e1000_set_multi(struct net_device *netdev)
E1000_WRITE_FLUSH(hw);
}
}
@@ -284745,7 +292784,7 @@
/* clear the old settings from the multicast hash table */
-@@ -2488,7 +2539,7 @@ e1000_set_multi(struct net_device *netdev)
+@@ -2488,7 +2566,7 @@ e1000_set_multi(struct net_device *netdev)
/* load any remaining addresses into the hash table */
for (; mc_ptr; mc_ptr = mc_ptr->next) {
@@ -284754,7 +292793,7 @@
e1000_mta_set(hw, hash_value);
}
-@@ -3680,10 +3731,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
+@@ -3680,10 +3758,6 @@ e1000_update_stats(struct e1000_adapter *adapter)
}
/* Fill out the OS statistics structure */
@@ -284765,7 +292804,7 @@
adapter->net_stats.multicast = adapter->stats.mprc;
adapter->net_stats.collisions = adapter->stats.colc;
-@@ -4059,6 +4106,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
+@@ -4059,6 +4133,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
}
adapter->total_tx_bytes += total_tx_bytes;
adapter->total_tx_packets += total_tx_packets;
@@ -284774,7 +292813,7 @@
return cleaned;
}
-@@ -4106,8 +4155,8 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
+@@ -4106,8 +4182,8 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
/* Hardware complements the payload checksum, so we undo it
* and then put the value in host order for further stack use.
*/
@@ -284785,7 +292824,7 @@
skb->ip_summed = CHECKSUM_COMPLETE;
}
adapter->hw_csum_good++;
-@@ -4281,6 +4330,8 @@ next_desc:
+@@ -4281,6 +4357,8 @@ next_desc:
adapter->total_rx_packets += total_rx_packets;
adapter->total_rx_bytes += total_rx_bytes;
@@ -284794,7 +292833,7 @@
return cleaned;
}
-@@ -4468,6 +4519,8 @@ next_desc:
+@@ -4468,6 +4546,8 @@ next_desc:
adapter->total_rx_packets += total_rx_packets;
adapter->total_rx_bytes += total_rx_bytes;
@@ -284803,7 +292842,7 @@
return cleaned;
}
-@@ -4631,7 +4684,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
+@@ -4631,7 +4711,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
rx_desc->read.buffer_addr[j+1] =
cpu_to_le64(ps_page_dma->ps_page_dma[j]);
} else
@@ -284812,7 +292851,7 @@
}
skb = netdev_alloc_skb(netdev,
-@@ -4874,22 +4927,6 @@ e1000_pci_clear_mwi(struct e1000_hw *hw)
+@@ -4874,22 +4954,6 @@ e1000_pci_clear_mwi(struct e1000_hw *hw)
pci_clear_mwi(adapter->pdev);
}
@@ -284835,7 +292874,7 @@
int
e1000_pcix_get_mmrbc(struct e1000_hw *hw)
{
-@@ -5095,7 +5132,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+@@ -5095,7 +5159,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
if (wufc) {
e1000_setup_rctl(adapter);
@@ -502029,7 +510068,7 @@
- cproto -E "gcc -E" -e -v -I $(TOPDIR)/include -DMAKING_PROTO -D__KERNEL__ $(SRC) >> proto2.h
- mv proto2.h proto.h
diff --git a/fs/splice.c b/fs/splice.c
-index 6bdcb61..0a0b79b 100644
+index 6bdcb61..1577a73 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -254,11 +254,16 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
@@ -502080,7 +510119,39 @@
return in->f_op->splice_read(in, ppos, pipe, len, flags);
}
-@@ -1440,6 +1438,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
+@@ -1033,7 +1031,11 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
+ goto out_release;
+ }
+
++done:
+ pipe->nrbufs = pipe->curbuf = 0;
++ if (bytes > 0)
++ file_accessed(in);
++
+ return bytes;
+
+ out_release:
+@@ -1049,16 +1051,11 @@ out_release:
+ buf->ops = NULL;
+ }
+ }
+- pipe->nrbufs = pipe->curbuf = 0;
+-
+- /*
+- * If we transferred some data, return the number of bytes:
+- */
+- if (bytes > 0)
+- return bytes;
+
+- return ret;
++ if (!bytes)
++ bytes = ret;
+
++ goto done;
+ }
+ EXPORT_SYMBOL(splice_direct_to_actor);
+
+@@ -1440,6 +1437,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
.partial = partial,
.flags = flags,
.ops = &user_page_pipe_buf_ops,
@@ -530279,7 +538350,7 @@
int (*match)(struct attribute_container *, struct device *);
#define ATTRIBUTE_CONTAINER_NO_CLASSDEVS 0x01
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
-index d18ee67..71e7a84 100644
+index d18ee67..e18d419 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -34,83 +34,10 @@ struct sg_io_hdr;
@@ -530469,10 +538540,11 @@
}
/* assumes size > 256 */
-@@ -895,6 +825,12 @@ static inline void exit_io_context(void)
+@@ -895,6 +825,13 @@ static inline void exit_io_context(void)
{
}
++struct io_context;
+static inline int put_io_context(struct io_context *ioc)
+{
+ return 1;
@@ -530483,7 +538555,7 @@
#endif
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
-index 7e11d23..06dadba 100644
+index 7e11d23..cfc3147 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -148,7 +148,7 @@ extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
@@ -530499,10 +538571,10 @@
__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
}
-+extern int blk_trace_setup(request_queue_t *q, char *name, dev_t dev,
++extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ char __user *arg);
-+extern int blk_trace_startstop(request_queue_t *q, int start);
-+extern int blk_trace_remove(request_queue_t *q);
++extern int blk_trace_startstop(struct request_queue *q, int start);
++extern int blk_trace_remove(struct request_queue *q);
+
#else /* !CONFIG_BLK_DEV_IO_TRACE */
#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
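
[Editor's note, not part of the patch: the blktrace_api.h hunk above adds declarations for blk_trace_setup(), blk_trace_startstop() and blk_trace_remove(), so drivers with their own ioctl paths can wire up blktrace without going through blk_trace_ioctl(). A hedged sketch of such a dispatch under those signatures; the function name and the assumption that the caller supplies the device name and dev_t are illustrative only:

	static int mydrv_trace_ioctl(struct request_queue *q, char *name,
				     dev_t dev, unsigned int cmd,
				     char __user *arg)
	{
		switch (cmd) {
		case BLKTRACESETUP:
			return blk_trace_setup(q, name, dev, arg);
		case BLKTRACESTART:
			return blk_trace_startstop(q, 1);
		case BLKTRACESTOP:
			return blk_trace_startstop(q, 0);
		case BLKTRACETEARDOWN:
			return blk_trace_remove(q);
		default:
			return -ENOTTY;
		}
	}
]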