Merge branch 'for-3.16/core' into for-3.16/drivers

mtip32xx uses blk_mq_alloc_reserved_request(), so pull in the
core changes to get a properly merged end result.

Signed-off-by: Jens Axboe <axboe@fb.com>
Jens Axboe 2014-05-28 09:50:26 -06:00
commit 6178976500
33 changed files with 1476 additions and 564 deletions
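The driver-visible API change this merge hinges on, sketched from the mtip32xx and include/linux/blk-mq.h hunks below: the separate blk_mq_alloc_reserved_request() helper goes away, and blk_mq_alloc_request() grows a bool argument that selects a reserved tag.

/* before this merge */
rq = blk_mq_alloc_reserved_request(dd->queue, 0, __GFP_WAIT);

/* after this merge: the final argument asks for a reserved tag */
rq = blk_mq_alloc_request(dd->queue, 0, __GFP_WAIT, true);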

View file

@ -62,7 +62,7 @@
!Efs/mpage.c
!Efs/namei.c
!Efs/buffer.c
!Efs/bio.c
!Eblock/bio.c
!Efs/seq_file.c
!Efs/filesystems.c
!Efs/fs-writeback.c

View file

@ -2,13 +2,15 @@
# Makefile for the kernel block layer
#
obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
blk-iopoll.o blk-lib.o blk-mq.o blk-mq-tag.o \
blk-mq-sysfs.o blk-mq-cpu.o blk-mq-cpumap.o ioctl.o \
genhd.o scsi_ioctl.o partition-generic.o partitions/
genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
partitions/
obj-$(CONFIG_BOUNCE) += bounce.o
obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o
obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o
@ -20,3 +22,4 @@ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o

View file

@ -617,7 +617,7 @@ int bioset_integrity_create(struct bio_set *bs, int pool_size)
if (!bs->bio_integrity_pool)
return -1;
bs->bvec_integrity_pool = biovec_create_pool(bs, pool_size);
bs->bvec_integrity_pool = biovec_create_pool(pool_size);
if (!bs->bvec_integrity_pool) {
mempool_destroy(bs->bio_integrity_pool);
return -1;

View file

@ -305,6 +305,8 @@ static void bio_chain_endio(struct bio *bio, int error)
/**
* bio_chain - chain bio completions
* @bio: the target bio
* @parent: the @bio's parent bio
*
* The caller won't have a bi_end_io called when @bio completes - instead,
* @parent's bi_end_io won't be called until both @parent and @bio have
@ -1011,8 +1013,7 @@ static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
bio->bi_private = bmd;
}
static struct bio_map_data *bio_alloc_map_data(int nr_segs,
unsigned int iov_count,
static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
gfp_t gfp_mask)
{
if (iov_count > UIO_MAXIOV)
@ -1154,7 +1155,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
if (offset)
nr_pages++;
bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
bmd = bio_alloc_map_data(iov_count, gfp_mask);
if (!bmd)
return ERR_PTR(-ENOMEM);
@ -1859,7 +1860,7 @@ EXPORT_SYMBOL_GPL(bio_trim);
* create memory pools for biovec's in a bio_set.
* use the global biovec slabs created for general use.
*/
mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries)
mempool_t *biovec_create_pool(int pool_entries)
{
struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;
@ -1922,7 +1923,7 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
if (!bs->bio_pool)
goto bad;
bs->bvec_pool = biovec_create_pool(bs, pool_size);
bs->bvec_pool = biovec_create_pool(pool_size);
if (!bs->bvec_pool)
goto bad;

View file

@ -576,12 +576,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (!q)
return NULL;
if (percpu_counter_init(&q->mq_usage_counter, 0))
goto fail_q;
q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
if (q->id < 0)
goto fail_c;
goto fail_q;
q->backing_dev_info.ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
@ -639,8 +636,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
bdi_destroy(&q->backing_dev_info);
fail_id:
ida_simple_remove(&blk_queue_ida, q->id);
fail_c:
percpu_counter_destroy(&q->mq_usage_counter);
fail_q:
kmem_cache_free(blk_requestq_cachep, q);
return NULL;
@ -848,6 +843,47 @@ static void freed_request(struct request_list *rl, unsigned int flags)
__freed_request(rl, sync ^ 1);
}
int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
{
struct request_list *rl;
spin_lock_irq(q->queue_lock);
q->nr_requests = nr;
blk_queue_congestion_threshold(q);
/* congestion isn't cgroup aware and follows root blkcg for now */
rl = &q->root_rl;
if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
blk_set_queue_congested(q, BLK_RW_SYNC);
else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
blk_clear_queue_congested(q, BLK_RW_SYNC);
if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
blk_set_queue_congested(q, BLK_RW_ASYNC);
else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
blk_clear_queue_congested(q, BLK_RW_ASYNC);
blk_queue_for_each_rl(rl, q) {
if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
blk_set_rl_full(rl, BLK_RW_SYNC);
} else {
blk_clear_rl_full(rl, BLK_RW_SYNC);
wake_up(&rl->wait[BLK_RW_SYNC]);
}
if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
blk_set_rl_full(rl, BLK_RW_ASYNC);
} else {
blk_clear_rl_full(rl, BLK_RW_ASYNC);
wake_up(&rl->wait[BLK_RW_ASYNC]);
}
}
spin_unlock_irq(q->queue_lock);
return 0;
}
/*
* Determine if elevator data should be initialized when allocating the
* request associated with @bio.
@ -1137,7 +1173,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
if (q->mq_ops)
return blk_mq_alloc_request(q, rw, gfp_mask);
return blk_mq_alloc_request(q, rw, gfp_mask, false);
else
return blk_old_get_request(q, rw, gfp_mask);
}
@ -1233,12 +1269,15 @@ static void add_acct_request(struct request_queue *q, struct request *rq,
static void part_round_stats_single(int cpu, struct hd_struct *part,
unsigned long now)
{
int inflight;
if (now == part->stamp)
return;
if (part_in_flight(part)) {
inflight = part_in_flight(part);
if (inflight) {
__part_stat_add(cpu, part, time_in_queue,
part_in_flight(part) * (now - part->stamp));
inflight * (now - part->stamp));
__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
}
part->stamp = now;
@ -1427,6 +1466,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
* added on the elevator at this point. In addition, we don't have
* reliable access to the elevator outside queue lock. Only check basic
* merging parameters without querying the elevator.
*
* Caller must ensure !blk_queue_nomerges(q) beforehand.
*/
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
unsigned int *request_count)
@ -1436,9 +1477,6 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
bool ret = false;
struct list_head *plug_list;
if (blk_queue_nomerges(q))
goto out;
plug = current->plug;
if (!plug)
goto out;
@ -1517,7 +1555,8 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
* Check if we can merge with the plugged list before grabbing
* any locks.
*/
if (blk_attempt_plug_merge(q, bio, &request_count))
if (!blk_queue_nomerges(q) &&
blk_attempt_plug_merge(q, bio, &request_count))
return;
spin_lock_irq(q->queue_lock);

View file

@ -130,21 +130,13 @@ static void blk_flush_restore_request(struct request *rq)
blk_clear_rq_complete(rq);
}
static void mq_flush_run(struct work_struct *work)
{
struct request *rq;
rq = container_of(work, struct request, requeue_work);
memset(&rq->csd, 0, sizeof(rq->csd));
blk_mq_insert_request(rq, false, true, false);
}
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
if (rq->q->mq_ops) {
INIT_WORK(&rq->requeue_work, mq_flush_run);
kblockd_schedule_work(&rq->requeue_work);
struct request_queue *q = rq->q;
blk_mq_add_to_requeue_list(rq, add_front);
blk_mq_kick_requeue_list(q);
return false;
} else {
if (add_front)

View file

@ -64,12 +64,12 @@ EXPORT_SYMBOL(__blk_iopoll_complete);
* iopoll handler will not be invoked again before blk_iopoll_sched_prep()
* is called.
**/
void blk_iopoll_complete(struct blk_iopoll *iopoll)
void blk_iopoll_complete(struct blk_iopoll *iop)
{
unsigned long flags;
local_irq_save(flags);
__blk_iopoll_complete(iopoll);
__blk_iopoll_complete(iop);
local_irq_restore(flags);
}
EXPORT_SYMBOL(blk_iopoll_complete);

View file

@ -226,8 +226,8 @@ EXPORT_SYMBOL(blkdev_issue_write_same);
* Generate and issue number of bios with zerofiled pages.
*/
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask)
static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask)
{
int ret;
struct bio *bio;

View file

@ -18,14 +18,18 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
{
unsigned int cpu = (unsigned long) hcpu;
struct blk_mq_cpu_notifier *notify;
int ret = NOTIFY_OK;
raw_spin_lock(&blk_mq_cpu_notify_lock);
list_for_each_entry(notify, &blk_mq_cpu_notify_list, list)
notify->notify(notify->data, action, cpu);
list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) {
ret = notify->notify(notify->data, action, cpu);
if (ret != NOTIFY_OK)
break;
}
raw_spin_unlock(&blk_mq_cpu_notify_lock);
return NOTIFY_OK;
return ret;
}
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
@ -45,7 +49,7 @@ void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
}
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
void (*fn)(void *, unsigned long, unsigned int),
int (*fn)(void *, unsigned long, unsigned int),
void *data)
{
notifier->notify = fn;

View file

@ -96,3 +96,19 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
kfree(map);
return NULL;
}
/*
* We have no quick way of doing reverse lookups. This is only used at
* queue init time, so runtime isn't important.
*/
int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
{
int i;
for_each_possible_cpu(i) {
if (index == mq_map[i])
return cpu_to_node(i);
}
return NUMA_NO_NODE;
}

View file

@ -203,47 +203,16 @@ static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
return ret;
}
static ssize_t blk_mq_hw_sysfs_ipi_show(struct blk_mq_hw_ctx *hctx, char *page)
{
ssize_t ret;
spin_lock(&hctx->lock);
ret = sprintf(page, "%u\n", !!(hctx->flags & BLK_MQ_F_SHOULD_IPI));
spin_unlock(&hctx->lock);
return ret;
}
static ssize_t blk_mq_hw_sysfs_ipi_store(struct blk_mq_hw_ctx *hctx,
const char *page, size_t len)
{
struct blk_mq_ctx *ctx;
unsigned long ret;
unsigned int i;
if (kstrtoul(page, 10, &ret)) {
pr_err("blk-mq-sysfs: invalid input '%s'\n", page);
return -EINVAL;
}
spin_lock(&hctx->lock);
if (ret)
hctx->flags |= BLK_MQ_F_SHOULD_IPI;
else
hctx->flags &= ~BLK_MQ_F_SHOULD_IPI;
spin_unlock(&hctx->lock);
hctx_for_each_ctx(hctx, ctx, i)
ctx->ipi_redirect = !!ret;
return len;
}
static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
return blk_mq_tag_sysfs_show(hctx->tags, page);
}
static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{
return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
}
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
unsigned int i, first = 1;
@ -303,15 +272,14 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
.attr = {.name = "dispatched", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
.attr = {.name = "active", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_active_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
.attr = {.name = "pending", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_ipi = {
.attr = {.name = "ipi_redirect", .mode = S_IRUGO | S_IWUSR},
.show = blk_mq_hw_sysfs_ipi_show,
.store = blk_mq_hw_sysfs_ipi_store,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
.attr = {.name = "tags", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_tags_show,
@ -326,9 +294,9 @@ static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_run.attr,
&blk_mq_hw_sysfs_dispatched.attr,
&blk_mq_hw_sysfs_pending.attr,
&blk_mq_hw_sysfs_ipi.attr,
&blk_mq_hw_sysfs_tags.attr,
&blk_mq_hw_sysfs_cpus.attr,
&blk_mq_hw_sysfs_active.attr,
NULL,
};

View file

@ -1,64 +1,333 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
void blk_mq_wait_for_tags(struct blk_mq_tags *tags)
static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
int tag = blk_mq_get_tag(tags, __GFP_WAIT, false);
blk_mq_put_tag(tags, tag);
int i;
for (i = 0; i < bt->map_nr; i++) {
struct blk_align_bitmap *bm = &bt->map[i];
int ret;
ret = find_first_zero_bit(&bm->word, bm->depth);
if (ret < bm->depth)
return true;
}
return false;
}
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
return !tags ||
percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids) != 0;
if (!tags)
return true;
return bt_has_free_tags(&tags->bitmap_tags);
}
static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp)
static inline void bt_index_inc(unsigned int *index)
{
*index = (*index + 1) & (BT_WAIT_QUEUES - 1);
}
/*
* If a previously inactive queue goes active, bump the active user count.
*/
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
!test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
atomic_inc(&hctx->tags->active_queues);
return true;
}
/*
* Wakeup all potentially sleeping on normal (non-reserved) tags
*/
static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
{
struct blk_mq_bitmap_tags *bt;
int i, wake_index;
bt = &tags->bitmap_tags;
wake_index = bt->wake_index;
for (i = 0; i < BT_WAIT_QUEUES; i++) {
struct bt_wait_state *bs = &bt->bs[wake_index];
if (waitqueue_active(&bs->wait))
wake_up(&bs->wait);
bt_index_inc(&wake_index);
}
}
/*
* If a previously busy queue goes inactive, potential waiters could now
* be allowed to queue. Wake them up and check.
*/
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
struct blk_mq_tags *tags = hctx->tags;
if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
return;
atomic_dec(&tags->active_queues);
blk_mq_tag_wakeup_all(tags);
}
/*
* For shared tag users, we track the number of currently active users
* and attempt to provide a fair share of the tag depth for each of them.
*/
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
struct blk_mq_bitmap_tags *bt)
{
unsigned int depth, users;
if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
return true;
if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
return true;
/*
* Don't try dividing an ant
*/
if (bt->depth == 1)
return true;
users = atomic_read(&hctx->tags->active_queues);
if (!users)
return true;
/*
* Allow at least some tags
*/
depth = max((bt->depth + users - 1) / users, 4U);
return atomic_read(&hctx->nr_active) < depth;
}
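As an illustration of the fair-share comment above (hypothetical numbers, not taken from this commit): with bt->depth == 128 and three active shared queues, depth = max((128 + 3 - 1) / 3, 4U) = 43, so each queue may have at most 43 requests in flight before hctx_may_queue() makes it wait.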
static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
{
int tag, org_last_tag, end;
org_last_tag = last_tag;
end = bm->depth;
do {
restart:
tag = find_next_zero_bit(&bm->word, end, last_tag);
if (unlikely(tag >= end)) {
/*
* We started with an offset, start from 0 to
* exhaust the map.
*/
if (org_last_tag && last_tag) {
end = last_tag;
last_tag = 0;
goto restart;
}
return -1;
}
last_tag = tag + 1;
} while (test_and_set_bit_lock(tag, &bm->word));
return tag;
}
/*
* Straight forward bitmap tag implementation, where each bit is a tag
* (cleared == free, and set == busy). The small twist is using per-cpu
* last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
* contexts. This enables us to drastically limit the space searched,
* without dirtying an extra shared cacheline like we would if we stored
* the cache value inside the shared blk_mq_bitmap_tags structure. On top
* of that, each word of tags is in a separate cacheline. This means that
* multiple users will tend to stick to different cachelines, at least
* until the map is exhausted.
*/
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
unsigned int *tag_cache)
{
unsigned int last_tag, org_last_tag;
int index, i, tag;
if (!hctx_may_queue(hctx, bt))
return -1;
last_tag = org_last_tag = *tag_cache;
index = TAG_TO_INDEX(bt, last_tag);
for (i = 0; i < bt->map_nr; i++) {
tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
if (tag != -1) {
tag += (index << bt->bits_per_word);
goto done;
}
last_tag = 0;
if (++index >= bt->map_nr)
index = 0;
}
*tag_cache = 0;
return -1;
/*
* Only update the cache from the allocation path, if we ended
* up using the specific cached tag.
*/
done:
if (tag == org_last_tag) {
last_tag = tag + 1;
if (last_tag >= bt->depth - 1)
last_tag = 0;
*tag_cache = last_tag;
}
return tag;
}
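To make the word/bit arithmetic in __bt_get() concrete (hypothetical values): for a large tag map (256 tags or more on a 64-bit machine) bt->bits_per_word stays at ilog2(BITS_PER_LONG) == 6, so a free bit found at position 6 of map[1] becomes tag = 6 + (1 << 6) = 70, and the TAG_TO_INDEX()/TAG_TO_BIT() macros in blk-mq-tag.h invert that: 70 >> 6 == 1 and 70 & 63 == 6.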
static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
struct blk_mq_hw_ctx *hctx)
{
struct bt_wait_state *bs;
if (!hctx)
return &bt->bs[0];
bs = &bt->bs[hctx->wait_index];
bt_index_inc(&hctx->wait_index);
return bs;
}
static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx,
unsigned int *last_tag, gfp_t gfp)
{
struct bt_wait_state *bs;
DEFINE_WAIT(wait);
int tag;
tag = __bt_get(hctx, bt, last_tag);
if (tag != -1)
return tag;
if (!(gfp & __GFP_WAIT))
return -1;
bs = bt_wait_ptr(bt, hctx);
do {
bool was_empty;
was_empty = list_empty(&wait.task_list);
prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
tag = __bt_get(hctx, bt, last_tag);
if (tag != -1)
break;
if (was_empty)
atomic_set(&bs->wait_cnt, bt->wake_cnt);
io_schedule();
} while (1);
finish_wait(&bs->wait, &wait);
return tag;
}
static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags,
struct blk_mq_hw_ctx *hctx,
unsigned int *last_tag, gfp_t gfp)
{
int tag;
tag = percpu_ida_alloc(&tags->free_tags, (gfp & __GFP_WAIT) ?
TASK_UNINTERRUPTIBLE : TASK_RUNNING);
if (tag < 0)
return BLK_MQ_TAG_FAIL;
return tag + tags->nr_reserved_tags;
tag = bt_get(&tags->bitmap_tags, hctx, last_tag, gfp);
if (tag >= 0)
return tag + tags->nr_reserved_tags;
return BLK_MQ_TAG_FAIL;
}
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
gfp_t gfp)
{
int tag;
int tag, zero = 0;
if (unlikely(!tags->nr_reserved_tags)) {
WARN_ON_ONCE(1);
return BLK_MQ_TAG_FAIL;
}
tag = percpu_ida_alloc(&tags->reserved_tags, (gfp & __GFP_WAIT) ?
TASK_UNINTERRUPTIBLE : TASK_RUNNING);
tag = bt_get(&tags->breserved_tags, NULL, &zero, gfp);
if (tag < 0)
return BLK_MQ_TAG_FAIL;
return tag;
}
unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved)
unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag,
gfp_t gfp, bool reserved)
{
if (!reserved)
return __blk_mq_get_tag(tags, gfp);
return __blk_mq_get_tag(hctx->tags, hctx, last_tag, gfp);
return __blk_mq_get_reserved_tag(tags, gfp);
return __blk_mq_get_reserved_tag(hctx->tags, gfp);
}
static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
int i, wake_index;
wake_index = bt->wake_index;
for (i = 0; i < BT_WAIT_QUEUES; i++) {
struct bt_wait_state *bs = &bt->bs[wake_index];
if (waitqueue_active(&bs->wait)) {
if (wake_index != bt->wake_index)
bt->wake_index = wake_index;
return bs;
}
bt_index_inc(&wake_index);
}
return NULL;
}
static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
const int index = TAG_TO_INDEX(bt, tag);
struct bt_wait_state *bs;
/*
* The unlock memory barrier need to order access to req in free
* path and clearing tag bit
*/
clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);
bs = bt_wake_ptr(bt);
if (bs && atomic_dec_and_test(&bs->wait_cnt)) {
atomic_set(&bs->wait_cnt, bt->wake_cnt);
bt_index_inc(&bt->wake_index);
wake_up(&bs->wait);
}
}
static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
BUG_ON(tag >= tags->nr_tags);
percpu_ida_free(&tags->free_tags, tag - tags->nr_reserved_tags);
bt_clear_tag(&tags->bitmap_tags, tag);
}
static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
@ -66,22 +335,43 @@ static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
{
BUG_ON(tag >= tags->nr_reserved_tags);
percpu_ida_free(&tags->reserved_tags, tag);
bt_clear_tag(&tags->breserved_tags, tag);
}
void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
unsigned int *last_tag)
{
if (tag >= tags->nr_reserved_tags)
__blk_mq_put_tag(tags, tag);
else
struct blk_mq_tags *tags = hctx->tags;
if (tag >= tags->nr_reserved_tags) {
const int real_tag = tag - tags->nr_reserved_tags;
__blk_mq_put_tag(tags, real_tag);
*last_tag = real_tag;
} else
__blk_mq_put_reserved_tag(tags, tag);
}
static int __blk_mq_tag_iter(unsigned id, void *data)
static void bt_for_each_free(struct blk_mq_bitmap_tags *bt,
unsigned long *free_map, unsigned int off)
{
unsigned long *tag_map = data;
__set_bit(id, tag_map);
return 0;
int i;
for (i = 0; i < bt->map_nr; i++) {
struct blk_align_bitmap *bm = &bt->map[i];
int bit = 0;
do {
bit = find_next_zero_bit(&bm->word, bm->depth, bit);
if (bit >= bm->depth)
break;
__set_bit(bit + off, free_map);
bit++;
} while (1);
off += (1 << bt->bits_per_word);
}
}
void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
@ -95,21 +385,128 @@ void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
if (!tag_map)
return;
percpu_ida_for_each_free(&tags->free_tags, __blk_mq_tag_iter, tag_map);
bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags);
if (tags->nr_reserved_tags)
percpu_ida_for_each_free(&tags->reserved_tags, __blk_mq_tag_iter,
tag_map);
bt_for_each_free(&tags->breserved_tags, tag_map, 0);
fn(data, tag_map);
kfree(tag_map);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);
static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
unsigned int i, used;
for (i = 0, used = 0; i < bt->map_nr; i++) {
struct blk_align_bitmap *bm = &bt->map[i];
used += bitmap_weight(&bm->word, bm->depth);
}
return bt->depth - used;
}
static void bt_update_count(struct blk_mq_bitmap_tags *bt,
unsigned int depth)
{
unsigned int tags_per_word = 1U << bt->bits_per_word;
unsigned int map_depth = depth;
if (depth) {
int i;
for (i = 0; i < bt->map_nr; i++) {
bt->map[i].depth = min(map_depth, tags_per_word);
map_depth -= bt->map[i].depth;
}
}
bt->wake_cnt = BT_WAIT_BATCH;
if (bt->wake_cnt > depth / 4)
bt->wake_cnt = max(1U, depth / 4);
bt->depth = depth;
}
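A hypothetical walk-through of bt_update_count(): with bits_per_word == 6 (64 tags per word) and depth == 256, each of the four bitmap words gets a depth of 64 and wake_cnt stays at BT_WAIT_BATCH (8); if the depth is later lowered to 20 via blk_mq_tag_update_depth(), map[0].depth becomes 20, the remaining words get 0, and wake_cnt drops to max(1, 20 / 4) = 5.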
static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
int node, bool reserved)
{
int i;
bt->bits_per_word = ilog2(BITS_PER_LONG);
/*
* Depth can be zero for reserved tags, that's not a failure
* condition.
*/
if (depth) {
unsigned int nr, tags_per_word;
tags_per_word = (1 << bt->bits_per_word);
/*
* If the tag space is small, shrink the number of tags
* per word so we spread over a few cachelines, at least.
* If less than 4 tags, just forget about it, it's not
* going to work optimally anyway.
*/
if (depth >= 4) {
while (tags_per_word * 4 > depth) {
bt->bits_per_word--;
tags_per_word = (1 << bt->bits_per_word);
}
}
nr = ALIGN(depth, tags_per_word) / tags_per_word;
bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
GFP_KERNEL, node);
if (!bt->map)
return -ENOMEM;
bt->map_nr = nr;
}
bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
if (!bt->bs) {
kfree(bt->map);
return -ENOMEM;
}
for (i = 0; i < BT_WAIT_QUEUES; i++)
init_waitqueue_head(&bt->bs[i].wait);
bt_update_count(bt, depth);
return 0;
}
static void bt_free(struct blk_mq_bitmap_tags *bt)
{
kfree(bt->map);
kfree(bt->bs);
}
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
int node)
{
unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
if (bt_alloc(&tags->bitmap_tags, depth, node, false))
goto enomem;
if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
goto enomem;
return tags;
enomem:
bt_free(&tags->bitmap_tags);
kfree(tags);
return NULL;
}
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
unsigned int reserved_tags, int node)
{
unsigned int nr_tags, nr_cache;
struct blk_mq_tags *tags;
int ret;
if (total_tags > BLK_MQ_TAG_MAX) {
pr_err("blk-mq: tag depth too large\n");
@ -120,73 +517,59 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
if (!tags)
return NULL;
nr_tags = total_tags - reserved_tags;
nr_cache = nr_tags / num_possible_cpus();
if (nr_cache < BLK_MQ_TAG_CACHE_MIN)
nr_cache = BLK_MQ_TAG_CACHE_MIN;
else if (nr_cache > BLK_MQ_TAG_CACHE_MAX)
nr_cache = BLK_MQ_TAG_CACHE_MAX;
tags->nr_tags = total_tags;
tags->nr_reserved_tags = reserved_tags;
tags->nr_max_cache = nr_cache;
tags->nr_batch_move = max(1u, nr_cache / 2);
ret = __percpu_ida_init(&tags->free_tags, tags->nr_tags -
tags->nr_reserved_tags,
tags->nr_max_cache,
tags->nr_batch_move);
if (ret)
goto err_free_tags;
if (reserved_tags) {
/*
* With max_cahe and batch set to 1, the allocator fallbacks to
* no cached. It's fine reserved tags allocation is slow.
*/
ret = __percpu_ida_init(&tags->reserved_tags, reserved_tags,
1, 1);
if (ret)
goto err_reserved_tags;
}
return tags;
err_reserved_tags:
percpu_ida_destroy(&tags->free_tags);
err_free_tags:
kfree(tags);
return NULL;
return blk_mq_init_bitmap_tags(tags, node);
}
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
percpu_ida_destroy(&tags->free_tags);
percpu_ida_destroy(&tags->reserved_tags);
bt_free(&tags->bitmap_tags);
bt_free(&tags->breserved_tags);
kfree(tags);
}
void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
*tag = prandom_u32() % depth;
}
int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
tdepth -= tags->nr_reserved_tags;
if (tdepth > tags->nr_tags)
return -EINVAL;
/*
* Don't need (or can't) update reserved tags here, they remain
* static and should never need resizing.
*/
bt_update_count(&tags->bitmap_tags, tdepth);
blk_mq_tag_wakeup_all(tags);
return 0;
}
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
char *orig_page = page;
unsigned int cpu;
unsigned int free, res;
if (!tags)
return 0;
page += sprintf(page, "nr_tags=%u, reserved_tags=%u, batch_move=%u,"
" max_cache=%u\n", tags->nr_tags, tags->nr_reserved_tags,
tags->nr_batch_move, tags->nr_max_cache);
page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
"bits_per_word=%u\n",
tags->nr_tags, tags->nr_reserved_tags,
tags->bitmap_tags.bits_per_word);
page += sprintf(page, "nr_free=%u, nr_reserved=%u\n",
percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids),
percpu_ida_free_tags(&tags->reserved_tags, nr_cpu_ids));
free = bt_unused_tags(&tags->bitmap_tags);
res = bt_unused_tags(&tags->breserved_tags);
for_each_possible_cpu(cpu) {
page += sprintf(page, " cpu%02u: nr_free=%u\n", cpu,
percpu_ida_free_tags(&tags->free_tags, cpu));
}
page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));
return page - orig_page;
}

View file

@ -1,7 +1,32 @@
#ifndef INT_BLK_MQ_TAG_H
#define INT_BLK_MQ_TAG_H
#include <linux/percpu_ida.h>
#include "blk-mq.h"
enum {
BT_WAIT_QUEUES = 8,
BT_WAIT_BATCH = 8,
};
struct bt_wait_state {
atomic_t wait_cnt;
wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;
#define TAG_TO_INDEX(bt, tag) ((tag) >> (bt)->bits_per_word)
#define TAG_TO_BIT(bt, tag) ((tag) & ((1 << (bt)->bits_per_word) - 1))
struct blk_mq_bitmap_tags {
unsigned int depth;
unsigned int wake_cnt;
unsigned int bits_per_word;
unsigned int map_nr;
struct blk_align_bitmap *map;
unsigned int wake_index;
struct bt_wait_state *bs;
};
/*
* Tag address space map.
@ -9,11 +34,11 @@
struct blk_mq_tags {
unsigned int nr_tags;
unsigned int nr_reserved_tags;
unsigned int nr_batch_move;
unsigned int nr_max_cache;
struct percpu_ida free_tags;
struct percpu_ida reserved_tags;
atomic_t active_queues;
struct blk_mq_bitmap_tags bitmap_tags;
struct blk_mq_bitmap_tags breserved_tags;
struct request **rqs;
struct list_head page_list;
@ -23,12 +48,12 @@ struct blk_mq_tags {
extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node);
extern void blk_mq_free_tags(struct blk_mq_tags *tags);
extern unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved);
extern void blk_mq_wait_for_tags(struct blk_mq_tags *tags);
extern void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag);
extern void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data);
extern unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved);
extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag);
extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
enum {
BLK_MQ_TAG_CACHE_MIN = 1,
@ -41,4 +66,23 @@ enum {
BLK_MQ_TAG_MAX = BLK_MQ_TAG_FAIL - 1,
};
extern bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *);
extern void __blk_mq_tag_idle(struct blk_mq_hw_ctx *);
static inline bool blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
return false;
return __blk_mq_tag_busy(hctx);
}
static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
return;
__blk_mq_tag_idle(hctx);
}
#endif

File diff suppressed because it is too large.

View file

@ -11,7 +11,8 @@ struct blk_mq_ctx {
unsigned int cpu;
unsigned int index_hw;
unsigned int ipi_redirect;
unsigned int last_tag ____cacheline_aligned_in_smp;
/* incremented at dispatch time */
unsigned long rq_dispatched[2];
@ -22,7 +23,7 @@ struct blk_mq_ctx {
struct request_queue *queue;
struct kobject kobj;
};
} ____cacheline_aligned_in_smp;
void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
@ -31,13 +32,14 @@ void blk_mq_drain_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
void blk_mq_clone_flush_request(struct request *flush_rq,
struct request *orig_rq);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
/*
* CPU hotplug helpers
*/
struct blk_mq_cpu_notifier;
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
void (*fn)(void *, unsigned long, unsigned int),
int (*fn)(void *, unsigned long, unsigned int),
void *data);
void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
@ -50,7 +52,15 @@ void blk_mq_disable_hotplug(void);
*/
extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set);
extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues);
extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
void blk_mq_add_timer(struct request *rq);
/*
* Basic implementation of sparser bitmap, allowing the user to spread
* the bits over more cachelines.
*/
struct blk_align_bitmap {
unsigned long word;
unsigned long depth;
} ____cacheline_aligned_in_smp;
#endif

View file

@ -48,11 +48,10 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page)
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
struct request_list *rl;
unsigned long nr;
int ret;
int ret, err;
if (!q->request_fn)
if (!q->request_fn && !q->mq_ops)
return -EINVAL;
ret = queue_var_store(&nr, page, count);
@ -62,40 +61,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
if (nr < BLKDEV_MIN_RQ)
nr = BLKDEV_MIN_RQ;
spin_lock_irq(q->queue_lock);
q->nr_requests = nr;
blk_queue_congestion_threshold(q);
if (q->request_fn)
err = blk_update_nr_requests(q, nr);
else
err = blk_mq_update_nr_requests(q, nr);
/* congestion isn't cgroup aware and follows root blkcg for now */
rl = &q->root_rl;
if (err)
return err;
if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
blk_set_queue_congested(q, BLK_RW_SYNC);
else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
blk_clear_queue_congested(q, BLK_RW_SYNC);
if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
blk_set_queue_congested(q, BLK_RW_ASYNC);
else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
blk_clear_queue_congested(q, BLK_RW_ASYNC);
blk_queue_for_each_rl(rl, q) {
if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
blk_set_rl_full(rl, BLK_RW_SYNC);
} else {
blk_clear_rl_full(rl, BLK_RW_SYNC);
wake_up(&rl->wait[BLK_RW_SYNC]);
}
if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
blk_set_rl_full(rl, BLK_RW_ASYNC);
} else {
blk_clear_rl_full(rl, BLK_RW_ASYNC);
wake_up(&rl->wait[BLK_RW_ASYNC]);
}
}
spin_unlock_irq(q->queue_lock);
return ret;
}
@ -544,8 +517,6 @@ static void blk_release_queue(struct kobject *kobj)
if (q->queue_tags)
__blk_queue_free_tags(q);
percpu_counter_destroy(&q->mq_usage_counter);
if (q->mq_ops)
blk_mq_free_queue(q);

View file

@ -744,7 +744,7 @@ static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
{
if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
return 0;
return false;
return 1;
}
@ -842,7 +842,7 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
if (tg->io_disp[rw] + 1 <= io_allowed) {
if (wait)
*wait = 0;
return 1;
return true;
}
/* Calc approx time to dispatch */
@ -880,7 +880,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
if (wait)
*wait = 0;
return 1;
return true;
}
/* Calc approx time to dispatch */
@ -923,7 +923,7 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
if (wait)
*wait = 0;
return 1;
return true;
}
/*
@ -1258,7 +1258,7 @@ static void throtl_pending_timer_fn(unsigned long arg)
* of throtl_data->service_queue. Those bio's are ready and issued by this
* function.
*/
void blk_throtl_dispatch_work_fn(struct work_struct *work)
static void blk_throtl_dispatch_work_fn(struct work_struct *work)
{
struct throtl_data *td = container_of(work, struct throtl_data,
dispatch_work);

View file

@ -96,11 +96,7 @@ static void blk_rq_timed_out(struct request *req)
__blk_complete_request(req);
break;
case BLK_EH_RESET_TIMER:
if (q->mq_ops)
blk_mq_add_timer(req);
else
blk_add_timer(req);
blk_add_timer(req);
blk_clear_rq_complete(req);
break;
case BLK_EH_NOT_HANDLED:
@ -170,7 +166,26 @@ void blk_abort_request(struct request *req)
}
EXPORT_SYMBOL_GPL(blk_abort_request);
void __blk_add_timer(struct request *req, struct list_head *timeout_list)
unsigned long blk_rq_timeout(unsigned long timeout)
{
unsigned long maxt;
maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT);
if (time_after(timeout, maxt))
timeout = maxt;
return timeout;
}
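As a rough example (assuming HZ == 1000): a request with a 30 second timeout has a deadline about 30000 jiffies out, but blk_rq_timeout() caps the value used to arm the queue timer at round_jiffies_up(jiffies + BLK_MAX_TIMEOUT), i.e. roughly five seconds ahead, so the timeout handler gets a chance to run and re-arm well before the full 30 seconds elapse.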
/**
* blk_add_timer - Start timeout timer for a single request
* @req: request that is about to start running.
*
* Notes:
* Each request has its own timer, and as it is added to the queue, we
* set up the timer. When the request completes, we cancel the timer.
*/
void blk_add_timer(struct request *req)
{
struct request_queue *q = req->q;
unsigned long expiry;
@ -188,15 +203,15 @@ void __blk_add_timer(struct request *req, struct list_head *timeout_list)
req->timeout = q->rq_timeout;
req->deadline = jiffies + req->timeout;
if (timeout_list)
list_add_tail(&req->timeout_list, timeout_list);
if (!q->mq_ops)
list_add_tail(&req->timeout_list, &req->q->timeout_list);
/*
* If the timer isn't already pending or this timeout is earlier
* than an existing one, modify the timer. Round up to next nearest
* second.
*/
expiry = round_jiffies_up(req->deadline);
expiry = blk_rq_timeout(round_jiffies_up(req->deadline));
if (!timer_pending(&q->timeout) ||
time_before(expiry, q->timeout.expires)) {
@ -214,17 +229,3 @@ void __blk_add_timer(struct request *req, struct list_head *timeout_list)
}
}
/**
* blk_add_timer - Start timeout timer for a single request
* @req: request that is about to start running.
*
* Notes:
* Each request has its own timer, and as it is added to the queue, we
* set up the timer. When the request completes, we cancel the timer.
*/
void blk_add_timer(struct request *req)
{
__blk_add_timer(req, &req->q->timeout_list);
}

View file

@ -9,6 +9,9 @@
/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ 32
/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT (5 * HZ)
extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
@ -37,9 +40,9 @@ bool __blk_end_bidi_request(struct request *rq, int error,
void blk_rq_timed_out_timer(unsigned long data);
void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
unsigned int *next_set);
void __blk_add_timer(struct request *req, struct list_head *timeout_list);
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
@ -185,6 +188,8 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
return q->nr_congestion_off;
}
extern int blk_update_nr_requests(struct request_queue *, unsigned int);
/*
* Contribute to IO statistics IFF:
*

View file

@ -4460,7 +4460,7 @@ static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
return sprintf(page, "%d\n", var);
return sprintf(page, "%u\n", var);
}
static ssize_t

View file

@ -178,7 +178,7 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
{
struct request *rq;
rq = blk_mq_alloc_reserved_request(dd->queue, 0, __GFP_WAIT);
rq = blk_mq_alloc_request(dd->queue, 0, __GFP_WAIT, true);
return blk_mq_rq_to_pdu(rq);
}

View file

@ -322,39 +322,10 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
}
static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_tag_set *set,
unsigned int hctx_index)
unsigned int hctx_index,
int node)
{
int b_size = DIV_ROUND_UP(set->nr_hw_queues, nr_online_nodes);
int tip = (set->nr_hw_queues % nr_online_nodes);
int node = 0, i, n;
/*
* Split submit queues evenly wrt to the number of nodes. If uneven,
* fill the first buckets with one extra, until the rest is filled with
* no extra.
*/
for (i = 0, n = 1; i < hctx_index; i++, n++) {
if (n % b_size == 0) {
n = 0;
node++;
tip--;
if (!tip)
b_size = set->nr_hw_queues / nr_online_nodes;
}
}
/*
* A node might not be online, therefore map the relative node id to the
* real node id.
*/
for_each_online_node(n) {
if (!node)
break;
node--;
}
return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
}
static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)

View file

@ -743,6 +743,7 @@ static void skd_request_fn(struct request_queue *q)
break;
}
skreq->discard_page = 1;
req->completion_data = page;
skd_prep_discard_cdb(scsi_req, skreq, page, lba, count);
} else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
@ -855,10 +856,9 @@ static void skd_end_request(struct skd_device *skdev,
if ((io_flags & REQ_DISCARD) &&
(skreq->discard_page == 1)) {
struct bio *bio = req->bio;
pr_debug("%s:%s:%d, free the page!",
skdev->name, __func__, __LINE__);
__free_page(bio->bi_io_vec->bv_page);
__free_page(req->completion_data);
}
if (unlikely(error)) {

View file

@ -902,6 +902,7 @@ void add_disk_randomness(struct gendisk *disk)
add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif
/*********************************************************************

View file

@ -737,6 +737,7 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
goto out;
}
rq->completion_data = page;
blk_add_request_payload(rq, page, len);
ret = scsi_setup_blk_pc_cmnd(sdp, rq);
rq->__data_len = nr_bytes;
@ -839,11 +840,9 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq)
{
struct scsi_cmnd *SCpnt = rq->special;
if (rq->cmd_flags & REQ_DISCARD) {
struct bio *bio = rq->bio;
if (rq->cmd_flags & REQ_DISCARD)
__free_page(rq->completion_data);
__free_page(bio->bi_io_vec->bv_page);
}
if (SCpnt->cmnd != rq->cmd) {
mempool_free(SCpnt->cmnd, sd_cdb_pool);
SCpnt->cmnd = NULL;

View file

@ -14,14 +14,13 @@ obj-y := open.o read_write.o file_table.o super.o \
stack.o fs_struct.o statfs.o
ifeq ($(CONFIG_BLOCK),y)
obj-y += buffer.o bio.o block_dev.o direct-io.o mpage.o ioprio.o
obj-y += buffer.o block_dev.o direct-io.o mpage.o
else
obj-y += no-block.o
endif
obj-$(CONFIG_PROC_FS) += proc_namespace.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o
obj-y += notify/
obj-$(CONFIG_EPOLL) += eventpoll.o
obj-$(CONFIG_ANON_INODES) += anon_inodes.o

View file

@ -333,7 +333,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries);
extern mempool_t *biovec_create_pool(int pool_entries);
extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);

View file

@ -8,7 +8,13 @@ struct blk_mq_tags;
struct blk_mq_cpu_notifier {
struct list_head list;
void *data;
void (*notify)(void *data, unsigned long action, unsigned int cpu);
int (*notify)(void *data, unsigned long action, unsigned int cpu);
};
struct blk_mq_ctxmap {
unsigned int map_size;
unsigned int bits_per_word;
struct blk_align_bitmap *map;
};
struct blk_mq_hw_ctx {
@ -21,6 +27,8 @@ struct blk_mq_hw_ctx {
struct delayed_work run_work;
struct delayed_work delay_work;
cpumask_var_t cpumask;
int next_cpu;
int next_cpu_batch;
unsigned long flags; /* BLK_MQ_F_* flags */
@ -29,10 +37,12 @@ struct blk_mq_hw_ctx {
void *driver_data;
struct blk_mq_ctxmap ctx_map;
unsigned int nr_ctx;
struct blk_mq_ctx **ctxs;
unsigned int nr_ctx_map;
unsigned long *ctx_map;
unsigned int wait_index;
struct blk_mq_tags *tags;
@ -44,6 +54,8 @@ struct blk_mq_hw_ctx {
unsigned int numa_node;
unsigned int cmd_size; /* per-request extra data */
atomic_t nr_active;
struct blk_mq_cpu_notifier cpu_notifier;
struct kobject kobj;
};
@ -51,7 +63,7 @@ struct blk_mq_hw_ctx {
struct blk_mq_tag_set {
struct blk_mq_ops *ops;
unsigned int nr_hw_queues;
unsigned int queue_depth;
unsigned int queue_depth; /* max hw supported */
unsigned int reserved_tags;
unsigned int cmd_size; /* per-request extra data */
int numa_node;
@ -60,12 +72,15 @@ struct blk_mq_tag_set {
void *driver_data;
struct blk_mq_tags **tags;
struct mutex tag_list_lock;
struct list_head tag_list;
};
typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *,
unsigned int);
unsigned int, int);
typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
@ -122,11 +137,14 @@ enum {
BLK_MQ_F_SHOULD_MERGE = 1 << 0,
BLK_MQ_F_SHOULD_SORT = 1 << 1,
BLK_MQ_F_SHOULD_IPI = 1 << 2,
BLK_MQ_F_TAG_SHARED = 1 << 2,
BLK_MQ_S_STOPPED = 0,
BLK_MQ_S_TAG_ACTIVE = 1,
BLK_MQ_MAX_DEPTH = 2048,
BLK_MQ_CPU_WORK_BATCH = 8,
};
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
@ -142,19 +160,20 @@ void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
gfp_t gfp, bool reserved);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
void blk_mq_end_io(struct request *rq, int error);
void __blk_mq_end_io(struct request *rq, int error);
void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
@ -163,6 +182,7 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data);
/*
* Driver command data is immediately after the request. So subtract request

View file

@ -190,6 +190,7 @@ enum rq_flag_bits {
__REQ_PM, /* runtime pm request */
__REQ_END, /* last of chain of requests */
__REQ_HASHED, /* on IO scheduler merge hash */
__REQ_MQ_INFLIGHT, /* track inflight for MQ */
__REQ_NR_BITS, /* stops here */
};
@ -243,5 +244,6 @@ enum rq_flag_bits {
#define REQ_PM (1ULL << __REQ_PM)
#define REQ_END (1ULL << __REQ_END)
#define REQ_HASHED (1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
#endif /* __LINUX_BLK_TYPES_H */

View file

@ -90,15 +90,15 @@ enum rq_cmd_type_bits {
#define BLK_MAX_CDB 16
/*
* try to put the fields that are referenced together in the same cacheline.
* if you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
* as well!
* Try to put the fields that are referenced together in the same cacheline.
*
* If you modify this structure, make sure to update blk_rq_init() and
* especially blk_mq_rq_ctx_init() to take care of the added fields.
*/
struct request {
struct list_head queuelist;
union {
struct call_single_data csd;
struct work_struct requeue_work;
unsigned long fifo_time;
};
@ -462,6 +462,10 @@ struct request_queue {
struct request *flush_rq;
spinlock_t mq_flush_lock;
struct list_head requeue_list;
spinlock_t requeue_lock;
struct work_struct requeue_work;
struct mutex sysfs_lock;
int bypass_depth;
@ -480,6 +484,9 @@ struct request_queue {
wait_queue_head_t mq_freeze_wq;
struct percpu_counter mq_usage_counter;
struct list_head all_q_node;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
};
#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */

View file

@ -30,7 +30,6 @@ endif
obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
obj-$(CONFIG_BOUNCE) += bounce.o
obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o
obj-$(CONFIG_FRONTSWAP) += frontswap.o
obj-$(CONFIG_ZSWAP) += zswap.o