From 93bee2d6ba212292896281eb4b496cef9c3f86d3 Mon Sep 17 00:00:00 2001 From: uNiXpSyChO Date: Sun, 1 Jan 2012 20:14:17 -0500 Subject: [PATCH 01/19] Compiler tuning --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index c4f219da6..7abc0592a 100644 --- a/Makefile +++ b/Makefile @@ -337,8 +337,8 @@ MODFLAGS = -DMODULE CFLAGS_MODULE = $(MODFLAGS) AFLAGS_MODULE = $(MODFLAGS) LDFLAGS_MODULE = -T $(srctree)/scripts/module-common.lds -CFLAGS_KERNEL = -AFLAGS_KERNEL = +CFLAGS_KERNEL = -fgcse-lm -fgcse-sm -fsched-spec-load -fforce-addr -ffast-math -fsingle-precision-constant -mtune=cortex-a9 -mfpu=neon -ftree-vectorize -funswitch-loops +AFLAGS_KERNEL = -fgcse-lm -fgcse-sm -fsched-spec-load -fforce-addr -ffast-math -fsingle-precision-constant -mtune=cortex-a9 -mfpu=neon -ftree-vectorize -funswitch-loops CFLAGS_GCOV = -fprofile-arcs -ftest-coverage From 0dad9991e639a299940e98a4c38d66efc5afef52 Mon Sep 17 00:00:00 2001 From: Rod Whitby Date: Wed, 4 Jan 2012 15:13:49 +1030 Subject: [PATCH 02/19] Bring back the ability to override the version string --- scripts/mkcompile_h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h index 50ad317a4..d5bc4e662 100755 --- a/scripts/mkcompile_h +++ b/scripts/mkcompile_h @@ -64,8 +64,8 @@ UTS_TRUNCATE="cut -b -$UTS_LEN" echo \#define UTS_VERSION \"`echo $UTS_VERSION | $UTS_TRUNCATE`\" echo \#define LINUX_COMPILE_TIME \"`date +%T`\" - echo \#define LINUX_COMPILE_BY \"`whoami`\" - echo \#define LINUX_COMPILE_HOST \"`hostname | $UTS_TRUNCATE`\" + echo \#define LINUX_COMPILE_BY \"`echo $KBUILD_BUILD_COMPILE_BY | $UTS_TRUNCATE`\" + echo \#define LINUX_COMPILE_HOST \"`echo $KBUILD_BUILD_COMPILE_HOST | $UTS_TRUNCATE`\" domain=`dnsdomainname 2> /dev/null` if [ -z "$domain" ]; then From bdfbc6f8e04c5c399bb17fb114fc7bab03fdfa20 Mon Sep 17 00:00:00 2001 From: uNiXpSyChO Date: Sun, 1 Jan 2012 20:33:23 -0500 Subject: [PATCH 03/19] gov bundle: SIO, VR, BFQ --- block/Kconfig.iosched | 51 + block/Makefile | 3 + block/bfq-cgroup.c | 769 +++++++++++ block/bfq-ioc.c | 374 ++++++ block/bfq-iosched.c | 2314 +++++++++++++++++++++++++++++++++ block/bfq-sched.c | 1010 ++++++++++++++ block/bfq.h | 558 ++++++++ block/blk-ioc.c | 27 +- block/cfq-iosched.c | 10 +- block/deadline-iosched.c | 2 +- block/sio-iosched.c | 346 +++++ block/vr-iosched.c | 446 +++++++ drivers/mmc/core/mmc.c | 8 +- drivers/mmc/host/msm_sdcc.c | 25 +- fs/ioprio.c | 9 +- include/linux/cgroup_subsys.h | 6 + include/linux/iocontext.h | 18 +- 17 files changed, 5937 insertions(+), 39 deletions(-) create mode 100644 block/bfq-cgroup.c create mode 100644 block/bfq-ioc.c create mode 100644 block/bfq-iosched.c create mode 100644 block/bfq-sched.c create mode 100644 block/bfq.h create mode 100644 block/sio-iosched.c create mode 100644 block/vr-iosched.c diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched index 3199b76f7..6a827731e 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched @@ -21,6 +21,13 @@ config IOSCHED_DEADLINE a new point in the service tree and doing a batch of IO from there in case of expiry. +config IOSCHED_VR + tristate "V(R) I/O scheduler" + default n + ---help--- + Requests are chosen according to SSTF with a penalty of rev_penalty + for switching head direction. + config IOSCHED_CFQ tristate "CFQ I/O scheduler" # If BLK_CGROUP is a module, CFQ has to be built as module. @@ -43,6 +50,38 @@ config CFQ_GROUP_IOSCHED ---help--- Enable group IO scheduling in CFQ. 
+config IOSCHED_BFQ
+	tristate "BFQ I/O scheduler"
+	depends on EXPERIMENTAL
+	default n
+	---help---
+	  The BFQ I/O scheduler tries to distribute bandwidth among
+	  all processes according to their weights.
+	  It aims at distributing the bandwidth as desired, independently of
+	  the disk parameters and with any workload. It also tries to
+	  guarantee low latency to interactive and soft real-time
+	  applications. If compiled built-in (saying Y here), BFQ can
+	  be configured to support hierarchical scheduling.
+
+config CGROUP_BFQIO
+	bool "BFQ hierarchical scheduling support"
+	depends on CGROUPS && IOSCHED_BFQ=y
+	default n
+	---help---
+	  Enable hierarchical scheduling in BFQ, using the cgroups
+	  filesystem interface. The name of the subsystem will be
+	  bfqio.
+
+config IOSCHED_SIO
+	tristate "Simple I/O scheduler"
+	default y
+	---help---
+	  The Simple I/O scheduler is an extremely simple scheduler that
+	  does basic merging and no sorting. It is aimed at random-access
+	  devices such as SSDs and USB flash drives.
+	  It distinguishes synchronous requests from asynchronous ones for
+	  better interactivity.
+
 choice
 	prompt "Default I/O scheduler"
 	default DEFAULT_CFQ
@@ -56,6 +95,15 @@ choice
 	config DEFAULT_CFQ
 		bool "CFQ" if IOSCHED_CFQ=y
 
+	config DEFAULT_VR
+		bool "V(R)" if IOSCHED_VR=y
+
+	config DEFAULT_BFQ
+		bool "BFQ" if IOSCHED_BFQ=y
+
+	config DEFAULT_SIO
+		bool "SIO" if IOSCHED_SIO=y
+
 	config DEFAULT_NOOP
 		bool "No-op"
 
@@ -65,6 +113,9 @@ config DEFAULT_IOSCHED
 	string
 	default "deadline" if DEFAULT_DEADLINE
 	default "cfq" if DEFAULT_CFQ
+	default "vr" if DEFAULT_VR
+	default "bfq" if DEFAULT_BFQ
+	default "sio" if DEFAULT_SIO
 	default "noop" if DEFAULT_NOOP
 
 endmenu
diff --git a/block/Makefile b/block/Makefile
index 0bb499a73..2f422474a 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -12,6 +12,9 @@ obj-$(CONFIG_BLK_CGROUP)	+= blk-cgroup.o
 obj-$(CONFIG_IOSCHED_NOOP)	+= noop-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE)	+= deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)	+= cfq-iosched.o
+obj-$(CONFIG_IOSCHED_BFQ)	+= bfq-iosched.o
+obj-$(CONFIG_IOSCHED_VR)	+= vr-iosched.o
+obj-$(CONFIG_IOSCHED_SIO)	+= sio-iosched.o
 
 obj-$(CONFIG_BLOCK_COMPAT)	+= compat_ioctl.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY)	+= blk-integrity.o
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
new file mode 100644
index 000000000..94e73e8e8
--- /dev/null
+++ b/block/bfq-cgroup.c
@@ -0,0 +1,769 @@
+/*
+ * BFQ: CGROUPS support.
+ *
+ * Based on ideas and code from CFQ:
+ * Copyright (C) 2003 Jens Axboe
+ *
+ * Copyright (C) 2008 Fabio Checconi
+ *		      Paolo Valente
+ *
+ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ file.
+ */
+
+#ifdef CONFIG_CGROUP_BFQIO
+static struct bfqio_cgroup bfqio_root_cgroup = {
+	.weight = BFQ_DEFAULT_GRP_WEIGHT,
+	.ioprio = BFQ_DEFAULT_GRP_IOPRIO,
+	.ioprio_class = BFQ_DEFAULT_GRP_CLASS,
+};
+
+static inline void bfq_init_entity(struct bfq_entity *entity,
+				   struct bfq_group *bfqg)
+{
+	entity->weight = entity->new_weight;
+	entity->orig_weight = entity->new_weight;
+	entity->ioprio = entity->new_ioprio;
+	entity->ioprio_class = entity->new_ioprio_class;
+	entity->parent = bfqg->my_entity;
+	entity->sched_data = &bfqg->sched_data;
+}
+
+static struct bfqio_cgroup *cgroup_to_bfqio(struct cgroup *cgroup)
+{
+	return container_of(cgroup_subsys_state(cgroup, bfqio_subsys_id),
+			    struct bfqio_cgroup, css);
+}
+
+/*
+ * Search the bfq_group for bfqd in the hash table (for now only a list)
+ * of bgrp. Must be called under rcu_read_lock().
+ */ +static struct bfq_group *bfqio_lookup_group(struct bfqio_cgroup *bgrp, + struct bfq_data *bfqd) +{ + struct bfq_group *bfqg; + struct hlist_node *n; + void *key; + + hlist_for_each_entry_rcu(bfqg, n, &bgrp->group_data, group_node) { + key = rcu_dereference(bfqg->bfqd); + if (key == bfqd) + return bfqg; + } + + return NULL; +} + +static inline void bfq_group_init_entity(struct bfqio_cgroup *bgrp, + struct bfq_group *bfqg) +{ + struct bfq_entity *entity = &bfqg->entity; + + entity->weight = entity->new_weight = bgrp->weight; + entity->orig_weight = entity->new_weight; + entity->ioprio = entity->new_ioprio = bgrp->ioprio; + entity->ioprio_class = entity->new_ioprio_class = bgrp->ioprio_class; + entity->ioprio_changed = 1; + entity->my_sched_data = &bfqg->sched_data; +} + +static inline void bfq_group_set_parent(struct bfq_group *bfqg, + struct bfq_group *parent) +{ + struct bfq_entity *entity; + + BUG_ON(parent == NULL); + BUG_ON(bfqg == NULL); + + entity = &bfqg->entity; + entity->parent = parent->my_entity; + entity->sched_data = &parent->sched_data; +} + +/** + * bfq_group_chain_alloc - allocate a chain of groups. + * @bfqd: queue descriptor. + * @cgroup: the leaf cgroup this chain starts from. + * + * Allocate a chain of groups starting from the one belonging to + * @cgroup up to the root cgroup. Stop if a cgroup on the chain + * to the root has already an allocated group on @bfqd. + */ +static struct bfq_group *bfq_group_chain_alloc(struct bfq_data *bfqd, + struct cgroup *cgroup) +{ + struct bfqio_cgroup *bgrp; + struct bfq_group *bfqg, *prev = NULL, *leaf = NULL; + + for (; cgroup != NULL; cgroup = cgroup->parent) { + bgrp = cgroup_to_bfqio(cgroup); + + bfqg = bfqio_lookup_group(bgrp, bfqd); + if (bfqg != NULL) { + /* + * All the cgroups in the path from there to the + * root must have a bfq_group for bfqd, so we don't + * need any more allocations. + */ + break; + } + + bfqg = kzalloc(sizeof(*bfqg), GFP_ATOMIC); + if (bfqg == NULL) + goto cleanup; + + bfq_group_init_entity(bgrp, bfqg); + bfqg->my_entity = &bfqg->entity; + + if (leaf == NULL) { + leaf = bfqg; + prev = leaf; + } else { + bfq_group_set_parent(prev, bfqg); + /* + * Build a list of allocated nodes using the bfqd + * filed, that is still unused and will be initialized + * only after the node will be connected. + */ + prev->bfqd = bfqg; + prev = bfqg; + } + } + + return leaf; + +cleanup: + while (leaf != NULL) { + prev = leaf; + leaf = leaf->bfqd; + kfree(prev); + } + + return NULL; +} + +/** + * bfq_group_chain_link - link an allocatd group chain to a cgroup hierarchy. + * @bfqd: the queue descriptor. + * @cgroup: the leaf cgroup to start from. + * @leaf: the leaf group (to be associated to @cgroup). + * + * Try to link a chain of groups to a cgroup hierarchy, connecting the + * nodes bottom-up, so we can be sure that when we find a cgroup in the + * hierarchy that already as a group associated to @bfqd all the nodes + * in the path to the root cgroup have one too. + * + * On locking: the queue lock protects the hierarchy (there is a hierarchy + * per device) while the bfqio_cgroup lock protects the list of groups + * belonging to the same cgroup. 
+ */ +static void bfq_group_chain_link(struct bfq_data *bfqd, struct cgroup *cgroup, + struct bfq_group *leaf) +{ + struct bfqio_cgroup *bgrp; + struct bfq_group *bfqg, *next, *prev = NULL; + unsigned long flags; + + assert_spin_locked(bfqd->queue->queue_lock); + + for (; cgroup != NULL && leaf != NULL; cgroup = cgroup->parent) { + bgrp = cgroup_to_bfqio(cgroup); + next = leaf->bfqd; + + bfqg = bfqio_lookup_group(bgrp, bfqd); + BUG_ON(bfqg != NULL); + + spin_lock_irqsave(&bgrp->lock, flags); + + rcu_assign_pointer(leaf->bfqd, bfqd); + hlist_add_head_rcu(&leaf->group_node, &bgrp->group_data); + hlist_add_head(&leaf->bfqd_node, &bfqd->group_list); + + spin_unlock_irqrestore(&bgrp->lock, flags); + + prev = leaf; + leaf = next; + } + + BUG_ON(cgroup == NULL && leaf != NULL); + if (cgroup != NULL && prev != NULL) { + bgrp = cgroup_to_bfqio(cgroup); + bfqg = bfqio_lookup_group(bgrp, bfqd); + bfq_group_set_parent(prev, bfqg); + } +} + +/** + * bfq_find_alloc_group - return the group associated to @bfqd in @cgroup. + * @bfqd: queue descriptor. + * @cgroup: cgroup being searched for. + * + * Return a group associated to @bfqd in @cgroup, allocating one if + * necessary. When a group is returned all the cgroups in the path + * to the root have a group associated to @bfqd. + * + * If the allocation fails, return the root group: this breaks guarantees + * but is a safe fallbak. If this loss becames a problem it can be + * mitigated using the equivalent weight (given by the product of the + * weights of the groups in the path from @group to the root) in the + * root scheduler. + * + * We allocate all the missing nodes in the path from the leaf cgroup + * to the root and we connect the nodes only after all the allocations + * have been successful. + */ +static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd, + struct cgroup *cgroup) +{ + struct bfqio_cgroup *bgrp = cgroup_to_bfqio(cgroup); + struct bfq_group *bfqg; + + bfqg = bfqio_lookup_group(bgrp, bfqd); + if (bfqg != NULL) + return bfqg; + + bfqg = bfq_group_chain_alloc(bfqd, cgroup); + if (bfqg != NULL) + bfq_group_chain_link(bfqd, cgroup, bfqg); + else + bfqg = bfqd->root_group; + + return bfqg; +} + +/** + * bfq_bfqq_move - migrate @bfqq to @bfqg. + * @bfqd: queue descriptor. + * @bfqq: the queue to move. + * @entity: @bfqq's entity. + * @bfqg: the group to move to. + * + * Move @bfqq to @bfqg, deactivating it from its old group and reactivating + * it on the new one. Avoid putting the entity on the old group idle tree. + * + * Must be called under the queue lock; the cgroup owning @bfqg must + * not disappear (by now this just means that we are called under + * rcu_read_lock()). + */ +static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, + struct bfq_entity *entity, struct bfq_group *bfqg) +{ + int busy, resume; + + busy = bfq_bfqq_busy(bfqq); + resume = !RB_EMPTY_ROOT(&bfqq->sort_list); + + BUG_ON(resume && !entity->on_st); + BUG_ON(busy && !resume && entity->on_st && bfqq != bfqd->active_queue); + + if (busy) { + BUG_ON(atomic_read(&bfqq->ref) < 2); + + if (!resume) + bfq_del_bfqq_busy(bfqd, bfqq, 0); + else + bfq_deactivate_bfqq(bfqd, bfqq, 0); + } + + /* + * Here we use a reference to bfqg. We don't need a refcounter + * as the cgroup reference will not be dropped, so that its + * destroy() callback will not be invoked. 
+ */ + entity->parent = bfqg->my_entity; + entity->sched_data = &bfqg->sched_data; + + if (busy && resume) + bfq_activate_bfqq(bfqd, bfqq); +} + +/** + * __bfq_cic_change_cgroup - move @cic to @cgroup. + * @bfqd: the queue descriptor. + * @cic: the cic to move. + * @cgroup: the cgroup to move to. + * + * Move cic to cgroup, assuming that bfqd->queue is locked; the caller + * has to make sure that the reference to cgroup is valid across the call. + * + * NOTE: an alternative approach might have been to store the current + * cgroup in bfqq and getting a reference to it, reducing the lookup + * time here, at the price of slightly more complex code. + */ +static struct bfq_group *__bfq_cic_change_cgroup(struct bfq_data *bfqd, + struct cfq_io_context *cic, + struct cgroup *cgroup) +{ + struct bfq_queue *async_bfqq = cic_to_bfqq(cic, 0); + struct bfq_queue *sync_bfqq = cic_to_bfqq(cic, 1); + struct bfq_entity *entity; + struct bfq_group *bfqg; + struct bfqio_cgroup *bgrp; + + bgrp = cgroup_to_bfqio(cgroup); + + bfqg = bfq_find_alloc_group(bfqd, cgroup); + if (async_bfqq != NULL) { + entity = &async_bfqq->entity; + + if (entity->sched_data != &bfqg->sched_data) { + cic_set_bfqq(cic, NULL, 0); + bfq_log_bfqq(bfqd, async_bfqq, + "cic_change_group: %p %d", + async_bfqq, async_bfqq->ref); + bfq_put_queue(async_bfqq); + } + } + + if (sync_bfqq != NULL) { + entity = &sync_bfqq->entity; + if (entity->sched_data != &bfqg->sched_data) + bfq_bfqq_move(bfqd, sync_bfqq, entity, bfqg); + } + + return bfqg; +} + +/** + * bfq_cic_change_cgroup - move @cic to @cgroup. + * @cic: the cic being migrated. + * @cgroup: the destination cgroup. + * + * When the task owning @cic is moved to @cgroup, @cic is immediately + * moved into its new parent group. + */ +static void bfq_cic_change_cgroup(struct cfq_io_context *cic, + struct cgroup *cgroup) +{ + struct bfq_data *bfqd; + unsigned long uninitialized_var(flags); + + bfqd = bfq_get_bfqd_locked(&cic->key, &flags); + if (bfqd != NULL) { + __bfq_cic_change_cgroup(bfqd, cic, cgroup); + bfq_put_bfqd_unlock(bfqd, &flags); + } +} + +/** + * bfq_cic_update_cgroup - update the cgroup of @cic. + * @cic: the @cic to update. + * + * Make sure that @cic is enqueued in the cgroup of the current task. + * We need this in addition to moving cics during the cgroup attach + * phase because the task owning @cic could be at its first disk + * access or we may end up in the root cgroup as the result of a + * memory allocation failure and here we try to move to the right + * group. + * + * Must be called under the queue lock. It is safe to use the returned + * value even after the rcu_read_unlock() as the migration/destruction + * paths act under the queue lock too. IOW it is impossible to race with + * group migration/destruction and end up with an invalid group as: + * a) here cgroup has not yet been destroyed, nor its destroy callback + * has started execution, as current holds a reference to it, + * b) if it is destroyed after rcu_read_unlock() [after current is + * migrated to a different cgroup] its attach() callback will have + * taken care of remove all the references to the old cgroup data. 
+ */ +static struct bfq_group *bfq_cic_update_cgroup(struct cfq_io_context *cic) +{ + struct bfq_data *bfqd = cic->key; + struct bfq_group *bfqg; + struct cgroup *cgroup; + + BUG_ON(bfqd == NULL); + + rcu_read_lock(); + cgroup = task_cgroup(current, bfqio_subsys_id); + bfqg = __bfq_cic_change_cgroup(bfqd, cic, cgroup); + rcu_read_unlock(); + + return bfqg; +} + +/** + * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st. + * @st: the service tree being flushed. + */ +static inline void bfq_flush_idle_tree(struct bfq_service_tree *st) +{ + struct bfq_entity *entity = st->first_idle; + + for (; entity != NULL; entity = st->first_idle) + __bfq_deactivate_entity(entity, 0); +} + +/** + * bfq_destroy_group - destroy @bfqg. + * @bgrp: the bfqio_cgroup containing @bfqg. + * @bfqg: the group being destroyed. + * + * Destroy @bfqg, making sure that it is not referenced from its parent. + */ +static void bfq_destroy_group(struct bfqio_cgroup *bgrp, struct bfq_group *bfqg) +{ + struct bfq_data *bfqd; + struct bfq_service_tree *st; + struct bfq_entity *entity = bfqg->my_entity; + unsigned long uninitialized_var(flags); + int i; + + hlist_del(&bfqg->group_node); + + /* + * We may race with device destruction, take extra care when + * dereferencing bfqg->bfqd. + */ + bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags); + if (bfqd != NULL) { + hlist_del(&bfqg->bfqd_node); + __bfq_deactivate_entity(entity, 0); + bfq_put_async_queues(bfqd, bfqg); + bfq_put_bfqd_unlock(bfqd, &flags); + } + + for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) { + st = bfqg->sched_data.service_tree + i; + + /* + * The idle tree may still contain bfq_queues belonging + * to exited task because they never migrated to a different + * cgroup from the one being destroyed now. Noone else + * can access them so it's safe to act without any lock. + */ + bfq_flush_idle_tree(st); + + BUG_ON(!RB_EMPTY_ROOT(&st->active)); + BUG_ON(!RB_EMPTY_ROOT(&st->idle)); + } + BUG_ON(bfqg->sched_data.next_active != NULL); + BUG_ON(bfqg->sched_data.active_entity != NULL); + BUG_ON(entity->tree != NULL); + + /* + * No need to defer the kfree() to the end of the RCU grace + * period: we are called from the destroy() callback of our + * cgroup, so we can be sure that noone is a) still using + * this cgroup or b) doing lookups in it. + */ + kfree(bfqg); +} + +/** + * bfq_disconnect_groups - diconnect @bfqd from all its groups. + * @bfqd: the device descriptor being exited. + * + * When the device exits we just make sure that no lookup can return + * the now unused group structures. They will be deallocated on cgroup + * destruction. + */ +static void bfq_disconnect_groups(struct bfq_data *bfqd) +{ + struct hlist_node *pos, *n; + struct bfq_group *bfqg; + + bfq_log(bfqd, "disconnect_groups beginning") ; + hlist_for_each_entry_safe(bfqg, pos, n, &bfqd->group_list, bfqd_node) { + hlist_del(&bfqg->bfqd_node); + + __bfq_deactivate_entity(bfqg->my_entity, 0); + + /* + * Don't remove from the group hash, just set an + * invalid key. No lookups can race with the + * assignment as bfqd is being destroyed; this + * implies also that new elements cannot be added + * to the list. 
+ */ + rcu_assign_pointer(bfqg->bfqd, NULL); + + bfq_log(bfqd, "disconnect_groups: put async for group %p", + bfqg) ; + bfq_put_async_queues(bfqd, bfqg); + } +} + +static inline void bfq_free_root_group(struct bfq_data *bfqd) +{ + struct bfqio_cgroup *bgrp = &bfqio_root_cgroup; + struct bfq_group *bfqg = bfqd->root_group; + + bfq_put_async_queues(bfqd, bfqg); + + spin_lock_irq(&bgrp->lock); + hlist_del_rcu(&bfqg->group_node); + spin_unlock_irq(&bgrp->lock); + + /* + * No need to synchronize_rcu() here: since the device is gone + * there cannot be any read-side access to its root_group. + */ + kfree(bfqg); +} + +static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node) +{ + struct bfq_group *bfqg; + struct bfqio_cgroup *bgrp; + int i; + + bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node); + if (bfqg == NULL) + return NULL; + + bfqg->entity.parent = NULL; + for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) + bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; + + bgrp = &bfqio_root_cgroup; + spin_lock_irq(&bgrp->lock); + rcu_assign_pointer(bfqg->bfqd, bfqd); + hlist_add_head_rcu(&bfqg->group_node, &bgrp->group_data); + spin_unlock_irq(&bgrp->lock); + + return bfqg; +} + +#define SHOW_FUNCTION(__VAR) \ +static u64 bfqio_cgroup_##__VAR##_read(struct cgroup *cgroup, \ + struct cftype *cftype) \ +{ \ + struct bfqio_cgroup *bgrp; \ + u64 ret; \ + \ + if (!cgroup_lock_live_group(cgroup)) \ + return -ENODEV; \ + \ + bgrp = cgroup_to_bfqio(cgroup); \ + spin_lock_irq(&bgrp->lock); \ + ret = bgrp->__VAR; \ + spin_unlock_irq(&bgrp->lock); \ + \ + cgroup_unlock(); \ + \ + return ret; \ +} + +SHOW_FUNCTION(weight); +SHOW_FUNCTION(ioprio); +SHOW_FUNCTION(ioprio_class); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__VAR, __MIN, __MAX) \ +static int bfqio_cgroup_##__VAR##_write(struct cgroup *cgroup, \ + struct cftype *cftype, \ + u64 val) \ +{ \ + struct bfqio_cgroup *bgrp; \ + struct bfq_group *bfqg; \ + struct hlist_node *n; \ + \ + if (val < (__MIN) || val > (__MAX)) \ + return -EINVAL; \ + \ + if (!cgroup_lock_live_group(cgroup)) \ + return -ENODEV; \ + \ + bgrp = cgroup_to_bfqio(cgroup); \ + \ + spin_lock_irq(&bgrp->lock); \ + bgrp->__VAR = (unsigned short)val; \ + hlist_for_each_entry(bfqg, n, &bgrp->group_data, group_node) { \ + bfqg->entity.new_##__VAR = (unsigned short)val; \ + smp_wmb(); \ + bfqg->entity.ioprio_changed = 1; \ + } \ + spin_unlock_irq(&bgrp->lock); \ + \ + cgroup_unlock(); \ + \ + return 0; \ +} + +STORE_FUNCTION(weight, BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT); +STORE_FUNCTION(ioprio, 0, IOPRIO_BE_NR - 1); +STORE_FUNCTION(ioprio_class, IOPRIO_CLASS_RT, IOPRIO_CLASS_IDLE); +#undef STORE_FUNCTION + +static struct cftype bfqio_files[] = { + { + .name = "weight", + .read_u64 = bfqio_cgroup_weight_read, + .write_u64 = bfqio_cgroup_weight_write, + }, + { + .name = "ioprio", + .read_u64 = bfqio_cgroup_ioprio_read, + .write_u64 = bfqio_cgroup_ioprio_write, + }, + { + .name = "ioprio_class", + .read_u64 = bfqio_cgroup_ioprio_class_read, + .write_u64 = bfqio_cgroup_ioprio_class_write, + }, +}; + +static int bfqio_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup) +{ + return cgroup_add_files(cgroup, subsys, bfqio_files, + ARRAY_SIZE(bfqio_files)); +} + +static struct cgroup_subsys_state *bfqio_create(struct cgroup_subsys *subsys, + struct cgroup *cgroup) +{ + struct bfqio_cgroup *bgrp; + + if (cgroup->parent != NULL) { + bgrp = kzalloc(sizeof(*bgrp), GFP_KERNEL); + if (bgrp == NULL) + return ERR_PTR(-ENOMEM); + } else + bgrp = &bfqio_root_cgroup; + + 
spin_lock_init(&bgrp->lock); + INIT_HLIST_HEAD(&bgrp->group_data); + bgrp->ioprio = BFQ_DEFAULT_GRP_IOPRIO; + bgrp->ioprio_class = BFQ_DEFAULT_GRP_CLASS; + + return &bgrp->css; +} + +/* + * We cannot support shared io contexts, as we have no mean to support + * two tasks with the same ioc in two different groups without major rework + * of the main cic/bfqq data structures. By now we allow a task to change + * its cgroup only if it's the only owner of its ioc; the drawback of this + * behavior is that a group containing a task that forked using CLONE_IO + * will not be destroyed until the tasks sharing the ioc die. + */ +static int bfqio_can_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup, + struct task_struct *tsk, bool threadgroup) +{ + struct io_context *ioc; + int ret = 0; + + /* task_lock() is needed to avoid races with exit_io_context() */ + task_lock(tsk); + ioc = tsk->io_context; + if (ioc != NULL && atomic_read(&ioc->nr_tasks) > 1) + /* + * ioc == NULL means that the task is either too young or + * exiting: if it has still no ioc the ioc can't be shared, + * if the task is exiting the attach will fail anyway, no + * matter what we return here. + */ + ret = -EINVAL; + task_unlock(tsk); + + return ret; +} + +static void bfqio_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup, + struct cgroup *prev, struct task_struct *tsk, + bool threadgroup) +{ + struct io_context *ioc; + struct cfq_io_context *cic; + struct hlist_node *n; + + task_lock(tsk); + ioc = tsk->io_context; + if (ioc != NULL) { + BUG_ON(atomic_read(&ioc->refcount) == 0); + atomic_inc(&ioc->refcount); + } + task_unlock(tsk); + + if (ioc == NULL) + return; + + rcu_read_lock(); + hlist_for_each_entry_rcu(cic, n, &ioc->bfq_cic_list, cic_list) + bfq_cic_change_cgroup(cic, cgroup); + rcu_read_unlock(); + + put_io_context(ioc); +} + +static void bfqio_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup) +{ + struct bfqio_cgroup *bgrp = cgroup_to_bfqio(cgroup); + struct hlist_node *n, *tmp; + struct bfq_group *bfqg; + + /* + * Since we are destroying the cgroup, there are no more tasks + * referencing it, and all the RCU grace periods that may have + * referenced it are ended (as the destruction of the parent + * cgroup is RCU-safe); bgrp->group_data will not be accessed by + * anything else and we don't need any synchronization. 
+ */ + hlist_for_each_entry_safe(bfqg, n, tmp, &bgrp->group_data, group_node) + bfq_destroy_group(bgrp, bfqg); + + BUG_ON(!hlist_empty(&bgrp->group_data)); + + kfree(bgrp); +} + +struct cgroup_subsys bfqio_subsys = { + .name = "bfqio", + .create = bfqio_create, + .can_attach = bfqio_can_attach, + .attach = bfqio_attach, + .destroy = bfqio_destroy, + .populate = bfqio_populate, + .subsys_id = bfqio_subsys_id, +}; +#else +static inline void bfq_init_entity(struct bfq_entity *entity, + struct bfq_group *bfqg) +{ + entity->weight = entity->new_weight; + entity->orig_weight = entity->new_weight; + entity->ioprio = entity->new_ioprio; + entity->ioprio_class = entity->new_ioprio_class; + entity->sched_data = &bfqg->sched_data; +} + +static inline struct bfq_group * +bfq_cic_update_cgroup(struct cfq_io_context *cic) +{ + struct bfq_data *bfqd = cic->key; + return bfqd->root_group; +} + +static inline void bfq_bfqq_move(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + struct bfq_entity *entity, + struct bfq_group *bfqg) +{ +} + +static inline void bfq_disconnect_groups(struct bfq_data *bfqd) +{ + bfq_put_async_queues(bfqd, bfqd->root_group); +} + +static inline void bfq_free_root_group(struct bfq_data *bfqd) +{ + kfree(bfqd->root_group); +} + +static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node) +{ + struct bfq_group *bfqg; + int i; + + bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node); + if (bfqg == NULL) + return NULL; + + for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) + bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; + + return bfqg; +} +#endif diff --git a/block/bfq-ioc.c b/block/bfq-ioc.c new file mode 100644 index 000000000..9d06cebde --- /dev/null +++ b/block/bfq-ioc.c @@ -0,0 +1,374 @@ +/* + * BFQ: I/O context handling. + * + * Based on ideas and code from CFQ: + * Copyright (C) 2003 Jens Axboe + * + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + */ + +/** + * bfq_cic_free_rcu - deferred cic freeing. + * @head: RCU head of the cic to free. + * + * Free the cic containing @head and, if it was the last one and + * the module is exiting wake up anyone waiting for its deallocation + * (see bfq_exit()). + */ +static void bfq_cic_free_rcu(struct rcu_head *head) +{ + struct cfq_io_context *cic; + + cic = container_of(head, struct cfq_io_context, rcu_head); + + kmem_cache_free(bfq_ioc_pool, cic); + elv_ioc_count_dec(bfq_ioc_count); + + if (bfq_ioc_gone != NULL) { + spin_lock(&bfq_ioc_gone_lock); + if (bfq_ioc_gone != NULL && + !elv_ioc_count_read(bfq_ioc_count)) { + complete(bfq_ioc_gone); + bfq_ioc_gone = NULL; + } + spin_unlock(&bfq_ioc_gone_lock); + } +} + +static void bfq_cic_free(struct cfq_io_context *cic) +{ + call_rcu(&cic->rcu_head, bfq_cic_free_rcu); +} + +/** + * cic_free_func - disconnect a cic ready to be freed. + * @ioc: the io_context @cic belongs to. + * @cic: the cic to be freed. + * + * Remove @cic from the @ioc radix tree hash and from its cic list, + * deferring the deallocation of @cic to the end of the current RCU + * grace period. This assumes that __bfq_exit_single_io_context() + * has already been called for @cic. 
+ */ +static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic) +{ + unsigned long flags; + unsigned long dead_key = (unsigned long) cic->key; + + BUG_ON(!(dead_key & CIC_DEAD_KEY)); + + spin_lock_irqsave(&ioc->lock, flags); + radix_tree_delete(&ioc->bfq_radix_root, + dead_key >> CIC_DEAD_INDEX_SHIFT); + hlist_del_init_rcu(&cic->cic_list); + spin_unlock_irqrestore(&ioc->lock, flags); + + bfq_cic_free(cic); +} + +static void bfq_free_io_context(struct io_context *ioc) +{ + /* + * ioc->refcount is zero here, or we are called from elv_unregister(), + * so no more cic's are allowed to be linked into this ioc. So it + * should be ok to iterate over the known list, we will see all cic's + * since no new ones are added. + */ + call_for_each_cic(ioc, cic_free_func); +} + +/** + * __bfq_exit_single_io_context - deassociate @cic from any running task. + * @bfqd: bfq_data on which @cic is valid. + * @cic: the cic being exited. + * + * Whenever no more tasks are using @cic or @bfqd is deallocated we + * need to invalidate its entry in the radix tree hash table and to + * release the queues it refers to. + * + * Called under the queue lock. + */ +static void __bfq_exit_single_io_context(struct bfq_data *bfqd, + struct cfq_io_context *cic) +{ + struct io_context *ioc = cic->ioc; + + list_del_init(&cic->queue_list); + + /* + * Make sure dead mark is seen for dead queues + */ + smp_wmb(); + rcu_assign_pointer(cic->key, bfqd_dead_key(bfqd)); + + /* + * No write-side locking as no task is using @ioc (they're exited + * or bfqd is being deallocated. + */ + if (ioc->ioc_data == cic) + rcu_assign_pointer(ioc->ioc_data, NULL); + + if (cic->cfqq[BLK_RW_ASYNC] != NULL) { + bfq_exit_bfqq(bfqd, cic->cfqq[BLK_RW_ASYNC]); + cic->cfqq[BLK_RW_ASYNC] = NULL; + } + + if (cic->cfqq[BLK_RW_SYNC] != NULL) { + bfq_exit_bfqq(bfqd, cic->cfqq[BLK_RW_SYNC]); + cic->cfqq[BLK_RW_SYNC] = NULL; + } +} + +/** + * bfq_exit_single_io_context - deassociate @cic from @ioc (unlocked version). + * @ioc: the io_context @cic belongs to. + * @cic: the cic being exited. + * + * Take the queue lock and call __bfq_exit_single_io_context() to do the + * rest of the work. We take care of possible races with bfq_exit_queue() + * using bfq_get_bfqd_locked() (and abusing a little bit the RCU mechanism). + */ +static void bfq_exit_single_io_context(struct io_context *ioc, + struct cfq_io_context *cic) +{ + struct bfq_data *bfqd; + unsigned long uninitialized_var(flags); + + bfqd = bfq_get_bfqd_locked(&cic->key, &flags); + if (bfqd != NULL) { + __bfq_exit_single_io_context(bfqd, cic); + bfq_put_bfqd_unlock(bfqd, &flags); + } +} + +/** + * bfq_exit_io_context - deassociate @ioc from all cics it owns. + * @ioc: the @ioc being exited. + * + * No more processes are using @ioc we need to clean up and put the + * internal structures we have that belongs to that process. Loop + * through all its cics, locking their queues and exiting them. 
+ */ +static void bfq_exit_io_context(struct io_context *ioc) +{ + call_for_each_cic(ioc, bfq_exit_single_io_context); +} + +static struct cfq_io_context *bfq_alloc_io_context(struct bfq_data *bfqd, + gfp_t gfp_mask) +{ + struct cfq_io_context *cic; + + cic = kmem_cache_alloc_node(bfq_ioc_pool, gfp_mask | __GFP_ZERO, + bfqd->queue->node); + if (cic != NULL) { + cic->last_end_request = jiffies; + INIT_LIST_HEAD(&cic->queue_list); + INIT_HLIST_NODE(&cic->cic_list); + cic->dtor = bfq_free_io_context; + cic->exit = bfq_exit_io_context; + elv_ioc_count_inc(bfq_ioc_count); + } + + return cic; +} + +/** + * bfq_drop_dead_cic - free an exited cic. + * @bfqd: bfq data for the device in use. + * @ioc: io_context owning @cic. + * @cic: the @cic to free. + * + * We drop cfq io contexts lazily, so we may find a dead one. + */ +static void bfq_drop_dead_cic(struct bfq_data *bfqd, struct io_context *ioc, + struct cfq_io_context *cic) +{ + unsigned long flags; + + WARN_ON(!list_empty(&cic->queue_list)); + BUG_ON(cic->key != bfqd_dead_key(bfqd)); + + spin_lock_irqsave(&ioc->lock, flags); + + BUG_ON(ioc->ioc_data == cic); + + /* + * With shared I/O contexts two lookups may race and drop the + * same cic more than one time: RCU guarantees that the storage + * will not be freed too early, here we make sure that we do + * not try to remove the cic from the hashing structures multiple + * times. + */ + if (!hlist_unhashed(&cic->cic_list)) { + radix_tree_delete(&ioc->bfq_radix_root, bfqd->cic_index); + hlist_del_init_rcu(&cic->cic_list); + bfq_cic_free(cic); + } + + spin_unlock_irqrestore(&ioc->lock, flags); +} + +/** + * bfq_cic_lookup - search into @ioc a cic associated to @bfqd. + * @bfqd: the lookup key. + * @ioc: the io_context of the process doing I/O. + * + * If @ioc already has a cic associated to @bfqd return it, return %NULL + * otherwise. + */ +static struct cfq_io_context *bfq_cic_lookup(struct bfq_data *bfqd, + struct io_context *ioc) +{ + struct cfq_io_context *cic; + unsigned long flags; + void *k; + + if (unlikely(ioc == NULL)) + return NULL; + + rcu_read_lock(); + + /* We maintain a last-hit cache, to avoid browsing over the tree. */ + cic = rcu_dereference(ioc->ioc_data); + if (cic != NULL) { + k = rcu_dereference(cic->key); + if (k == bfqd) + goto out; + } + + do { + cic = radix_tree_lookup(&ioc->bfq_radix_root, + bfqd->cic_index); + if (cic == NULL) + goto out; + + k = rcu_dereference(cic->key); + if (unlikely(k != bfqd)) { + rcu_read_unlock(); + bfq_drop_dead_cic(bfqd, ioc, cic); + rcu_read_lock(); + continue; + } + + spin_lock_irqsave(&ioc->lock, flags); + rcu_assign_pointer(ioc->ioc_data, cic); + spin_unlock_irqrestore(&ioc->lock, flags); + break; + } while (1); + +out: + rcu_read_unlock(); + + return cic; +} + +/** + * bfq_cic_link - add @cic to @ioc. + * @bfqd: bfq_data @cic refers to. + * @ioc: io_context @cic belongs to. + * @cic: the cic to link. + * @gfp_mask: the mask to use for radix tree preallocations. + * + * Add @cic to @ioc, using @bfqd as the search key. This enables us to + * lookup the process specific cfq io context when entered from the block + * layer. Also adds @cic to a per-bfqd list, used when this queue is + * removed. + */ +static int bfq_cic_link(struct bfq_data *bfqd, struct io_context *ioc, + struct cfq_io_context *cic, gfp_t gfp_mask) +{ + unsigned long flags; + int ret; + + ret = radix_tree_preload(gfp_mask); + if (ret == 0) { + cic->ioc = ioc; + + /* No write-side locking, cic is not published yet. 
*/ + rcu_assign_pointer(cic->key, bfqd); + + spin_lock_irqsave(&ioc->lock, flags); + ret = radix_tree_insert(&ioc->bfq_radix_root, + bfqd->cic_index, cic); + if (ret == 0) + hlist_add_head_rcu(&cic->cic_list, &ioc->bfq_cic_list); + spin_unlock_irqrestore(&ioc->lock, flags); + + radix_tree_preload_end(); + + if (ret == 0) { + spin_lock_irqsave(bfqd->queue->queue_lock, flags); + list_add(&cic->queue_list, &bfqd->cic_list); + spin_unlock_irqrestore(bfqd->queue->queue_lock, flags); + } + } + + if (ret != 0) + printk(KERN_ERR "bfq: cic link failed!\n"); + + return ret; +} + +/** + * bfq_ioc_set_ioprio - signal a priority change to the cics belonging to @ioc. + * @ioc: the io_context changing its priority. + */ +static inline void bfq_ioc_set_ioprio(struct io_context *ioc) +{ + call_for_each_cic(ioc, bfq_changed_ioprio); +} + +/** + * bfq_get_io_context - return the @cic associated to @bfqd in @ioc. + * @bfqd: the search key. + * @gfp_mask: the mask to use for cic allocation. + * + * Setup general io context and cfq io context. There can be several cfq + * io contexts per general io context, if this process is doing io to more + * than one device managed by cfq. + */ +static struct cfq_io_context *bfq_get_io_context(struct bfq_data *bfqd, + gfp_t gfp_mask) +{ + struct io_context *ioc = NULL; + struct cfq_io_context *cic; + + might_sleep_if(gfp_mask & __GFP_WAIT); + + ioc = get_io_context(gfp_mask, bfqd->queue->node); + if (ioc == NULL) + return NULL; + + /* Lookup for an existing cic. */ + cic = bfq_cic_lookup(bfqd, ioc); + if (cic != NULL) + goto out; + + /* Alloc one if needed. */ + cic = bfq_alloc_io_context(bfqd, gfp_mask); + if (cic == NULL) + goto err; + + /* Link it into the ioc's radix tree and cic list. */ + if (bfq_cic_link(bfqd, ioc, cic, gfp_mask) != 0) + goto err_free; + +out: + /* + * test_and_clear_bit() implies a memory barrier, paired with + * the wmb() in fs/ioprio.c, so the value seen for ioprio is the + * new one. + */ + if (unlikely(test_and_clear_bit(IOC_BFQ_IOPRIO_CHANGED, + ioc->ioprio_changed))) + bfq_ioc_set_ioprio(ioc); + + return cic; +err_free: + bfq_cic_free(cic); +err: + put_io_context(ioc); + return NULL; +} diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c new file mode 100644 index 000000000..a98a20d20 --- /dev/null +++ b/block/bfq-iosched.c @@ -0,0 +1,2314 @@ +/* + * BFQ, or Budget Fair Queueing, disk scheduler. + * + * Based on ideas and code from CFQ: + * Copyright (C) 2003 Jens Axboe + * + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + * + * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ file. + * + * BFQ is a proportional share disk scheduling algorithm based on the + * slice-by-slice service scheme of CFQ. But BFQ assigns budgets, + * measured in number of sectors, to tasks instead of time slices. + * The disk is not granted to the active task for a given time slice, + * but until it has exahusted its assigned budget. This change from + * the time to the service domain allows BFQ to distribute the disk + * bandwidth among tasks as desired, without any distortion due to + * ZBR, workload fluctuations or other factors. BFQ uses an ad hoc + * internal scheduler, called B-WF2Q+, to schedule tasks according to + * their budgets. Thanks to this accurate scheduler, BFQ can afford + * to assign high budgets to disk-bound non-seeky tasks (to boost the + * throughput), and yet guarantee low latencies to interactive and + * soft real-time applications. 
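+ *
+ * For example (a clarifying illustration of the proportional-share claim
+ * above, not part of the original description): if two processes with
+ * weights 100 and 200 are both continuously backlogged, over time the
+ * second receives twice as much service, measured in sectors, as the
+ * first, independently of their request sizes and of the disk zones
+ * they access.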
+ * + * BFQ has been introduced in [1], where the interested reader can + * find an accurate description of the algorithm, the bandwidth + * distribution and latency guarantees it provides, plus formal proofs + * of all the properties. With respect to the algorithm presented in + * the paper, this implementation adds several little heuristics, and + * a hierarchical extension, based on H-WF2Q+. + * + * B-WF2Q+ is based on WF2Q+, that is described in [2], together with + * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N) + * complexity derives from the one introduced with EEVDF in [3]. + * + * [1] P. Valente and F. Checconi, ``High Throughput Disk Scheduling + * with Deterministic Guarantees on Bandwidth Distribution,'' to appear + * on IEEE Transactions on Computer. + * + * http://algo.ing.unimo.it/people/paolo/disk_sched/bfq.pdf + * + * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing + * Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689, + * Oct 1997. + * + * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz + * + * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline + * First: A Flexible and Accurate Mechanism for Proportional Share + * Resource Allocation,'' technical report. + * + * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf + */ +#include +#include +#include +#include +#include +#include +#include +#include "bfq.h" + +/* Max number of dispatches in one round of service. */ +static const int bfq_quantum = 4; + +/* Expiration time of sync (0) and async (1) requests, in jiffies. */ +static const int bfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; + +/* Maximum backwards seek, in KiB. */ +static const int bfq_back_max = 16 * 1024; + +/* Penalty of a backwards seek, in number of sectors. */ +static const int bfq_back_penalty = 2; + +/* Idling period duration, in jiffies. */ +static int bfq_slice_idle = HZ / 125; + +/* Default maximum budget values, in sectors and number of requests. */ +static const int bfq_default_max_budget = 16 * 1024; +static const int bfq_max_budget_async_rq = 4; + +/* + * Async to sync throughput distribution is controlled as follows: + * when an async request is served, the entity is charged the number + * of sectors of the request, multipled by the factor below + */ +static const int bfq_async_charge_factor = 10; + +/* Default timeout values, in jiffies, approximating CFQ defaults. */ +static const int bfq_timeout_sync = HZ / 8; +static int bfq_timeout_async = HZ / 25; + +struct kmem_cache *bfq_pool; +struct kmem_cache *bfq_ioc_pool; + +static DEFINE_PER_CPU(unsigned long, bfq_ioc_count); +static struct completion *bfq_ioc_gone; +static DEFINE_SPINLOCK(bfq_ioc_gone_lock); + +static DEFINE_SPINLOCK(cic_index_lock); +static DEFINE_IDA(cic_index_ida); + +/* Below this threshold (in ms), we consider thinktime immediate. */ +#define BFQ_MIN_TT 2 + +/* hw_tag detection: parallel requests threshold and min samples needed. */ +#define BFQ_HW_QUEUE_THRESHOLD 4 +#define BFQ_HW_QUEUE_SAMPLES 32 + +#define BFQQ_SEEKY(bfqq) ((bfqq)->seek_mean > (8 * 1024)) + +/* Min samples used for peak rate estimation (for autotuning). */ +#define BFQ_PEAK_RATE_SAMPLES 32 + +/* Shift used for peak rate fixed precision calculations. 
*/ +#define BFQ_RATE_SHIFT 16 + +#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \ + { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 }) + +#define RQ_CIC(rq) \ + ((struct cfq_io_context *) (rq)->elevator_private) +#define RQ_BFQQ(rq) ((rq)->elevator_private2) + +#include "bfq-ioc.c" +#include "bfq-sched.c" +#include "bfq-cgroup.c" + +#define bfq_class_idle(cfqq) ((bfqq)->entity.ioprio_class ==\ + IOPRIO_CLASS_IDLE) + +#define bfq_sample_valid(samples) ((samples) > 80) + +/* + * We regard a request as SYNC, if either it's a read or has the SYNC bit + * set (in which case it could also be a direct WRITE). + */ +static inline int bfq_bio_sync(struct bio *bio) +{ + if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO)) + return 1; + + return 0; +} + +/* + * Scheduler run of queue, if there are requests pending and no one in the + * driver that will restart queueing. + */ +static inline void bfq_schedule_dispatch(struct bfq_data *bfqd) +{ + if (bfqd->queued != 0) { + bfq_log(bfqd, "schedule dispatch"); + kblockd_schedule_work(bfqd->queue, &bfqd->unplug_work); + } +} + +static inline int bfq_queue_empty(struct request_queue *q) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + + return bfqd->queued == 0; +} + +/* + * Lifted from AS - choose which of rq1 and rq2 that is best served now. + * We choose the request that is closesr to the head right now. Distance + * behind the head is penalized and only allowed to a certain extent. + */ +static struct request *bfq_choose_req(struct bfq_data *bfqd, + struct request *rq1, + struct request *rq2) +{ + sector_t last, s1, s2, d1 = 0, d2 = 0; + unsigned long back_max; +#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */ +#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */ + unsigned wrap = 0; /* bit mask: requests behind the disk head? */ + + if (rq1 == NULL || rq1 == rq2) + return rq2; + if (rq2 == NULL) + return rq1; + + if (rq_is_sync(rq1) && !rq_is_sync(rq2)) + return rq1; + else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) + return rq2; + if (rq_is_meta(rq1) && !rq_is_meta(rq2)) + return rq1; + else if (rq_is_meta(rq2) && !rq_is_meta(rq1)) + return rq2; + + s1 = blk_rq_pos(rq1); + s2 = blk_rq_pos(rq2); + + last = bfqd->last_position; + + /* + * By definition, 1KiB is 2 sectors. + */ + back_max = bfqd->bfq_back_max * 2; + + /* + * Strict one way elevator _except_ in the case where we allow + * short backward seeks which are biased as twice the cost of a + * similar forward seek. + */ + if (s1 >= last) + d1 = s1 - last; + else if (s1 + back_max >= last) + d1 = (last - s1) * bfqd->bfq_back_penalty; + else + wrap |= BFQ_RQ1_WRAP; + + if (s2 >= last) + d2 = s2 - last; + else if (s2 + back_max >= last) + d2 = (last - s2) * bfqd->bfq_back_penalty; + else + wrap |= BFQ_RQ2_WRAP; + + /* Found required data */ + + /* + * By doing switch() on the bit mask "wrap" we avoid having to + * check two variables for all permutations: --> faster! + */ + switch (wrap) { + case 0: /* common case for CFQ: rq1 and rq2 not wrapped */ + if (d1 < d2) + return rq1; + else if (d2 < d1) + return rq2; + else { + if (s1 >= s2) + return rq1; + else + return rq2; + } + + case BFQ_RQ2_WRAP: + return rq1; + case BFQ_RQ1_WRAP: + return rq2; + case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */ + default: + /* + * Since both rqs are wrapped, + * start with the one that's further behind head + * (--> only *one* back seek required), + * since back seek takes more time than forward. 
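+		 * For example, with the head at sector 200000 and the
+		 * two requests at sectors 1000 and 100000, serving 1000
+		 * first turns the move to 100000 into a forward seek,
+		 * while serving 100000 first would require a second
+		 * backward seek to reach 1000.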
+ */ + if (s1 <= s2) + return rq1; + else + return rq2; + } +} + +static struct request *bfq_find_next_rq(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + struct request *last) +{ + struct rb_node *rbnext = rb_next(&last->rb_node); + struct rb_node *rbprev = rb_prev(&last->rb_node); + struct request *next = NULL, *prev = NULL; + + BUG_ON(RB_EMPTY_NODE(&last->rb_node)); + + if (rbprev != NULL) + prev = rb_entry_rq(rbprev); + + if (rbnext != NULL) + next = rb_entry_rq(rbnext); + else { + rbnext = rb_first(&bfqq->sort_list); + if (rbnext && rbnext != &last->rb_node) + next = rb_entry_rq(rbnext); + } + + return bfq_choose_req(bfqd, next, prev); +} + +static void bfq_del_rq_rb(struct request *rq) +{ + struct bfq_queue *bfqq = RQ_BFQQ(rq); + struct bfq_data *bfqd = bfqq->bfqd; + const int sync = rq_is_sync(rq); + + BUG_ON(bfqq->queued[sync] == 0); + bfqq->queued[sync]--; + bfqd->queued--; + + elv_rb_del(&bfqq->sort_list, rq); + + if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->active_queue && + RB_EMPTY_ROOT(&bfqq->sort_list)) + bfq_del_bfqq_busy(bfqd, bfqq, 1); +} + +/* see the definition of bfq_async_charge_factor for details */ +static inline bfq_service_t bfq_serv_to_charge(struct request *rq, + struct bfq_queue *bfqq) +{ + return blk_rq_sectors(rq) * + (1 + ((!bfq_bfqq_sync(bfqq)) * bfq_async_charge_factor)); +} + +/** + * bfq_updated_next_req - update the queue after a new next_rq selection. + * @bfqd: the device data the queue belongs to. + * @bfqq: the queue to update. + * + * If the first request of a queue changes we make sure that the queue + * has enough budget to serve at least its first request (if the + * request has grown). We do this because if the queue has not enough + * budget for its first request, it has to go through two dispatch + * rounds to actually get it dispatched. + */ +static void bfq_updated_next_req(struct bfq_data *bfqd, + struct bfq_queue *bfqq) +{ + struct bfq_entity *entity = &bfqq->entity; + struct bfq_service_tree *st = bfq_entity_service_tree(entity); + struct request *next_rq = bfqq->next_rq; + bfq_service_t new_budget; + + if (next_rq == NULL) + return; + + if (bfqq == bfqd->active_queue) + /* + * In order not to break guarantees, budgets cannot be + * changed after an entity has been selected. + */ + return; + + BUG_ON(entity->tree != &st->active); + BUG_ON(entity == entity->sched_data->active_entity); + + new_budget = max_t(bfq_service_t, bfqq->max_budget, + bfq_serv_to_charge(next_rq, bfqq)); + entity->budget = new_budget; + bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", new_budget); + bfq_activate_bfqq(bfqd, bfqq); +} + +static void bfq_add_rq_rb(struct request *rq) +{ + struct bfq_queue *bfqq = RQ_BFQQ(rq); + struct bfq_entity *entity = &bfqq->entity; + struct bfq_data *bfqd = bfqq->bfqd; + struct request *__alias, *next_rq; + + bfq_log_bfqq(bfqd, bfqq, "add_rq_rb %d", rq_is_sync(rq)); + bfqq->queued[rq_is_sync(rq)]++; + bfqd->queued++; + + /* + * Looks a little odd, but the first insert might return an alias, + * if that happens, put the alias on the dispatch list. + */ + while ((__alias = elv_rb_add(&bfqq->sort_list, rq)) != NULL) + bfq_dispatch_insert(bfqd->queue, __alias); + + /* + * Check if this request is a better next-serve candidate. 
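+	 * (i.e., whether bfq_choose_req() prefers it over the current
+	 * bfqq->next_rq, mainly based on its distance from the head).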
+ */ + next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq); + BUG_ON(next_rq == NULL); + bfqq->next_rq = next_rq; + + if (!bfq_bfqq_busy(bfqq)) { + entity->budget = max_t(bfq_service_t, bfqq->max_budget, + bfq_serv_to_charge(next_rq, bfqq)); + + /* + * If the queue is not being boosted and has been idle + * for enough time, start a boosting period + */ + if (bfqd->low_latency && bfqq->high_weight_budget == 0) { + if(bfqq->last_activation_time + BFQ_MIN_ACT_INTERVAL < + jiffies_to_msecs(jiffies)) { + bfqq->high_weight_budget = BFQ_BOOST_BUDGET; + entity->ioprio_changed = 1; + bfq_log_bfqq(bfqd, bfqq, + "wboost starting at %lu msec", + bfqq->last_activation_time); + } + bfqq->last_activation_time = + jiffies_to_msecs(jiffies); + } + + bfq_add_bfqq_busy(bfqd, bfqq); + } else + bfq_updated_next_req(bfqd, bfqq); +} + +static void bfq_reposition_rq_rb(struct bfq_queue *bfqq, struct request *rq) +{ + elv_rb_del(&bfqq->sort_list, rq); + bfqq->queued[rq_is_sync(rq)]--; + bfqq->bfqd->queued--; + bfq_add_rq_rb(rq); +} + +static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, + struct bio *bio) +{ + struct task_struct *tsk = current; + struct cfq_io_context *cic; + struct bfq_queue *bfqq; + + cic = bfq_cic_lookup(bfqd, tsk->io_context); + if (cic == NULL) + return NULL; + + bfqq = cic_to_bfqq(cic, bfq_bio_sync(bio)); + if (bfqq != NULL) { + sector_t sector = bio->bi_sector + bio_sectors(bio); + + return elv_rb_find(&bfqq->sort_list, sector); + } + + return NULL; +} + +static void bfq_activate_request(struct request_queue *q, struct request *rq) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + + bfqd->rq_in_driver++; + bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); +} + +static void bfq_deactivate_request(struct request_queue *q, struct request *rq) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + + WARN_ON(bfqd->rq_in_driver == 0); + bfqd->rq_in_driver--; +} + +static void bfq_remove_request(struct request *rq) +{ + struct bfq_queue *bfqq = RQ_BFQQ(rq); + struct bfq_data *bfqd = bfqq->bfqd; + + if (bfqq->next_rq == rq) { + bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq); + bfq_updated_next_req(bfqd, bfqq); + } + + list_del_init(&rq->queuelist); + bfq_del_rq_rb(rq); + + if (rq_is_meta(rq)) { + WARN_ON(bfqq->meta_pending == 0); + bfqq->meta_pending--; + } +} + +static int bfq_merge(struct request_queue *q, struct request **req, + struct bio *bio) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct request *__rq; + + __rq = bfq_find_rq_fmerge(bfqd, bio); + if (__rq != NULL && elv_rq_merge_ok(__rq, bio)) { + *req = __rq; + return ELEVATOR_FRONT_MERGE; + } + + return ELEVATOR_NO_MERGE; +} + +static void bfq_merged_request(struct request_queue *q, struct request *req, + int type) +{ + if (type == ELEVATOR_FRONT_MERGE) { + struct bfq_queue *bfqq = RQ_BFQQ(req); + + bfq_reposition_rq_rb(bfqq, req); + } +} + +static void bfq_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + /* + * Reposition in fifo if next is older than rq. + */ + if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && + time_before(next->start_time, rq->start_time)) + list_move(&rq->queuelist, &next->queuelist); + + bfq_remove_request(next); +} + +static int bfq_allow_merge(struct request_queue *q, struct request *rq, + struct bio *bio) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct cfq_io_context *cic; + struct bfq_queue *bfqq; + + /* Disallow merge of a sync bio into an async request. 
*/ + if (bfq_bio_sync(bio) && !rq_is_sync(rq)) + return 0; + + /* + * Lookup the bfqq that this bio will be queued with. Allow + * merge only if rq is queued there. + */ + cic = bfq_cic_lookup(bfqd, current->io_context); + if (cic == NULL) + return 0; + + bfqq = cic_to_bfqq(cic, bfq_bio_sync(bio)); + return bfqq == RQ_BFQQ(rq); +} + +static void __bfq_set_active_queue(struct bfq_data *bfqd, + struct bfq_queue *bfqq) +{ + if (bfqq != NULL) { + bfq_mark_bfqq_must_alloc(bfqq); + bfq_mark_bfqq_budget_new(bfqq); + bfq_clear_bfqq_fifo_expire(bfqq); + + bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8; + + bfq_log_bfqq(bfqd, bfqq, "set_active_queue, cur-budget = %lu", + bfqq->entity.budget); + } + + bfqd->active_queue = bfqq; +} + +/* + * Get and set a new active queue for service. + */ +static struct bfq_queue *bfq_set_active_queue(struct bfq_data *bfqd) +{ + struct bfq_queue *bfqq; + + bfqq = bfq_get_next_queue(bfqd); + __bfq_set_active_queue(bfqd, bfqq); + return bfqq; +} + +/* + * If enough samples have been computed, return the current max budget + * stored in bfqd, which is dynamically updated according to the + * estimated disk peak rate; otherwise return the default max budget + */ +static inline bfq_service_t bfq_max_budget(struct bfq_data *bfqd) +{ + return bfqd->budgets_assigned < 194 ? bfq_default_max_budget : + bfqd->bfq_max_budget; +} + +/* + * Return min budget, which is a fraction of the current or default + * max budget (trying with 1/32) + */ +static inline bfq_service_t bfq_min_budget(struct bfq_data *bfqd) +{ + return bfqd->budgets_assigned < 194 ? bfq_default_max_budget / 32 : + bfqd->bfq_max_budget / 32; +} + +static void bfq_arm_slice_timer(struct bfq_data *bfqd) +{ + struct bfq_queue *bfqq = bfqd->active_queue; + struct cfq_io_context *cic; + unsigned long sl; + + WARN_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); + + /* Idling is disabled, either manually or by past process history. */ + if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_idle_window(bfqq)) + return; + + /* Tasks have exited, don't wait. */ + cic = bfqd->active_cic; + if (cic == NULL || atomic_read(&cic->ioc->nr_tasks) == 0) + return; + + bfq_mark_bfqq_wait_request(bfqq); + + /* + * We don't want to idle for seeks, but we do want to allow + * fair distribution of slice time for a process doing back-to-back + * seeks. So allow a little bit of time for him to submit a new rq. + * + * To prevent processes with (partly) seeky workloads from + * being too ill-treated, grant them a small fraction of the + * assigned budget before reducing the waiting time to + * BFQ_MIN_TT. This happened to help reduce latency. + */ + sl = bfqd->bfq_slice_idle; + if (bfq_sample_valid(bfqq->seek_samples) && BFQQ_SEEKY(bfqq) && + bfqq->entity.service > bfq_max_budget(bfqd) / 8) + sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT)); + + bfqd->last_idling_start = ktime_get(); + mod_timer(&bfqd->idle_slice_timer, jiffies + sl); + bfq_log(bfqd, "arm idle: %lu ms", sl); +} + +/* + * Set the maximum time for the active queue to consume its + * budget. This prevents seeky processes from lowering the disk + * throughput (always guaranteed with a time slice scheme as in CFQ). 
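+ *
+ * The base timeout is scaled by the queue's current-to-original weight
+ * ratio, so a queue whose weight is temporarily raised (e.g. by the
+ * low_latency boosting) is also given proportionally more time to
+ * consume its budget.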
+ */ +static void bfq_set_budget_timeout(struct bfq_data *bfqd) +{ + struct bfq_queue *bfqq = bfqd->active_queue; + + bfqd->last_budget_start = ktime_get(); + + bfq_clear_bfqq_budget_new(bfqq); + bfqq->budget_timeout = jiffies + + bfqd->bfq_timeout[!!bfq_bfqq_sync(bfqq)] * + (bfqq->entity.weight / bfqq->entity.orig_weight); +} + +/* + * Move request from internal lists to the request queue dispatch list. + */ +static void bfq_dispatch_insert(struct request_queue *q, struct request *rq) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct bfq_queue *bfqq = RQ_BFQQ(rq); + + bfq_remove_request(rq); + bfqq->dispatched++; + elv_dispatch_sort(q, rq); + + if (bfq_bfqq_sync(bfqq)) + bfqd->sync_flight++; +} + +/* + * Return expired entry, or NULL to just start from scratch in rbtree. + */ +static struct request *bfq_check_fifo(struct bfq_queue *bfqq) +{ + struct bfq_data *bfqd = bfqq->bfqd; + struct request *rq; + int fifo; + + if (bfq_bfqq_fifo_expire(bfqq)) + return NULL; + + bfq_mark_bfqq_fifo_expire(bfqq); + + if (list_empty(&bfqq->fifo)) + return NULL; + + fifo = bfq_bfqq_sync(bfqq); + rq = rq_entry_fifo(bfqq->fifo.next); + + if (time_before(jiffies, rq->start_time + bfqd->bfq_fifo_expire[fifo])) + return NULL; + + return rq; +} + +static inline bfq_service_t bfq_bfqq_budget_left(struct bfq_queue *bfqq) +{ + struct bfq_entity *entity = &bfqq->entity; + return entity->budget - entity->service; +} + +static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) +{ + BUG_ON(bfqq != bfqd->active_queue); + + __bfq_bfqd_reset_active(bfqd); + + if (RB_EMPTY_ROOT(&bfqq->sort_list)) + bfq_del_bfqq_busy(bfqd, bfqq, 1); + else + bfq_activate_bfqq(bfqd, bfqq); +} + +/** + * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior. + * @bfqd: device data. + * @bfqq: queue to update. + * @reason: reason for expiration. + * + * Handle the feedback on @bfqq budget. See the body for detailed + * comments. + */ +static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + enum bfqq_expiration reason) +{ + struct request *next_rq; + bfq_service_t budget, min_budget; + + budget = bfqq->max_budget; + min_budget = bfq_min_budget(bfqd); + + BUG_ON(bfqq != bfqd->active_queue); + + bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %lu, budg left %lu", + bfqq->entity.budget, bfq_bfqq_budget_left(bfqq)); + bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %lu, min budg %lu", + budget, bfq_min_budget(bfqd)); + bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d", + bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->active_queue)); + + if (bfq_bfqq_sync(bfqq)) { + switch (reason) { + /* + * Caveat: in all the following cases we trade latency + * for throughput. + */ + case BFQ_BFQQ_TOO_IDLE: + /* + * This is the only case where we may reduce + * the budget: if there is no requets of the + * process still waiting for completion, then + * we assume (tentatively) that the timer has + * expired because the batch of requests of + * the process could have been served with a + * smaller budget. Hence, betting that + * process will behave in the same way when it + * becomes backlogged again, we reduce its + * next budget. As long as we guess right, + * this budget cut reduces the latency + * experienced by the process. + * + * However, if there are still outstanding + * requests, then the process may have not yet + * issued its next request just because it is + * still waiting for the completion of some of + * the still oustanding ones. 
So in this + * subcase we do not reduce its budget, on the + * contrary we increase it to possibly boost + * the throughput, as discussed in the + * comments to the BUDGET_TIMEOUT case. + */ + if(bfqq->dispatched > 0) /* still oustanding reqs */ + budget = min(budget * 2, bfqd->bfq_max_budget); + else { + if (budget > 5 * min_budget) + budget -= 4 * min_budget; + else + budget = min_budget; + } + break; + case BFQ_BFQQ_BUDGET_TIMEOUT: + /* + * We double the budget here because: 1) it + * gives the chance to boost the throughput if + * this is not a seeky process (which may have + * bumped into this timeout because of, e.g., + * ZBR), 2) together with charge_full_budget + * it helps give seeky processes higher + * timestamps, and hence be served less + * frequently. + */ + budget = min(budget * 2, bfqd->bfq_max_budget); + break; + case BFQ_BFQQ_BUDGET_EXHAUSTED: + /* + * The process still has backlog, and did not + * let either the budget timeout or the disk + * idling timeout expire. Hence it is not + * seeky, has a short thinktime and may be + * happy with a higher budget too. So + * definitely increase the budget of this good + * candidate to boost the disk throughput. + */ + budget = min(budget * 4, bfqd->bfq_max_budget); + break; + case BFQ_BFQQ_NO_MORE_REQUESTS: + /* + * Leave the budget unchanged. + */ + default: + return; + } + } else /* async queue */ + /* async queues get always the maximum possible budget + * (their ability to dispatch is limited by + * @bfqd->bfq_max_budget_async_rq). + */ + budget = bfqd->bfq_max_budget; + + bfqq->max_budget = budget; + + if (bfqd->budgets_assigned >= 194 && bfqd->bfq_user_max_budget == 0 && + bfqq->max_budget > bfqd->bfq_max_budget) + bfqq->max_budget = bfqd->bfq_max_budget; + + /* + * Make sure that we have enough budget for the next request. + * Since the finish time of the bfqq must be kept in sync with + * the budget, be sure to call __bfq_bfqq_expire() after the + * update. + */ + next_rq = bfqq->next_rq; + if (next_rq != NULL) + bfqq->entity.budget = max_t(bfq_service_t, bfqq->max_budget, + bfq_serv_to_charge(next_rq, bfqq)); + else + bfqq->entity.budget = bfqq->max_budget; + + bfq_log_bfqq(bfqd, bfqq, "head sect: %lu, new budget %lu", + next_rq != NULL ? blk_rq_sectors(next_rq) : 0, + bfqq->entity.budget); +} + +static bfq_service_t bfq_calc_max_budget(u64 peak_rate, u64 timeout) +{ + bfq_service_t max_budget; + + /* + * The max_budget calculated when autotuning is equal to the + * amount of sectors transfered in timeout_sync at the + * estimated peak rate. + */ + max_budget = (bfq_service_t)(peak_rate * 1000 * + timeout >> BFQ_RATE_SHIFT); + + return max_budget; +} + +/* + * In addition to updating the peak rate, checks whether the process + * is "slow", and returns 1 if so. This slow flag is used, in addition + * to the budget timeout, to reduce the amount of service provided to + * seeky processes, and hence reduce their chances to lower the + * throughput. See the code for more details. + */ +static int bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq, + int compensate, enum bfqq_expiration reason) +{ + u64 bw, usecs, expected, timeout; + ktime_t delta; + int update = 0; + + if (!bfq_bfqq_sync(bfqq) || bfq_bfqq_budget_new(bfqq)) + return 0; + + delta = compensate ? bfqd->last_idling_start : ktime_get(); + delta = ktime_sub(delta, bfqd->last_budget_start); + usecs = ktime_to_us(delta); + + /* Don't trust short/unrealistic values. 
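
The budget feedback implemented in __bfq_bfqq_recalc_budget() above shrinks a queue's budget only in the too-idle case with nothing in flight, and otherwise grows it: doubled on budget timeouts and on too-idle with outstanding requests, quadrupled on exhaustion, always capped at the device-wide maximum. The following standalone sketch restates those rules; the enum and helper names are local to the sketch, not taken from the patch.

#include <stdio.h>

enum expiration { TOO_IDLE, BUDGET_TIMEOUT, BUDGET_EXHAUSTED, NO_MORE_REQUESTS };

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

static unsigned long next_budget(unsigned long budget, unsigned long min_budget,
                                 unsigned long max_budget, int outstanding_reqs,
                                 enum expiration reason)
{
        switch (reason) {
        case TOO_IDLE:
                /* Shrink only if the queue has nothing in flight. */
                if (outstanding_reqs)
                        return min_ul(budget * 2, max_budget);
                return budget > 5 * min_budget ? budget - 4 * min_budget
                                               : min_budget;
        case BUDGET_TIMEOUT:
                return min_ul(budget * 2, max_budget);
        case BUDGET_EXHAUSTED:
                return min_ul(budget * 4, max_budget);
        case NO_MORE_REQUESTS:
        default:
                return budget;
        }
}

int main(void)
{
        /* A queue that keeps exhausting its budget grows quickly to the cap. */
        unsigned long b = 2048;

        for (int i = 0; i < 4; i++) {
                b = next_budget(b, 512, 16384, 0, BUDGET_EXHAUSTED);
                printf("budget after round %d: %lu\n", i + 1, b);
        }
        return 0;
}
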
*/ + if (usecs < 100 || usecs >= LONG_MAX) + return 0; + + /* + * Calculate the bandwidth for the last slice. We use a 64 bit + * value to store the peak rate, in sectors per usec in fixed + * point math. We do so to have enough precision in the estimate + * and to avoid overflows. + */ + bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT; + do_div(bw, (unsigned long)usecs); + + timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]); + + /* + * Use only long (> 20ms) intervals to filter out spikes for + * the peak rate estimation. + */ + if (usecs > 20000) { + if (bw > bfqd->peak_rate || + (!BFQQ_SEEKY(bfqq) && + reason == BFQ_BFQQ_BUDGET_TIMEOUT)) { + bfq_log(bfqd, "measured bw =%llu", bw); + /* + * To smooth oscillations use a low-pass filter with + * alpha=7/8, i.e., + * new_rate = (7/8) * old_rate + (1/8) * bw + */ + do_div(bw, 8); + bfqd->peak_rate *= 7; + do_div(bfqd->peak_rate, 8); + bfqd->peak_rate += bw; + update = 1; + bfq_log(bfqd, "new peak_rate=%llu", bfqd->peak_rate); + } + + update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1; + + if (bfqd->peak_rate_samples < BFQ_PEAK_RATE_SAMPLES) + bfqd->peak_rate_samples++; + + if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES && + update && bfqd->bfq_user_max_budget == 0) { + bfqd->bfq_max_budget = + bfq_calc_max_budget(bfqd->peak_rate, timeout); + bfq_log(bfqd, "new max_budget=%lu", + bfqd->bfq_max_budget); + } + } + + /* + * If the process has been served for a too short time + * interval to let its possible sequential accesses prevail on + * the initial seek time needed to move the disk head on the + * first sector it requested, then give the process a chance + * and for the moment return false. + */ + if (bfqq->entity.budget <= bfq_max_budget(bfqd) / 8) + return 0; + + /* + * A process is considered ``slow'' (i.e., seeky, so that we + * cannot treat it fairly in the service domain, as it would + * slow down too much the other processes) if, when a slice + * ends for whatever reason, it has received service at a + * rate that would not be high enough to complete the budget + * before the budget timeout expiration. + */ + expected = bw * 1000 * timeout >> BFQ_RATE_SHIFT; + + /* + * Caveat: processes doing IO in the slower disk zones will + * tend to be slow(er) even if not seeky. And the estimated + * peak rate will actually be an average over the disk + * surface. Hence, to not be too harsh with unlucky processes, + * we keep a budget/3 margin of safety before declaring a + * process slow. + */ + return expected > (4 * bfqq->entity.budget) / 3; +} + +/** + * bfq_bfqq_expire - expire a queue. + * @bfqd: device owning the queue. + * @bfqq: the queue to expire. + * @compensate: if true, compensate for the time spent idling. + * @reason: the reason causing the expiration. + * + * + * If the process associated to the queue is slow (i.e., seeky), or in + * case of budget timeout, or, finally, if it is async, we + * artificially charge it an entire budget (independently of the + * actual service it received). As a consequence, the queue will get + * higher timestamps than the correct ones upon reactivation, and + * hence it will be rescheduled as if it had received more service + * than what it actually received. In the end, this class of processes + * will receive less service in proportion to how slowly they consume + * their budgets (and hence how seriously they tend to lower the + * throughput). 
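
The peak-rate estimator in bfq_update_peak_rate() above keeps a fixed-point rate in sectors per microsecond and smooths it with new_rate = (7/8) * old_rate + (1/8) * sample, and bfq_calc_max_budget() turns that rate into the number of sectors transferable within the sync timeout. Below is a self-contained sketch of just that arithmetic; the shift of 16 stands in for the patch's BFQ_RATE_SHIFT, whose value is defined elsewhere in the series.

#include <stdio.h>
#include <stdint.h>

#define RATE_SHIFT 16   /* fixed-point shift, assumption for the sketch */

/* Sectors/usec in fixed point, from one measured slice. */
static uint64_t measured_rate(uint64_t sectors, uint64_t usecs)
{
        return (sectors << RATE_SHIFT) / usecs;
}

/* new_rate = (7/8) * old_rate + (1/8) * sample */
static uint64_t lowpass(uint64_t old_rate, uint64_t sample)
{
        return (old_rate * 7) / 8 + sample / 8;
}

/* Max budget = sectors transferable at peak rate within timeout_ms. */
static uint64_t max_budget(uint64_t peak_rate, uint64_t timeout_ms)
{
        return (peak_rate * 1000 * timeout_ms) >> RATE_SHIFT;
}

int main(void)
{
        uint64_t peak = 0;

        /* Two ordinary slices (2048 sectors in 25 ms), then a faster outlier. */
        peak = lowpass(peak, measured_rate(2048, 25000));
        peak = lowpass(peak, measured_rate(2048, 25000));
        peak = lowpass(peak, measured_rate(8192, 25000));

        printf("peak rate (fixed point): %llu\n", (unsigned long long)peak);
        printf("max budget for a 125 ms timeout: %llu sectors\n",
               (unsigned long long)max_budget(peak, 125));
        return 0;
}
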
+ * + * In contrast, when a queue expires because it has been idling for + * too much or because it exhausted its budget, we do not touch the + * amount of service it has received. Hence when the queue will be + * reactivated and its timestamps updated, the latter will be in sync + * with the actual service received by the queue until expiration. + * + * Charging a full budget to the first type of queues and the exact + * service to the others has the effect of using the WF2Q+ policy to + * schedule the former on a timeslice basis, without violating the + * service domain guarantees of the latter. + */ +static void bfq_bfqq_expire(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + int compensate, + enum bfqq_expiration reason) +{ + int slow; + BUG_ON(bfqq != bfqd->active_queue); + + /* Update disk peak rate for autotuning and check whether the + * process is slow (see bfq_update_peak_rate). + */ + slow = bfq_update_peak_rate(bfqd, bfqq, compensate, reason); + + /* + * As above explained, 'punish' slow (i.e., seeky), timed-out + * and async queues, to favor sequential sync workloads. + * + * Processes doing IO in the slower disk zones will tend to be + * slow(er) even if not seeky. Hence, since the estimated peak + * rate is actually an average over the disk surface, these + * processes may timeout just for bad luck. To avoid punishing + * them we do not charge a full budget to a process that + * succeeded in consuming at least 2/3 of its budget. + */ + if (slow || (reason == BFQ_BFQQ_BUDGET_TIMEOUT && + bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)) + bfq_bfqq_charge_full_budget(bfqq); + + bfq_log_bfqq(bfqd, bfqq, + "expire (%d, slow %d, num_disp %d, idle_win %d)", reason, slow, + bfqq->dispatched, bfq_bfqq_idle_window(bfqq)); + + /* Increase, decrease or leave budget unchanged according to reason */ + __bfq_bfqq_recalc_budget(bfqd, bfqq, reason); + __bfq_bfqq_expire(bfqd, bfqq); +} + +/* + * Budget timeout is not implemented through a dedicated timer, but + * just checked on request arrivals and completions, as well as on + * idle timer expirations. + */ +static int bfq_bfqq_budget_timeout(struct bfq_queue *bfqq) +{ + if (bfq_bfqq_budget_new(bfqq)) + return 0; + + if (time_before(jiffies, bfqq->budget_timeout)) + return 0; + + return 1; +} + +/* + * If we expire a queue that is waiting for the arrival of a new + * request, we may prevent the fictitious timestamp backshifting that + * allows the guarantees of the queue to be preserved (see [1] for + * this tricky aspect). Hence we return true only if this condition + * does not hold, or if the queue is slow enough to deserve only to be + * kicked off for preserving a high throughput. +*/ +static inline int bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) +{ + return (! bfq_bfqq_wait_request(bfqq) || + bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3) + && + bfq_bfqq_budget_timeout(bfqq); +} + +/* + * Select a queue for service. If we have a current active queue, + * check whether to continue servicing it, or retrieve and set a new one. 
+ */ +static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) +{ + struct bfq_queue *bfqq; + struct request *next_rq; + enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT; + + bfqq = bfqd->active_queue; + if (bfqq == NULL) + goto new_queue; + + bfq_log_bfqq(bfqd, bfqq, "select_queue: already active queue"); + + if (bfq_may_expire_for_budg_timeout(bfqq)) + goto expire; + + next_rq = bfqq->next_rq; + /* + * If bfqq has requests queued and it has enough budget left to + * serve them, keep the queue, otherwise expire it. + */ + if (next_rq != NULL) { + if (bfq_serv_to_charge(next_rq, bfqq) > + bfq_bfqq_budget_left(bfqq)) { + reason = BFQ_BFQQ_BUDGET_EXHAUSTED; + goto expire; + } else + goto keep_queue; + } + + /* + * No requests pending. If the active queue still has + * requests in flight or is idling for a new request, then keep it. + */ + if (timer_pending(&bfqd->idle_slice_timer) || + (bfqq->dispatched != 0 && bfq_bfqq_idle_window(bfqq))) { + bfqq = NULL; + goto keep_queue; + } + + reason = BFQ_BFQQ_NO_MORE_REQUESTS; +expire: + bfq_bfqq_expire(bfqd, bfqq, 0, reason); +new_queue: + bfqq = bfq_set_active_queue(bfqd); + bfq_log(bfqd, "select_queue: new queue returned (possibly NULL)"); +keep_queue: + return bfqq; +} + +/* + * Dispatch some requests from bfqq, moving them to the request queue + * dispatch list. + */ +static int __bfq_dispatch_requests(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + int max_dispatch) +{ + int dispatched = 0; + + BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); + + do { + struct request *rq; + bfq_service_t service_to_charge; + + /* Follow expired path, else get first next available. */ + rq = bfq_check_fifo(bfqq); + if (rq == NULL) + rq = bfqq->next_rq; + service_to_charge = bfq_serv_to_charge(rq, bfqq); + + if (service_to_charge > bfq_bfqq_budget_left(bfqq)) { + /* + * Expire the queue for budget exhaustion, and + * make sure that the next act_budget is enough + * to serve the next request, even if it comes + * from the fifo expired path. + */ + bfqq->next_rq = rq; + goto expire; + } + + /* Finally, insert request into driver dispatch list. 
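
The keep-or-expire test in bfq_select_queue() above reduces to comparing the charge for the next request against the budget the queue has left; in the real code the charge comes from bfq_serv_to_charge(), which may exceed the raw request size for seeky queues, so the sketch below is a simplification with local names.

#include <stdio.h>

struct queue_state {
        unsigned long budget;          /* assigned budget, in sectors */
        unsigned long service;         /* service already received    */
        unsigned long next_rq_sectors; /* size of the next request    */
};

static unsigned long budget_left(const struct queue_state *q)
{
        return q->budget - q->service;
}

/* 1: keep serving the queue, 0: expire it for budget exhaustion. */
static int keep_queue(const struct queue_state *q)
{
        return q->next_rq_sectors <= budget_left(q);
}

int main(void)
{
        struct queue_state q = { .budget = 4096, .service = 3968,
                                 .next_rq_sectors = 256 };

        printf("budget left: %lu, keep: %d\n", budget_left(&q), keep_queue(&q));
        return 0;
}
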
*/ + bfq_bfqq_served(bfqq, service_to_charge); + bfq_dispatch_insert(bfqd->queue, rq); + + if (bfqq->high_weight_budget > 0) { /* queue is being boosted */ + struct bfq_entity *entity = &bfqq->entity; + + bfq_log_bfqq(bfqd, bfqq, "busy period dur %llu msec, " + "old highwbudg %lu", + jiffies_to_msecs(jiffies) - + bfqq->last_activation_time, + bfqq->high_weight_budget); + /* + * Decrease the budget for weight boosting by + * the just received service, or, if too much + * time has elapsed from the beginning of this + * boosting period, stop it + */ + if (jiffies_to_msecs(jiffies) - + bfqq->last_activation_time <= BFQ_BOOST_TIMEOUT + && + bfqq->high_weight_budget > service_to_charge) + bfqq->high_weight_budget -= service_to_charge; + else + bfqq->high_weight_budget = 0; + entity->ioprio_changed = 1; + __bfq_entity_update_weight_prio( + bfq_entity_service_tree(entity), + entity); + } + + bfq_log_bfqq(bfqd, bfqq, "dispatched %lu sec req (%llu), " + "budg left %lu", + blk_rq_sectors(rq), blk_rq_pos(rq), + bfq_bfqq_budget_left(bfqq)); + + dispatched++; + + if (bfqd->active_cic == NULL) { + atomic_inc(&RQ_CIC(rq)->ioc->refcount); + bfqd->active_cic = RQ_CIC(rq); + } + + if (RB_EMPTY_ROOT(&bfqq->sort_list)) + break; + } while (dispatched < max_dispatch); + + bfq_log_bfqq(bfqd, bfqq, "dispatched %d reqs", dispatched); + + if (bfqd->busy_queues > 1 && ((!bfq_bfqq_sync(bfqq) && + dispatched >= bfqd->bfq_max_budget_async_rq) || + bfq_class_idle(bfqq))) + goto expire; + + return dispatched; + +expire: + bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_EXHAUSTED); + return dispatched; +} + +static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq) +{ + int dispatched = 0; + + while (bfqq->next_rq != NULL) { + bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq); + dispatched++; + } + + BUG_ON(!list_empty(&bfqq->fifo)); + return dispatched; +} + +/* + * Drain our current requests. Used for barriers and when switching + * io schedulers on-the-fly. + */ +static int bfq_forced_dispatch(struct bfq_data *bfqd) +{ + struct bfq_queue *bfqq, *n; + struct bfq_service_tree *st; + int dispatched = 0; + + bfqq = bfqd->active_queue; + if (bfqq != NULL) + __bfq_bfqq_expire(bfqd, bfqq); + + /* + * Loop through classes, and be careful to leave the scheduler + * in a consistent state, as feedback mechanisms and vtime + * updates cannot be disabled during the process. 
+ */ + list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) { + st = bfq_entity_service_tree(&bfqq->entity); + + dispatched += __bfq_forced_dispatch_bfqq(bfqq); + bfqq->max_budget = bfq_max_budget(bfqd); + + bfq_forget_idle(st); + } + + BUG_ON(bfqd->busy_queues != 0); + + return dispatched; +} + +static int bfq_dispatch_requests(struct request_queue *q, int force) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct bfq_queue *bfqq; + int dispatched; + + bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues); + if (bfqd->busy_queues == 0) + return 0; + + if (unlikely(force)) + return bfq_forced_dispatch(bfqd); + + dispatched = 0; + while ((bfqq = bfq_select_queue(bfqd)) != NULL) { + int max_dispatch; + + max_dispatch = bfqd->bfq_quantum; + if (bfq_class_idle(bfqq)) + max_dispatch = 1; + + if (!bfq_bfqq_sync(bfqq)) + max_dispatch = bfqd->bfq_max_budget_async_rq; + + if (bfqq->dispatched >= max_dispatch) { + if (bfqd->busy_queues > 1) + break; + if (bfqq->dispatched >= 4 * max_dispatch) + break; + } + + if (bfqd->sync_flight != 0 && !bfq_bfqq_sync(bfqq)) + break; + + bfq_clear_bfqq_wait_request(bfqq); + BUG_ON(timer_pending(&bfqd->idle_slice_timer)); + + dispatched += __bfq_dispatch_requests(bfqd, bfqq, max_dispatch); + bfq_log_bfqq(bfqd, bfqq, "total dispatched increased to %d " + "(max_disp %d)", dispatched, max_dispatch); + } + + bfq_log(bfqd, "final total dispatched=%d", dispatched); + return dispatched; +} + +/* + * Task holds one reference to the queue, dropped when task exits. Each rq + * in-flight on this queue also holds a reference, dropped when rq is freed. + * + * Queue lock must be held here. + */ +static void bfq_put_queue(struct bfq_queue *bfqq) +{ + struct bfq_data *bfqd = bfqq->bfqd; + + BUG_ON(atomic_read(&bfqq->ref) <= 0); + + bfq_log_bfqq(bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref); + if (!atomic_dec_and_test(&bfqq->ref)) + return; + + BUG_ON(rb_first(&bfqq->sort_list) != NULL); + BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0); + BUG_ON(bfqq->entity.tree != NULL); + BUG_ON(bfq_bfqq_busy(bfqq)); + BUG_ON(bfqd->active_queue == bfqq); + + bfq_log_bfqq(bfqd, bfqq, "put_queue: %p freed", bfqq); + + kmem_cache_free(bfq_pool, bfqq); +} + +static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) +{ + if (bfqq == bfqd->active_queue) { + __bfq_bfqq_expire(bfqd, bfqq); + bfq_schedule_dispatch(bfqd); + } + + bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref); + bfq_put_queue(bfqq); +} + +/* + * Update the entity prio values; note that the new values will not + * be used until the next (re)activation. + */ +static void bfq_init_prio_data(struct bfq_queue *bfqq, struct io_context *ioc) +{ + struct task_struct *tsk = current; + int ioprio_class; + + if (!bfq_bfqq_prio_changed(bfqq)) + return; + + ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio); + switch (ioprio_class) { + default: + printk(KERN_ERR "bfq: bad prio %x\n", ioprio_class); + case IOPRIO_CLASS_NONE: + /* + * No prio set, inherit CPU scheduling settings. 
+ */ + bfqq->entity.new_ioprio = task_nice_ioprio(tsk); + bfqq->entity.new_ioprio_class = task_nice_ioclass(tsk); + break; + case IOPRIO_CLASS_RT: + bfqq->entity.new_ioprio = task_ioprio(ioc); + bfqq->entity.new_ioprio_class = IOPRIO_CLASS_RT; + break; + case IOPRIO_CLASS_BE: + bfqq->entity.new_ioprio = task_ioprio(ioc); + bfqq->entity.new_ioprio_class = IOPRIO_CLASS_BE; + break; + case IOPRIO_CLASS_IDLE: + bfqq->entity.new_ioprio_class = IOPRIO_CLASS_IDLE; + bfqq->entity.new_ioprio = 7; + bfq_clear_bfqq_idle_window(bfqq); + break; + } + + bfqq->entity.ioprio_changed = 1; + + /* + * Keep track of original prio settings in case we have to temporarily + * elevate the priority of this queue. + */ + bfqq->org_ioprio = bfqq->entity.new_ioprio; + bfqq->org_ioprio_class = bfqq->entity.new_ioprio_class; + bfq_clear_bfqq_prio_changed(bfqq); +} + +static void bfq_changed_ioprio(struct io_context *ioc, + struct cfq_io_context *cic) +{ + struct bfq_data *bfqd; + struct bfq_queue *bfqq, *new_bfqq; + struct bfq_group *bfqg; + unsigned long uninitialized_var(flags); + + bfqd = bfq_get_bfqd_locked(&cic->key, &flags); + if (unlikely(bfqd == NULL)) + return; + + bfqq = cic->cfqq[BLK_RW_ASYNC]; + if (bfqq != NULL) { + bfqg = container_of(bfqq->entity.sched_data, struct bfq_group, + sched_data); + new_bfqq = bfq_get_queue(bfqd, bfqg, BLK_RW_ASYNC, cic->ioc, + GFP_ATOMIC); + if (new_bfqq != NULL) { + cic->cfqq[BLK_RW_ASYNC] = new_bfqq; + bfq_log_bfqq(bfqd, bfqq, + "changed_ioprio: bfqq %p %d", + bfqq, bfqq->ref); + bfq_put_queue(bfqq); + } + } + + bfqq = cic->cfqq[BLK_RW_SYNC]; + if (bfqq != NULL) + bfq_mark_bfqq_prio_changed(bfqq); + + bfq_put_bfqd_unlock(bfqd, &flags); +} + +static struct bfq_queue *bfq_find_alloc_queue(struct bfq_data *bfqd, + struct bfq_group *bfqg, + int is_sync, + struct io_context *ioc, + gfp_t gfp_mask) +{ + struct bfq_queue *bfqq, *new_bfqq = NULL; + struct cfq_io_context *cic; + +retry: + cic = bfq_cic_lookup(bfqd, ioc); + /* cic always exists here */ + bfqq = cic_to_bfqq(cic, is_sync); + + if (bfqq == NULL) { + if (new_bfqq != NULL) { + bfqq = new_bfqq; + new_bfqq = NULL; + } else if (gfp_mask & __GFP_WAIT) { + /* + * Inform the allocator of the fact that we will + * just repeat this allocation if it fails, to allow + * the allocator to do whatever it needs to attempt to + * free memory. 
+ */ + spin_unlock_irq(bfqd->queue->queue_lock); + new_bfqq = kmem_cache_alloc_node(bfq_pool, + gfp_mask | __GFP_NOFAIL | __GFP_ZERO, + bfqd->queue->node); + spin_lock_irq(bfqd->queue->queue_lock); + goto retry; + } else { + bfqq = kmem_cache_alloc_node(bfq_pool, + gfp_mask | __GFP_ZERO, + bfqd->queue->node); + if (bfqq == NULL) + goto out; + } + + RB_CLEAR_NODE(&bfqq->entity.rb_node); + INIT_LIST_HEAD(&bfqq->fifo); + + atomic_set(&bfqq->ref, 0); + bfqq->bfqd = bfqd; + + bfq_mark_bfqq_prio_changed(bfqq); + + bfq_init_prio_data(bfqq, ioc); + bfq_init_entity(&bfqq->entity, bfqg); + + if (is_sync) { + if (!bfq_class_idle(bfqq)) + bfq_mark_bfqq_idle_window(bfqq); + bfq_mark_bfqq_sync(bfqq); + } + /* Tentative initial value to trade off between thr and lat */ + bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3; + bfqq->pid = current->pid; + + bfqq->last_activation_time = 0; + bfqq->high_weight_budget = 0; + + bfq_log_bfqq(bfqd, bfqq, "allocated"); + } + + if (new_bfqq != NULL) + kmem_cache_free(bfq_pool, new_bfqq); + +out: + WARN_ON((gfp_mask & __GFP_WAIT) && bfqq == NULL); + return bfqq; +} + +static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, + struct bfq_group *bfqg, + int ioprio_class, int ioprio) +{ + switch (ioprio_class) { + case IOPRIO_CLASS_RT: + return &bfqg->async_bfqq[0][ioprio]; + case IOPRIO_CLASS_BE: + return &bfqg->async_bfqq[1][ioprio]; + case IOPRIO_CLASS_IDLE: + return &bfqg->async_idle_bfqq; + default: + BUG(); + } +} + +static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, + struct bfq_group *bfqg, int is_sync, + struct io_context *ioc, gfp_t gfp_mask) +{ + const int ioprio = task_ioprio(ioc); + const int ioprio_class = task_ioprio_class(ioc); + struct bfq_queue **async_bfqq = NULL; + struct bfq_queue *bfqq = NULL; + + if (!is_sync) { + async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class, + ioprio); + bfqq = *async_bfqq; + } + + if (bfqq == NULL) { + bfqq = bfq_find_alloc_queue(bfqd, bfqg, is_sync, ioc, gfp_mask); + if (bfqq == NULL) + return NULL; + } + + /* + * Pin the queue now that it's allocated, scheduler exit will prune it. + */ + if (!is_sync && *async_bfqq == NULL) { + atomic_inc(&bfqq->ref); + bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d", + bfqq, bfqq->ref); + *async_bfqq = bfqq; + } + + atomic_inc(&bfqq->ref); + bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq, bfqq->ref); + return bfqq; +} + +static void bfq_update_io_thinktime(struct bfq_data *bfqd, + struct cfq_io_context *cic) +{ + unsigned long elapsed = jiffies - cic->last_end_request; + unsigned long ttime = min(elapsed, 2UL * bfqd->bfq_slice_idle); + + cic->ttime_samples = (7*cic->ttime_samples + 256) / 8; + cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8; + cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples; +} + +static void bfq_update_io_seektime(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + struct request *rq) +{ + sector_t sdist; + u64 total; + + if (bfqq->last_request_pos < blk_rq_pos(rq)) + sdist = blk_rq_pos(rq) - bfqq->last_request_pos; + else + sdist = bfqq->last_request_pos - blk_rq_pos(rq); + + /* + * Don't allow the seek distance to get too large from the + * odd fragment, pagein, etc. 
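
The per-queue statistics above (think time here, seek distance just below) all use the same fixed-point running average: samples and totals decay by 7/8 at every update, and the "+256" term makes the sample weight saturate at 256. A small standalone illustration of that filter, with names local to the sketch:

#include <stdio.h>

struct running_mean {
        unsigned long samples; /* scaled, converges to 256 */
        unsigned long total;   /* scaled by 256            */
};

static void add_sample(struct running_mean *m, unsigned long value)
{
        m->samples = (7 * m->samples + 256) / 8;
        m->total   = (7 * m->total + 256 * value) / 8;
}

static unsigned long mean(const struct running_mean *m)
{
        /* Same rounding as the patch uses for the think-time mean. */
        return (m->total + 128) / m->samples;
}

int main(void)
{
        struct running_mean think = { 0, 0 };

        add_sample(&think, 4);   /* 4 ms between requests */
        add_sample(&think, 4);
        add_sample(&think, 40);  /* one long pause */
        printf("mean think time: %lu ms (sample weight %lu)\n",
               mean(&think), think.samples);
        return 0;
}
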
+ */ + if (bfqq->seek_samples == 0) /* first request, not really a seek */ + sdist = 0; + else if (bfqq->seek_samples <= 60) /* second & third seek */ + sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*1024); + else + sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*64); + + bfqq->seek_samples = (7*bfqq->seek_samples + 256) / 8; + bfqq->seek_total = (7*bfqq->seek_total + (u64)256*sdist) / 8; + total = bfqq->seek_total + (bfqq->seek_samples/2); + do_div(total, bfqq->seek_samples); + bfqq->seek_mean = (sector_t)total; + + bfq_log_bfqq(bfqd, bfqq, "dist=%llu mean=%llu", (u64)sdist, + (u64)bfqq->seek_mean); +} + +/* + * Disable idle window if the process thinks too long or seeks so much that + * it doesn't matter. + */ +static void bfq_update_idle_window(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + struct cfq_io_context *cic) +{ + int enable_idle; + + /* Don't idle for async or idle io prio class. */ + if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq)) + return; + + enable_idle = bfq_bfqq_idle_window(bfqq); + + if (atomic_read(&cic->ioc->nr_tasks) == 0 || + bfqd->bfq_slice_idle == 0 || (bfqd->hw_tag && BFQQ_SEEKY(bfqq))) + enable_idle = 0; + else if (bfq_sample_valid(cic->ttime_samples)) { + if (cic->ttime_mean > bfqd->bfq_slice_idle) + enable_idle = 0; + else + enable_idle = 1; + } + bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d", + enable_idle); + + if (enable_idle) + bfq_mark_bfqq_idle_window(bfqq); + else + bfq_clear_bfqq_idle_window(bfqq); +} + +/* + * Called when a new fs request (rq) is added to bfqq. Check if there's + * something we should do about it. + */ +static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, + struct request *rq) +{ + struct cfq_io_context *cic = RQ_CIC(rq); + + if (rq_is_meta(rq)) + bfqq->meta_pending++; + + bfq_update_io_thinktime(bfqd, cic); + bfq_update_io_seektime(bfqd, bfqq, rq); + if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 || + ! BFQQ_SEEKY(bfqq)) + bfq_update_idle_window(bfqd, bfqq, cic); + + bfq_log_bfqq(bfqd, bfqq, + "rq_enqueued: idle_window=%d (seeky %d, mean %llu)", + bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq), + bfqq->seek_mean); + + bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); + + if (bfqq == bfqd->active_queue) { + if (bfq_bfqq_wait_request(bfqq)) { + /* + * If we are waiting for a request for this queue, let + * it rip immediately and flag that we must not expire + * this queue just now. + */ + bfq_clear_bfqq_wait_request(bfqq); + del_timer(&bfqd->idle_slice_timer); + /* + * Here we can safely expire the queue, in + * case of budget timeout, without wasting + * guarantees + */ + if (bfq_bfqq_budget_timeout(bfqq)) + bfq_bfqq_expire(bfqd, bfqq, 0, + BFQ_BFQQ_BUDGET_TIMEOUT); + __blk_run_queue(bfqd->queue); + } + } +} + +static void bfq_insert_request(struct request_queue *q, struct request *rq) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct bfq_queue *bfqq = RQ_BFQQ(rq); + + assert_spin_locked(bfqd->queue->queue_lock); + bfq_init_prio_data(bfqq, RQ_CIC(rq)->ioc); + + bfq_add_rq_rb(rq); + + list_add_tail(&rq->queuelist, &bfqq->fifo); + + bfq_rq_enqueued(bfqd, bfqq, rq); +} + +static void bfq_update_hw_tag(struct bfq_data *bfqd) +{ + bfqd->max_rq_in_driver = max(bfqd->max_rq_in_driver, + bfqd->rq_in_driver); + + /* + * This sample is valid if the number of outstanding requests + * is large enough to allow a queueing behavior. Note that the + * sum is not exact, as it's not taking into account deactivated + * requests. 
+ */ + if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD) + return; + + if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES) + return; + + bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD; + bfqd->max_rq_in_driver = 0; + bfqd->hw_tag_samples = 0; +} + +static void bfq_completed_request(struct request_queue *q, struct request *rq) +{ + struct bfq_queue *bfqq = RQ_BFQQ(rq); + struct bfq_data *bfqd = bfqq->bfqd; + const int sync = rq_is_sync(rq); + + bfq_log_bfqq(bfqd, bfqq, "completed %lu sects req (%d)", + blk_rq_sectors(rq), sync); + + bfq_update_hw_tag(bfqd); + + WARN_ON(!bfqd->rq_in_driver); + WARN_ON(!bfqq->dispatched); + bfqd->rq_in_driver--; + bfqq->dispatched--; + + if (bfq_bfqq_sync(bfqq)) + bfqd->sync_flight--; + + if (sync) + RQ_CIC(rq)->last_end_request = jiffies; + + /* + * If this is the active queue, check if it needs to be expired, + * or if we want to idle in case it has no pending requests. + */ + if (bfqd->active_queue == bfqq) { + if (bfq_bfqq_budget_new(bfqq)) + bfq_set_budget_timeout(bfqd); + + if (bfq_may_expire_for_budg_timeout(bfqq)) + bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT); + else if (sync && bfqd->rq_in_driver == 0 && + RB_EMPTY_ROOT(&bfqq->sort_list)) + bfq_arm_slice_timer(bfqd); + } + + if (!bfqd->rq_in_driver) + bfq_schedule_dispatch(bfqd); +} + +/* + * We temporarily boost lower priority queues if they are holding fs exclusive + * resources. They are boosted to normal prio (CLASS_BE/4). + */ +static void bfq_prio_boost(struct bfq_queue *bfqq) +{ + if (has_fs_excl()) { + /* + * Boost idle prio on transactions that would lock out other + * users of the filesystem + */ + if (bfq_class_idle(bfqq)) + bfqq->entity.new_ioprio_class = IOPRIO_CLASS_BE; + if (bfqq->entity.new_ioprio > IOPRIO_NORM) + bfqq->entity.new_ioprio = IOPRIO_NORM; + } else { + /* + * Check if we need to unboost the queue + */ + if (bfqq->entity.new_ioprio_class != bfqq->org_ioprio_class) + bfqq->entity.new_ioprio_class = bfqq->org_ioprio_class; + if (bfqq->entity.new_ioprio != bfqq->org_ioprio) + bfqq->entity.new_ioprio = bfqq->org_ioprio; + } +} + +static inline int __bfq_may_queue(struct bfq_queue *bfqq) +{ + if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) { + bfq_clear_bfqq_must_alloc(bfqq); + return ELV_MQUEUE_MUST; + } + + return ELV_MQUEUE_MAY; +} + +static int bfq_may_queue(struct request_queue *q, int rw) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct task_struct *tsk = current; + struct cfq_io_context *cic; + struct bfq_queue *bfqq; + + /* + * Don't force setup of a queue from here, as a call to may_queue + * does not necessarily imply that a request actually will be queued. + * So just lookup a possibly existing queue, or return 'may queue' + * if that fails. + */ + cic = bfq_cic_lookup(bfqd, tsk->io_context); + if (cic == NULL) + return ELV_MQUEUE_MAY; + + bfqq = cic_to_bfqq(cic, rw & REQ_RW_SYNC); + if (bfqq != NULL) { + bfq_init_prio_data(bfqq, cic->ioc); + bfq_prio_boost(bfqq); + + return __bfq_may_queue(bfqq); + } + + return ELV_MQUEUE_MAY; +} + +/* + * Queue lock held here. 
+ */ +static void bfq_put_request(struct request *rq) +{ + struct bfq_queue *bfqq = RQ_BFQQ(rq); + + if (bfqq != NULL) { + const int rw = rq_data_dir(rq); + + BUG_ON(!bfqq->allocated[rw]); + bfqq->allocated[rw]--; + + put_io_context(RQ_CIC(rq)->ioc); + + rq->elevator_private = NULL; + rq->elevator_private2 = NULL; + + bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d", + bfqq, bfqq->ref); + bfq_put_queue(bfqq); + } +} + +/* + * Allocate bfq data structures associated with this request. + */ +static int bfq_set_request(struct request_queue *q, struct request *rq, + gfp_t gfp_mask) +{ + struct bfq_data *bfqd = q->elevator->elevator_data; + struct cfq_io_context *cic; + const int rw = rq_data_dir(rq); + const int is_sync = rq_is_sync(rq); + struct bfq_queue *bfqq; + struct bfq_group *bfqg; + unsigned long flags; + + might_sleep_if(gfp_mask & __GFP_WAIT); + + cic = bfq_get_io_context(bfqd, gfp_mask); + + spin_lock_irqsave(q->queue_lock, flags); + + if (cic == NULL) + goto queue_fail; + + bfqg = bfq_cic_update_cgroup(cic); + + bfqq = cic_to_bfqq(cic, is_sync); + if (bfqq == NULL) { + bfqq = bfq_get_queue(bfqd, bfqg, is_sync, cic->ioc, gfp_mask); + if (bfqq == NULL) + goto queue_fail; + + cic_set_bfqq(cic, bfqq, is_sync); + } + + bfqq->allocated[rw]++; + atomic_inc(&bfqq->ref); + bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq, bfqq->ref); + + spin_unlock_irqrestore(q->queue_lock, flags); + + rq->elevator_private = cic; + rq->elevator_private2 = bfqq; + + return 0; + +queue_fail: + if (cic != NULL) + put_io_context(cic->ioc); + + bfq_schedule_dispatch(bfqd); + spin_unlock_irqrestore(q->queue_lock, flags); + + return 1; +} + +static void bfq_kick_queue(struct work_struct *work) +{ + struct bfq_data *bfqd = + container_of(work, struct bfq_data, unplug_work); + struct request_queue *q = bfqd->queue; + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + __blk_run_queue(q); + spin_unlock_irqrestore(q->queue_lock, flags); +} + +/* + * Handler of the expiration of the timer running if the active_queue + * is idling inside its time slice. + */ +static void bfq_idle_slice_timer(unsigned long data) +{ + struct bfq_data *bfqd = (struct bfq_data *)data; + struct bfq_queue *bfqq; + unsigned long flags; + enum bfqq_expiration reason; + + spin_lock_irqsave(bfqd->queue->queue_lock, flags); + + bfqq = bfqd->active_queue; + /* + * Theoretical race here: active_queue can be NULL or different + * from the queue that was idling if the timer handler spins on + * the queue_lock and a new request arrives for the current + * queue and there is a full dispatch cycle that changes the + * active_queue. This can hardly happen, but in the worst case + * we just expire a queue too early. 
+ */ + if (bfqq != NULL) { + bfq_log_bfqq(bfqd, bfqq, "slice_timer expired"); + reason = BFQ_BFQQ_TOO_IDLE; + if (bfq_bfqq_budget_timeout(bfqq)) + /* + * Also here the queue can be safely expired + * for budget timeout without wasting + * guarantees + */ + reason = BFQ_BFQQ_BUDGET_TIMEOUT; + + bfq_bfqq_expire(bfqd, bfqq, 1, reason); + } + + bfq_schedule_dispatch(bfqd); + + spin_unlock_irqrestore(bfqd->queue->queue_lock, flags); +} + +static void bfq_shutdown_timer_wq(struct bfq_data *bfqd) +{ + del_timer_sync(&bfqd->idle_slice_timer); + cancel_work_sync(&bfqd->unplug_work); +} + +static inline void __bfq_put_async_bfqq(struct bfq_data *bfqd, + struct bfq_queue **bfqq_ptr) +{ + struct bfq_group *root_group = bfqd->root_group; + struct bfq_queue *bfqq = *bfqq_ptr; + + bfq_log(bfqd, "put_async_bfqq: %p", bfqq); + if (bfqq != NULL) { + bfq_bfqq_move(bfqd, bfqq, &bfqq->entity, root_group); + bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d", + bfqq, bfqq->ref); + bfq_put_queue(bfqq); + *bfqq_ptr = NULL; + } +} + +/* + * Release all the bfqg references to its async queues. If we are + * deallocating the group these queues may still contain requests, so + * we reparent them to the root cgroup (i.e., the only one that will + * exist for sure untill all the requests on a device are gone). + */ +static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg) +{ + int i, j; + + for (i = 0; i < 2; i++) + for (j = 0; j < IOPRIO_BE_NR; j++) + __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]); + + __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq); +} + +static void bfq_exit_queue(struct elevator_queue *e) +{ + struct bfq_data *bfqd = e->elevator_data; + struct request_queue *q = bfqd->queue; + struct bfq_queue *bfqq, *n; + struct cfq_io_context *cic; + + bfq_shutdown_timer_wq(bfqd); + + spin_lock_irq(q->queue_lock); + + while (!list_empty(&bfqd->cic_list)) { + cic = list_entry(bfqd->cic_list.next, struct cfq_io_context, + queue_list); + __bfq_exit_single_io_context(bfqd, cic); + } + + BUG_ON(bfqd->active_queue != NULL); + list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) + bfq_deactivate_bfqq(bfqd, bfqq, 0); + + bfq_disconnect_groups(bfqd); + spin_unlock_irq(q->queue_lock); + + bfq_shutdown_timer_wq(bfqd); + + spin_lock(&cic_index_lock); + ida_remove(&cic_index_ida, bfqd->cic_index); + spin_unlock(&cic_index_lock); + + /* Wait for cic->key accessors to exit their grace periods. 
*/ + synchronize_rcu(); + + BUG_ON(timer_pending(&bfqd->idle_slice_timer)); + + bfq_free_root_group(bfqd); + kfree(bfqd); +} + +static int bfq_alloc_cic_index(void) +{ + int index, error; + + do { + if (!ida_pre_get(&cic_index_ida, GFP_KERNEL)) + return -ENOMEM; + + spin_lock(&cic_index_lock); + error = ida_get_new(&cic_index_ida, &index); + spin_unlock(&cic_index_lock); + if (error && error != -EAGAIN) + return error; + } while (error); + + return index; +} + +static void *bfq_init_queue(struct request_queue *q) +{ + struct bfq_group *bfqg; + struct bfq_data *bfqd; + int i; + + i = bfq_alloc_cic_index(); + if (i < 0) + return NULL; + + bfqd = kmalloc_node(sizeof(*bfqd), GFP_KERNEL | __GFP_ZERO, q->node); + if (bfqd == NULL) + return NULL; + + bfqd->cic_index = i; + + INIT_LIST_HEAD(&bfqd->cic_list); + + bfqd->queue = q; + + bfqg = bfq_alloc_root_group(bfqd, q->node); + if (bfqg == NULL) { + kfree(bfqd); + return NULL; + } + + bfqd->root_group = bfqg; + + init_timer(&bfqd->idle_slice_timer); + bfqd->idle_slice_timer.function = bfq_idle_slice_timer; + bfqd->idle_slice_timer.data = (unsigned long)bfqd; + + INIT_WORK(&bfqd->unplug_work, bfq_kick_queue); + + INIT_LIST_HEAD(&bfqd->active_list); + INIT_LIST_HEAD(&bfqd->idle_list); + + bfqd->hw_tag = 1; + + bfqd->bfq_max_budget = bfq_default_max_budget; + + bfqd->bfq_quantum = bfq_quantum; + bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0]; + bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1]; + bfqd->bfq_back_max = bfq_back_max; + bfqd->bfq_back_penalty = bfq_back_penalty; + bfqd->bfq_slice_idle = bfq_slice_idle; + bfqd->bfq_max_budget_async_rq = bfq_max_budget_async_rq; + bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async; + bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync; + + bfqd->low_latency = true; + + return bfqd; +} + +static void bfq_slab_kill(void) +{ + if (bfq_pool != NULL) + kmem_cache_destroy(bfq_pool); + if (bfq_ioc_pool != NULL) + kmem_cache_destroy(bfq_ioc_pool); +} + +static int __init bfq_slab_setup(void) +{ + bfq_pool = KMEM_CACHE(bfq_queue, 0); + if (bfq_pool == NULL) + goto fail; + + bfq_ioc_pool = kmem_cache_create("bfq_io_context", + sizeof(struct cfq_io_context), + __alignof__(struct cfq_io_context), + 0, NULL); + if (bfq_ioc_pool == NULL) + goto fail; + + return 0; +fail: + bfq_slab_kill(); + return -ENOMEM; +} + +static ssize_t bfq_var_show(unsigned int var, char *page) +{ + return sprintf(page, "%d\n", var); +} + +static ssize_t bfq_var_store(unsigned int *var, const char *page, size_t count) +{ + char *p = (char *)page; + + *var = simple_strtoul(p, &p, 10); + return count; +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct bfq_data *bfqd = e->elevator_data; \ + unsigned int __data = __VAR; \ + if (__CONV) \ + __data = jiffies_to_msecs(__data); \ + return bfq_var_show(__data, (page)); \ +} +SHOW_FUNCTION(bfq_quantum_show, bfqd->bfq_quantum, 0); +SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 1); +SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 1); +SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0); +SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0); +SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 1); +SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0); +SHOW_FUNCTION(bfq_max_budget_async_rq_show, bfqd->bfq_max_budget_async_rq, 0); +SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout[BLK_RW_SYNC], 1); +SHOW_FUNCTION(bfq_timeout_async_show, 
bfqd->bfq_timeout[BLK_RW_ASYNC], 1); +SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ +static ssize_t \ +__FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ + struct bfq_data *bfqd = e->elevator_data; \ + unsigned int __data; \ + int ret = bfq_var_store(&__data, (page), count); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ + if (__CONV) \ + *(__PTR) = msecs_to_jiffies(__data); \ + else \ + *(__PTR) = __data; \ + return ret; \ +} +STORE_FUNCTION(bfq_quantum_store, &bfqd->bfq_quantum, 1, INT_MAX, 0); +STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1, + INT_MAX, 1); +STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1, + INT_MAX, 1); +STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0); +STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1, + INT_MAX, 0); +STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 1); +STORE_FUNCTION(bfq_max_budget_async_rq_store, &bfqd->bfq_max_budget_async_rq, + 1, INT_MAX, 0); +STORE_FUNCTION(bfq_timeout_async_store, &bfqd->bfq_timeout[BLK_RW_ASYNC], 0, + INT_MAX, 1); +#undef STORE_FUNCTION + +static inline bfq_service_t bfq_estimated_max_budget(struct bfq_data *bfqd) +{ + u64 timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]); + + if (bfqd->peak_rate_samples >= BFQ_PEAK_RATE_SAMPLES) + return bfq_calc_max_budget(bfqd->peak_rate, timeout); + else + return bfq_default_max_budget; +} + +static ssize_t bfq_max_budget_store(struct elevator_queue *e, + const char *page, size_t count) +{ + struct bfq_data *bfqd = e->elevator_data; + unsigned int __data; + int ret = bfq_var_store(&__data, (page), count); + + if (__data == 0) + bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd); + else { + if (__data > INT_MAX) + __data = INT_MAX; + bfqd->bfq_max_budget = __data; + } + + bfqd->bfq_user_max_budget = __data; + + return ret; +} + +static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, + const char *page, size_t count) +{ + struct bfq_data *bfqd = e->elevator_data; + unsigned int __data; + int ret = bfq_var_store(&__data, (page), count); + + if (__data < 1) + __data = 1; + else if (__data > INT_MAX) + __data = INT_MAX; + + bfqd->bfq_timeout[BLK_RW_SYNC] = msecs_to_jiffies(__data); + if (bfqd->bfq_user_max_budget == 0) + bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd); + + return ret; +} + +static ssize_t bfq_low_latency_store(struct elevator_queue *e, + const char *page, size_t count) +{ + struct bfq_data *bfqd = e->elevator_data; + unsigned int __data; + int ret = bfq_var_store(&__data, (page), count); + + if (__data > 1) + __data = 1; + bfqd->low_latency = __data; + + return ret; +} + +#define BFQ_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store) + +static struct elv_fs_entry bfq_attrs[] = { + BFQ_ATTR(quantum), + BFQ_ATTR(fifo_expire_sync), + BFQ_ATTR(fifo_expire_async), + BFQ_ATTR(back_seek_max), + BFQ_ATTR(back_seek_penalty), + BFQ_ATTR(slice_idle), + BFQ_ATTR(max_budget), + BFQ_ATTR(max_budget_async_rq), + BFQ_ATTR(timeout_sync), + BFQ_ATTR(timeout_async), + BFQ_ATTR(low_latency), + __ATTR_NULL +}; + +static struct elevator_type iosched_bfq = { + .ops = { + .elevator_merge_fn = bfq_merge, + .elevator_merged_fn = bfq_merged_request, + .elevator_merge_req_fn = bfq_merged_requests, + .elevator_allow_merge_fn = bfq_allow_merge, + 
.elevator_dispatch_fn = bfq_dispatch_requests, + .elevator_add_req_fn = bfq_insert_request, + .elevator_activate_req_fn = bfq_activate_request, + .elevator_deactivate_req_fn = bfq_deactivate_request, + .elevator_queue_empty_fn = bfq_queue_empty, + .elevator_completed_req_fn = bfq_completed_request, + .elevator_former_req_fn = elv_rb_former_request, + .elevator_latter_req_fn = elv_rb_latter_request, + .elevator_set_req_fn = bfq_set_request, + .elevator_put_req_fn = bfq_put_request, + .elevator_may_queue_fn = bfq_may_queue, + .elevator_init_fn = bfq_init_queue, + .elevator_exit_fn = bfq_exit_queue, + .trim = bfq_free_io_context, + }, + .elevator_attrs = bfq_attrs, + .elevator_name = "bfq", + .elevator_owner = THIS_MODULE, +}; + +static int __init bfq_init(void) +{ + /* + * Can be 0 on HZ < 1000 setups. + */ + if (bfq_slice_idle == 0) + bfq_slice_idle = 1; + + if (bfq_timeout_async == 0) + bfq_timeout_async = 1; + + if (bfq_slab_setup()) + return -ENOMEM; + + elv_register(&iosched_bfq); + + return 0; +} + +static void __exit bfq_exit(void) +{ + DECLARE_COMPLETION_ONSTACK(all_gone); + elv_unregister(&iosched_bfq); + bfq_ioc_gone = &all_gone; + /* bfq_ioc_gone's update must be visible before reading bfq_ioc_count */ + smp_wmb(); + if (elv_ioc_count_read(bfq_ioc_count) != 0) + wait_for_completion(&all_gone); + ida_destroy(&cic_index_ida); + bfq_slab_kill(); +} + +module_init(bfq_init); +module_exit(bfq_exit); + +MODULE_AUTHOR("Fabio Checconi, Paolo Valente"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Budget Fair Queueing IO scheduler"); diff --git a/block/bfq-sched.c b/block/bfq-sched.c new file mode 100644 index 000000000..b4a6b1b3d --- /dev/null +++ b/block/bfq-sched.c @@ -0,0 +1,1010 @@ +/* + * BFQ: Hierarchical B-WF2Q+ scheduler. + * + * Based on ideas and code from CFQ: + * Copyright (C) 2003 Jens Axboe + * + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + */ + +#ifdef CONFIG_CGROUP_BFQIO +#define for_each_entity(entity) \ + for (; entity != NULL; entity = entity->parent) + +#define for_each_entity_safe(entity, parent) \ + for (; entity && ({ parent = entity->parent; 1; }); entity = parent) + +static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, + int extract); + +static int bfq_update_next_active(struct bfq_sched_data *sd) +{ + struct bfq_group *bfqg; + struct bfq_entity *entity, *next_active; + + if (sd->active_entity != NULL) + /* will update/requeue at the end of service */ + return 0; + + /* + * NOTE: this can be improved in many ways, such as returning + * 1 (and thus propagating upwards the update) only when the + * budget changes, or caching the bfqq that will be scheduled + * next from this subtree. By now we worry more about + * correctness than about performance... 
+ */ + next_active = bfq_lookup_next_entity(sd, 0); + sd->next_active = next_active; + + if (next_active != NULL) { + bfqg = container_of(sd, struct bfq_group, sched_data); + entity = bfqg->my_entity; + if (entity != NULL) + entity->budget = next_active->budget; + } + + return 1; +} + +static inline void bfq_check_next_active(struct bfq_sched_data *sd, + struct bfq_entity *entity) +{ + BUG_ON(sd->next_active != entity); +} +#else +#define for_each_entity(entity) \ + for (; entity != NULL; entity = NULL) + +#define for_each_entity_safe(entity, parent) \ + for (parent = NULL; entity != NULL; entity = parent) + +static inline int bfq_update_next_active(struct bfq_sched_data *sd) +{ + return 0; +} + +static inline void bfq_check_next_active(struct bfq_sched_data *sd, + struct bfq_entity *entity) +{ +} +#endif + +/* + * Shift for timestamp calculations. This actually limits the maximum + * service allowed in one timestamp delta (small shift values increase it), + * the maximum total weight that can be used for the queues in the system + * (big shift values increase it), and the period of virtual time wraparounds. + */ +#define WFQ_SERVICE_SHIFT 22 + +/** + * bfq_gt - compare two timestamps. + * @a: first ts. + * @b: second ts. + * + * Return @a > @b, dealing with wrapping correctly. + */ +static inline int bfq_gt(bfq_timestamp_t a, bfq_timestamp_t b) +{ + return (s64)(a - b) > 0; +} + +static inline struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = NULL; + + BUG_ON(entity == NULL); + + if (entity->my_sched_data == NULL) + bfqq = container_of(entity, struct bfq_queue, entity); + + return bfqq; +} + + +/** + * bfq_delta - map service into the virtual time domain. + * @service: amount of service. + * @weight: scale factor (weight of an entity or weight sum). + */ +static inline bfq_timestamp_t bfq_delta(bfq_service_t service, + unsigned long weight) +{ + bfq_timestamp_t d = (bfq_timestamp_t)service << WFQ_SERVICE_SHIFT; + + do_div(d, weight); + return d; +} + +/** + * bfq_calc_finish - assign the finish time to an entity. + * @entity: the entity to act upon. + * @service: the service to be charged to the entity. + */ +static inline void bfq_calc_finish(struct bfq_entity *entity, + bfq_service_t service) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + + BUG_ON(entity->weight == 0); + + entity->finish = entity->start + + bfq_delta(service, entity->weight); + + if (bfqq != NULL) { + bfq_log_bfqq(bfqq->bfqd, bfqq, + "calc_finish: serv %lu, w %lu, hi-budg %lu", + service, entity->weight, + bfqq->high_weight_budget); + bfq_log_bfqq(bfqq->bfqd, bfqq, + "calc_finish: start %llu, finish %llu, delta %llu", + entity->start, entity->finish, + bfq_delta(service, entity->weight)); + } +} + +/** + * bfq_entity_of - get an entity from a node. + * @node: the node field of the entity. + * + * Convert a node pointer to the relative entity. This is used only + * to simplify the logic of some functions and not as the generic + * conversion mechanism because, e.g., in the tree walking functions, + * the check for a %NULL value would be redundant. + */ +static inline struct bfq_entity *bfq_entity_of(struct rb_node *node) +{ + struct bfq_entity *entity = NULL; + + if (node != NULL) + entity = rb_entry(node, struct bfq_entity, rb_node); + + return entity; +} + +/** + * bfq_extract - remove an entity from a tree. + * @root: the tree root. + * @entity: the entity to remove. 
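
bfq_gt() and bfq_delta() above are the core of the timestamp arithmetic: comparisons stay correct across counter wraparound thanks to the signed subtraction, and bfq_calc_finish() advances an entity's finish time by service/weight in fixed point (shift WFQ_SERVICE_SHIFT). A standalone sketch of the same arithmetic, with local names and a smaller example shift kept equal to the patch's value of 22:

#include <stdio.h>
#include <stdint.h>

#define SERVICE_SHIFT 22

typedef uint64_t timestamp_t;

/* Wrap-safe "a > b": correct even after the counters overflow. */
static int ts_gt(timestamp_t a, timestamp_t b)
{
        return (int64_t)(a - b) > 0;
}

/* Service -> virtual-time delta, inversely proportional to the weight. */
static timestamp_t vtime_delta(unsigned long service, unsigned long weight)
{
        return ((timestamp_t)service << SERVICE_SHIFT) / weight;
}

int main(void)
{
        timestamp_t start = 0;
        unsigned long service = 4096;

        /* A weight-2 entity accrues virtual time half as fast as weight-1. */
        printf("finish (weight 1): %llu\n",
               (unsigned long long)(start + vtime_delta(service, 1)));
        printf("finish (weight 2): %llu\n",
               (unsigned long long)(start + vtime_delta(service, 2)));

        /* Wraparound: a timestamp 5 ticks past overflow still compares greater. */
        printf("wrap compare: %d\n",
               ts_gt((timestamp_t)-1 + 3, (timestamp_t)-3));
        return 0;
}
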
+ */ +static inline void bfq_extract(struct rb_root *root, + struct bfq_entity *entity) +{ + BUG_ON(entity->tree != root); + + entity->tree = NULL; + rb_erase(&entity->rb_node, root); +} + +/** + * bfq_idle_extract - extract an entity from the idle tree. + * @st: the service tree of the owning @entity. + * @entity: the entity being removed. + */ +static void bfq_idle_extract(struct bfq_service_tree *st, + struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct rb_node *next; + + BUG_ON(entity->tree != &st->idle); + + if (entity == st->first_idle) { + next = rb_next(&entity->rb_node); + st->first_idle = bfq_entity_of(next); + } + + if (entity == st->last_idle) { + next = rb_prev(&entity->rb_node); + st->last_idle = bfq_entity_of(next); + } + + bfq_extract(&st->idle, entity); + + if (bfqq != NULL) + list_del(&bfqq->bfqq_list); +} + +/** + * bfq_insert - generic tree insertion. + * @root: tree root. + * @entity: entity to insert. + * + * This is used for the idle and the active tree, since they are both + * ordered by finish time. + */ +static void bfq_insert(struct rb_root *root, struct bfq_entity *entity) +{ + struct bfq_entity *entry; + struct rb_node **node = &root->rb_node; + struct rb_node *parent = NULL; + + BUG_ON(entity->tree != NULL); + + while (*node != NULL) { + parent = *node; + entry = rb_entry(parent, struct bfq_entity, rb_node); + + if (bfq_gt(entry->finish, entity->finish)) + node = &parent->rb_left; + else + node = &parent->rb_right; + } + + rb_link_node(&entity->rb_node, parent, node); + rb_insert_color(&entity->rb_node, root); + + entity->tree = root; +} + +/** + * bfq_update_min - update the min_start field of a entity. + * @entity: the entity to update. + * @node: one of its children. + * + * This function is called when @entity may store an invalid value for + * min_start due to updates to the active tree. The function assumes + * that the subtree rooted at @node (which may be its left or its right + * child) has a valid min_start value. + */ +static inline void bfq_update_min(struct bfq_entity *entity, + struct rb_node *node) +{ + struct bfq_entity *child; + + if (node != NULL) { + child = rb_entry(node, struct bfq_entity, rb_node); + if (bfq_gt(entity->min_start, child->min_start)) + entity->min_start = child->min_start; + } +} + +/** + * bfq_update_active_node - recalculate min_start. + * @node: the node to update. + * + * @node may have changed position or one of its children may have moved, + * this function updates its min_start value. The left and right subtrees + * are assumed to hold a correct min_start value. + */ +static inline void bfq_update_active_node(struct rb_node *node) +{ + struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node); + + entity->min_start = entity->start; + bfq_update_min(entity, node->rb_right); + bfq_update_min(entity, node->rb_left); +} + +/** + * bfq_update_active_tree - update min_start for the whole active tree. + * @node: the starting node. + * + * @node must be the deepest modified node after an update. This function + * updates its min_start using the values held by its children, assuming + * that they did not change, and then updates all the nodes that may have + * changed in the path to the root. The only nodes that may have changed + * are the ones in the path or their siblings. 
+ */ +static void bfq_update_active_tree(struct rb_node *node) +{ + struct rb_node *parent; + +up: + bfq_update_active_node(node); + + parent = rb_parent(node); + if (parent == NULL) + return; + + if (node == parent->rb_left && parent->rb_right != NULL) + bfq_update_active_node(parent->rb_right); + else if (parent->rb_left != NULL) + bfq_update_active_node(parent->rb_left); + + node = parent; + goto up; +} + +/** + * bfq_active_insert - insert an entity in the active tree of its group/device. + * @st: the service tree of the entity. + * @entity: the entity being inserted. + * + * The active tree is ordered by finish time, but an extra key is kept + * per each node, containing the minimum value for the start times of + * its children (and the node itself), so it's possible to search for + * the eligible node with the lowest finish time in logarithmic time. + */ +static void bfq_active_insert(struct bfq_service_tree *st, + struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct rb_node *node = &entity->rb_node; + + bfq_insert(&st->active, entity); + + if (node->rb_left != NULL) + node = node->rb_left; + else if (node->rb_right != NULL) + node = node->rb_right; + + bfq_update_active_tree(node); + + if (bfqq != NULL) + list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list); +} + +/** + * bfq_ioprio_to_weight - calc a weight from an ioprio. + * @ioprio: the ioprio value to convert. + */ +static unsigned short bfq_ioprio_to_weight(int ioprio) +{ + WARN_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR); + return IOPRIO_BE_NR - ioprio; +} + +/** + * bfq_weight_to_ioprio - calc an ioprio from a weight. + * @weight: the weight value to convert. + * + * To preserve as mush as possible the old only-ioprio user interface, + * 0 is used as an escape ioprio value for weights (numerically) equal or + * larger than IOPRIO_BE_NR + */ +static unsigned short bfq_weight_to_ioprio(int weight) +{ + WARN_ON(weight < BFQ_MIN_WEIGHT || weight > BFQ_MAX_WEIGHT); + return IOPRIO_BE_NR - weight < 0 ? 0 : IOPRIO_BE_NR - weight; +} + +static inline void bfq_get_entity(struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct bfq_sched_data *sd; + + if (bfqq != NULL) { + sd = entity->sched_data; + atomic_inc(&bfqq->ref); + bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d", + bfqq, bfqq->ref); + } +} + +/** + * bfq_find_deepest - find the deepest node that an extraction can modify. + * @node: the node being removed. + * + * Do the first step of an extraction in an rb tree, looking for the + * node that will replace @node, and returning the deepest node that + * the following modifications to the tree can touch. If @node is the + * last node in the tree return %NULL. + */ +static struct rb_node *bfq_find_deepest(struct rb_node *node) +{ + struct rb_node *deepest; + + if (node->rb_right == NULL && node->rb_left == NULL) + deepest = rb_parent(node); + else if (node->rb_right == NULL) + deepest = node->rb_left; + else if (node->rb_left == NULL) + deepest = node->rb_right; + else { + deepest = rb_next(node); + if (deepest->rb_right != NULL) + deepest = deepest->rb_right; + else if (rb_parent(deepest) != node) + deepest = rb_parent(deepest); + } + + return deepest; +} + +/** + * bfq_active_extract - remove an entity from the active tree. + * @st: the service_tree containing the tree. + * @entity: the entity being removed. 
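
bfq_ioprio_to_weight() and bfq_weight_to_ioprio() above map ioprio levels to weights and back: with IOPRIO_BE_NR equal to 8, ioprio 0 (highest) becomes weight 8 and ioprio 7 becomes weight 1, with ioprio 0 doubling as the escape value for larger weights. A quick round-trip check in standalone C:

#include <stdio.h>

#define IOPRIO_BE_NR 8

static unsigned short ioprio_to_weight(int ioprio)
{
        return IOPRIO_BE_NR - ioprio;
}

static unsigned short weight_to_ioprio(int weight)
{
        return IOPRIO_BE_NR - weight < 0 ? 0 : IOPRIO_BE_NR - weight;
}

int main(void)
{
        for (int ioprio = 0; ioprio < IOPRIO_BE_NR; ioprio++)
                printf("ioprio %d -> weight %u -> ioprio %u\n", ioprio,
                       ioprio_to_weight(ioprio),
                       weight_to_ioprio(ioprio_to_weight(ioprio)));
        return 0;
}
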
+ */ +static void bfq_active_extract(struct bfq_service_tree *st, + struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct rb_node *node; + + node = bfq_find_deepest(&entity->rb_node); + bfq_extract(&st->active, entity); + + if (node != NULL) + bfq_update_active_tree(node); + + if (bfqq != NULL) + list_del(&bfqq->bfqq_list); +} + +/** + * bfq_idle_insert - insert an entity into the idle tree. + * @st: the service tree containing the tree. + * @entity: the entity to insert. + */ +static void bfq_idle_insert(struct bfq_service_tree *st, + struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct bfq_entity *first_idle = st->first_idle; + struct bfq_entity *last_idle = st->last_idle; + + if (first_idle == NULL || bfq_gt(first_idle->finish, entity->finish)) + st->first_idle = entity; + if (last_idle == NULL || bfq_gt(entity->finish, last_idle->finish)) + st->last_idle = entity; + + bfq_insert(&st->idle, entity); + + if (bfqq != NULL) + list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list); +} + +/** + * bfq_forget_entity - remove an entity from the wfq trees. + * @st: the service tree. + * @entity: the entity being removed. + * + * Update the device status and forget everything about @entity, putting + * the device reference to it, if it is a queue. Entities belonging to + * groups are not refcounted. + */ +static void bfq_forget_entity(struct bfq_service_tree *st, + struct bfq_entity *entity) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct bfq_sched_data *sd; + + BUG_ON(!entity->on_st); + + entity->on_st = 0; + st->wsum -= entity->weight; + if (bfqq != NULL) { + sd = entity->sched_data; + bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity: %p %d", + bfqq, bfqq->ref); + bfq_put_queue(bfqq); + } +} + +/** + * bfq_put_idle_entity - release the idle tree ref of an entity. + * @st: service tree for the entity. + * @entity: the entity being released. + */ +static void bfq_put_idle_entity(struct bfq_service_tree *st, + struct bfq_entity *entity) +{ + bfq_idle_extract(st, entity); + bfq_forget_entity(st, entity); +} + +/** + * bfq_forget_idle - update the idle tree if necessary. + * @st: the service tree to act upon. + * + * To preserve the global O(log N) complexity we only remove one entry here; + * as the idle tree will not grow indefinitely this can be done safely. + */ +static void bfq_forget_idle(struct bfq_service_tree *st) +{ + struct bfq_entity *first_idle = st->first_idle; + struct bfq_entity *last_idle = st->last_idle; + + if (RB_EMPTY_ROOT(&st->active) && last_idle != NULL && + !bfq_gt(last_idle->finish, st->vtime)) { + /* + * Forget the whole idle tree, increasing the vtime past + * the last finish time of idle entities. 
+ */ + st->vtime = last_idle->finish; + } + + if (first_idle != NULL && !bfq_gt(first_idle->finish, st->vtime)) + bfq_put_idle_entity(st, first_idle); +} + +static struct bfq_service_tree * +__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, + struct bfq_entity *entity) +{ + struct bfq_service_tree *new_st = old_st; + + if (entity->ioprio_changed) { + int new_boost_coeff = 1; + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + + if (bfqq != NULL) { + new_boost_coeff += + bfqq->high_weight_budget * BFQ_BOOST_COEFF / + BFQ_BOOST_BUDGET; + bfq_log_bfqq(bfqq->bfqd, bfqq, + "update_w_prio: wght %lu, hi-budg %lu, coef %d", + entity->weight, bfqq->high_weight_budget, + new_boost_coeff); + } + + BUG_ON(old_st->wsum < entity->weight); + old_st->wsum -= entity->weight; + + if (entity->new_weight != entity->orig_weight) { + entity->orig_weight = entity->new_weight; + entity->ioprio = + bfq_weight_to_ioprio(entity->orig_weight); + } else if (entity->new_ioprio != entity->ioprio) { + entity->ioprio = entity->new_ioprio; + entity->orig_weight = + bfq_ioprio_to_weight(entity->ioprio); + } else + entity->new_weight = entity->orig_weight = + bfq_ioprio_to_weight(entity->ioprio); + + entity->ioprio_class = entity->new_ioprio_class; + entity->ioprio_changed = 0; + + /* + * NOTE: here we may be changing the weight too early, + * this will cause unfairness. The correct approach + * would have required additional complexity to defer + * weight changes to the proper time instants (i.e., + * when entity->finish <= old_st->vtime). + */ + new_st = bfq_entity_service_tree(entity); + entity->weight = entity->orig_weight * new_boost_coeff; + new_st->wsum += entity->weight; + + if (new_st != old_st) + entity->start = new_st->vtime; + } + + return new_st; +} + +/** + * bfq_bfqq_served - update the scheduler status after selection for service. + * @bfqq: the queue being served. + * @served: bytes to transfer. + * + * NOTE: this can be optimized, as the timestamps of upper level entities + * are synchronized every time a new bfqq is selected for service. By now, + * we keep it to better check consistency. + */ +static void bfq_bfqq_served(struct bfq_queue *bfqq, bfq_service_t served) +{ + struct bfq_entity *entity = &bfqq->entity; + struct bfq_service_tree *st; + + for_each_entity(entity) { + st = bfq_entity_service_tree(entity); + + entity->service += served; + WARN_ON_ONCE(entity->service > entity->budget); + BUG_ON(st->wsum == 0); + + st->vtime += bfq_delta(served, st->wsum); + bfq_forget_idle(st); + } + bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %lu secs", served); +} + +/** + * bfq_bfqq_charge_full_budget - set the service to the entity budget. + * @bfqq: the queue that needs a service update. + * + * When it's not possible to be fair in the service domain, because + * a queue is not consuming its budget fast enough (the meaning of + * fast depends on the timeout parameter), we charge it a full + * budget. In this way we should obtain a sort of time-domain + * fairness among all the seeky/slow queues. + */ +static inline void bfq_bfqq_charge_full_budget(struct bfq_queue *bfqq) +{ + struct bfq_entity *entity = &bfqq->entity; + + bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget"); + + bfq_bfqq_served(bfqq, entity->budget - entity->service); +} + +/** + * __bfq_activate_entity - activate an entity. + * @entity: the entity being activated. 
+ * + * Called whenever an entity is activated, i.e., it is not active and one + * of its children receives a new request, or has to be reactivated due to + * budget exhaustion. It uses the current budget of the entity (and the + * service received if @entity is active) of the queue to calculate its + * timestamps. + */ +static void __bfq_activate_entity(struct bfq_entity *entity) +{ + struct bfq_sched_data *sd = entity->sched_data; + struct bfq_service_tree *st = bfq_entity_service_tree(entity); + + if (entity == sd->active_entity) { + BUG_ON(entity->tree != NULL); + /* + * If we are requeueing the current entity we have + * to take care of not charging to it service it has + * not received. + */ + bfq_calc_finish(entity, entity->service); + entity->start = entity->finish; + sd->active_entity = NULL; + } else if (entity->tree == &st->active) { + /* + * Requeueing an entity due to a change of some + * next_active entity below it. We reuse the old + * start time. + */ + bfq_active_extract(st, entity); + } else if (entity->tree == &st->idle) { + /* + * Must be on the idle tree, bfq_idle_extract() will + * check for that. + */ + bfq_idle_extract(st, entity); + entity->start = bfq_gt(st->vtime, entity->finish) ? + st->vtime : entity->finish; + } else { + /* + * The finish time of the entity may be invalid, and + * it is in the past for sure, otherwise the queue + * would have been on the idle tree. + */ + entity->start = st->vtime; + st->wsum += entity->weight; + bfq_get_entity(entity); + + BUG_ON(entity->on_st); + entity->on_st = 1; + } + + st = __bfq_entity_update_weight_prio(st, entity); + bfq_calc_finish(entity, entity->budget); + bfq_active_insert(st, entity); +} + +/** + * bfq_activate_entity - activate an entity and its ancestors if necessary. + * @entity: the entity to activate. + * + * Activate @entity and all the entities on the path from it to the root. + */ +static void bfq_activate_entity(struct bfq_entity *entity) +{ + struct bfq_sched_data *sd; + + for_each_entity(entity) { + __bfq_activate_entity(entity); + + sd = entity->sched_data; + if (!bfq_update_next_active(sd)) + /* + * No need to propagate the activation to the + * upper entities, as they will be updated when + * the active entity is rescheduled. + */ + break; + } +} + +/** + * __bfq_deactivate_entity - deactivate an entity from its service tree. + * @entity: the entity to deactivate. + * @requeue: if false, the entity will not be put into the idle tree. + * + * Deactivate an entity, independently from its previous state. If the + * entity was not on a service tree just return, otherwise if it is on + * any scheduler tree, extract it from that tree, and if necessary + * and if the caller did not specify @requeue, put it on the idle tree. + * + * Return %1 if the caller should update the entity hierarchy, i.e., + * if the entity was under service or if it was the next_active for + * its sched_data; return %0 otherwise. 
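+ *
+ * Note that even when @requeue is set the entity is forgotten anyway if
+ * its finish time is not ahead of the service tree vtime: a later
+ * reactivation would assign it start = vtime in either case (see the
+ * idle-tree branch of __bfq_activate_entity()), so there is nothing
+ * worth remembering on the idle tree.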
+ */ +static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue) +{ + struct bfq_sched_data *sd = entity->sched_data; + struct bfq_service_tree *st = bfq_entity_service_tree(entity); + int was_active = entity == sd->active_entity; + int ret = 0; + + if (!entity->on_st) + return 0; + + BUG_ON(was_active && entity->tree != NULL); + + if (was_active) { + bfq_calc_finish(entity, entity->service); + sd->active_entity = NULL; + } else if (entity->tree == &st->active) + bfq_active_extract(st, entity); + else if (entity->tree == &st->idle) + bfq_idle_extract(st, entity); + else if (entity->tree != NULL) + BUG(); + + if (was_active || sd->next_active == entity) + ret = bfq_update_next_active(sd); + + if (!requeue || !bfq_gt(entity->finish, st->vtime)) + bfq_forget_entity(st, entity); + else + bfq_idle_insert(st, entity); + + BUG_ON(sd->active_entity == entity); + BUG_ON(sd->next_active == entity); + + return ret; +} + +/** + * bfq_deactivate_entity - deactivate an entity. + * @entity: the entity to deactivate. + * @requeue: true if the entity can be put on the idle tree + */ +static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue) +{ + struct bfq_sched_data *sd; + struct bfq_entity *parent; + + for_each_entity_safe(entity, parent) { + sd = entity->sched_data; + + if (!__bfq_deactivate_entity(entity, requeue)) + /* + * The parent entity is still backlogged, and + * we don't need to update it as it is still + * under service. + */ + break; + + if (sd->next_active != NULL) + /* + * The parent entity is still backlogged and + * the budgets on the path towards the root + * need to be updated. + */ + goto update; + + /* + * If we reach there the parent is no more backlogged and + * we want to propagate the dequeue upwards. + */ + requeue = 1; + } + + return; + +update: + entity = parent; + for_each_entity(entity) { + __bfq_activate_entity(entity); + + sd = entity->sched_data; + if (!bfq_update_next_active(sd)) + break; + } +} + +/** + * bfq_update_vtime - update vtime if necessary. + * @st: the service tree to act upon. + * + * If necessary update the service tree vtime to have at least one + * eligible entity, skipping to its start time. Assumes that the + * active tree of the device is not empty. + * + * NOTE: this hierarchical implementation updates vtimes quite often, + * we may end up with reactivated tasks getting timestamps after a + * vtime skip done because we needed a ->first_active entity on some + * intermediate node. + */ +static void bfq_update_vtime(struct bfq_service_tree *st) +{ + struct bfq_entity *entry; + struct rb_node *node = st->active.rb_node; + + entry = rb_entry(node, struct bfq_entity, rb_node); + if (bfq_gt(entry->min_start, st->vtime)) { + st->vtime = entry->min_start; + bfq_forget_idle(st); + } +} + +/** + * bfq_first_active - find the eligible entity with the smallest finish time + * @st: the service tree to select from. + * + * This function searches the first schedulable entity, starting from the + * root of the tree and going on the left every time on this side there is + * a subtree with at least one eligible (start >= vtime) entity. The path + * on the right is followed only if a) the left subtree contains no eligible + * entities and b) no eligible entity has been found yet. 
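+ *
+ * Purely illustrative example: with st->vtime == 10, a root whose start
+ * is 12 and a left child whose start is 8, the left subtree has
+ * min_start <= 8, so the walk descends to the child and returns it: the
+ * child is eligible (start <= vtime) and, lying to the left of the root,
+ * also has the smaller finish time.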
+ */ +static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st) +{ + struct bfq_entity *entry, *first = NULL; + struct rb_node *node = st->active.rb_node; + + while (node != NULL) { + entry = rb_entry(node, struct bfq_entity, rb_node); +left: + if (!bfq_gt(entry->start, st->vtime)) + first = entry; + + BUG_ON(bfq_gt(entry->min_start, st->vtime)); + + if (node->rb_left != NULL) { + entry = rb_entry(node->rb_left, + struct bfq_entity, rb_node); + if (!bfq_gt(entry->min_start, st->vtime)) { + node = node->rb_left; + goto left; + } + } + if (first != NULL) + break; + node = node->rb_right; + } + + BUG_ON(first == NULL && !RB_EMPTY_ROOT(&st->active)); + return first; +} + +/** + * __bfq_lookup_next_entity - return the first eligible entity in @st. + * @st: the service tree. + * + * Update the virtual time in @st and return the first eligible entity + * it contains. + */ +static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st) +{ + struct bfq_entity *entity; + + if (RB_EMPTY_ROOT(&st->active)) + return NULL; + + bfq_update_vtime(st); + entity = bfq_first_active_entity(st); + BUG_ON(bfq_gt(entity->start, st->vtime)); + + return entity; +} + +/** + * bfq_lookup_next_entity - return the first eligible entity in @sd. + * @sd: the sched_data. + * @extract: if true the returned entity will be also extracted from @sd. + * + * NOTE: since we cache the next_active entity at each level of the + * hierarchy, the complexity of the lookup can be decreased with + * absolutely no effort just returning the cached next_active value; + * we prefer to do full lookups to test the consistency of * the data + * structures. + */ +static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, + int extract) +{ + struct bfq_service_tree *st = sd->service_tree; + struct bfq_entity *entity; + int i; + + BUG_ON(sd->active_entity != NULL); + + for (i = 0; i < BFQ_IOPRIO_CLASSES; i++, st++) { + entity = __bfq_lookup_next_entity(st); + if (entity != NULL) { + if (extract) { + bfq_check_next_active(sd, entity); + bfq_active_extract(st, entity); + sd->active_entity = entity; + sd->next_active = NULL; + } + break; + } + } + + return entity; +} + +/* + * Get next queue for service. + */ +static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) +{ + struct bfq_entity *entity = NULL; + struct bfq_sched_data *sd; + struct bfq_queue *bfqq; + + BUG_ON(bfqd->active_queue != NULL); + + if (bfqd->busy_queues == 0) + return NULL; + + sd = &bfqd->root_group->sched_data; + for (; sd != NULL; sd = entity->my_sched_data) { + entity = bfq_lookup_next_entity(sd, 1); + BUG_ON(entity == NULL); + entity->service = 0; + } + + bfqq = bfq_entity_to_bfqq(entity); + BUG_ON(bfqq == NULL); + + return bfqq; +} + +static void __bfq_bfqd_reset_active(struct bfq_data *bfqd) +{ + if (bfqd->active_cic != NULL) { + put_io_context(bfqd->active_cic->ioc); + bfqd->active_cic = NULL; + } + + bfqd->active_queue = NULL; + del_timer(&bfqd->idle_slice_timer); +} + +static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, + int requeue) +{ + struct bfq_entity *entity = &bfqq->entity; + + if (bfqq == bfqd->active_queue) + __bfq_bfqd_reset_active(bfqd); + + bfq_deactivate_entity(entity, requeue); +} + +static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) +{ + struct bfq_entity *entity = &bfqq->entity; + + bfq_activate_entity(entity); +} + +/* + * Called when the bfqq no longer has requests pending, remove it from + * the service tree. 
+ */ +static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, + int requeue) +{ + BUG_ON(!bfq_bfqq_busy(bfqq)); + BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); + + bfq_log_bfqq(bfqd, bfqq, "del from busy"); + + bfq_clear_bfqq_busy(bfqq); + + BUG_ON(bfqd->busy_queues == 0); + bfqd->busy_queues--; + + bfq_deactivate_bfqq(bfqd, bfqq, requeue); +} + +/* + * Called when an inactive queue receives a new request. + */ +static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq) +{ + BUG_ON(bfq_bfqq_busy(bfqq)); + BUG_ON(bfqq == bfqd->active_queue); + + bfq_log_bfqq(bfqd, bfqq, "add to busy"); + + bfq_activate_bfqq(bfqd, bfqq); + + bfq_mark_bfqq_busy(bfqq); + bfqd->busy_queues++; +} diff --git a/block/bfq.h b/block/bfq.h new file mode 100644 index 000000000..078b416fc --- /dev/null +++ b/block/bfq.h @@ -0,0 +1,558 @@ +/* + * BFQ-v1-r1 for 2.6.35: data structures and common functions prototypes. + * + * Based on ideas and code from CFQ: + * Copyright (C) 2003 Jens Axboe + * + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + */ + +#ifndef _BFQ_H +#define _BFQ_H + +#include +#include +#include +#include + +#define BFQ_IOPRIO_CLASSES 3 + +#define BFQ_MIN_WEIGHT 1 +#define BFQ_MAX_WEIGHT 1000 + +#define BFQ_DEFAULT_GRP_WEIGHT 10 +#define BFQ_DEFAULT_GRP_IOPRIO 0 +#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE + +/* Constants used in weight boosting (in its turn used to reduce latencies): */ +/* max factor by which the weight of a boosted queue is multiplied */ +#define BFQ_BOOST_COEFF 10 +/* max number of sectors that can be served during a boosting period */ +#define BFQ_BOOST_BUDGET 49152 +/* max duration of a boosting period, msec */ +#define BFQ_BOOST_TIMEOUT 6000 +/* min idle period after which boosting may be reactivated for a queue, msec */ +#define BFQ_MIN_ACT_INTERVAL 20000 + +typedef u64 bfq_timestamp_t; +typedef unsigned long bfq_service_t; + +struct bfq_entity; + +/** + * struct bfq_service_tree - per ioprio_class service tree. + * @active: tree for active entities (i.e., those backlogged). + * @idle: tree for idle entities (i.e., those not backlogged, with V <= F_i). + * @first_idle: idle entity with minimum F_i. + * @last_idle: idle entity with maximum F_i. + * @vtime: scheduler virtual time. + * @wsum: scheduler weight sum; active and idle entities contribute to it. + * + * Each service tree represents a B-WF2Q+ scheduler on its own. Each + * ioprio_class has its own independent scheduler, and so its own + * bfq_service_tree. All the fields are protected by the queue lock + * of the containing bfqd. + */ +struct bfq_service_tree { + struct rb_root active; + struct rb_root idle; + + struct bfq_entity *first_idle; + struct bfq_entity *last_idle; + + bfq_timestamp_t vtime; + unsigned long wsum; +}; + +/** + * struct bfq_sched_data - multi-class scheduler. + * @active_entity: entity under service. + * @next_active: head-of-the-line entity in the scheduler. + * @service_tree: array of service trees, one per ioprio_class. + * + * bfq_sched_data is the basic scheduler queue. It supports three + * ioprio_classes, and can be used either as a toplevel queue or as + * an intermediate queue on a hierarchical setup. + * @next_active points to the active entity of the sched_data service + * trees that will be scheduled next. + * + * The supported ioprio_classes are the same as in CFQ, in descending + * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE. 
+ * Requests from higher priority queues are served before all the + * requests from lower priority queues; among requests of the same + * queue requests are served according to B-WF2Q+. + * All the fields are protected by the queue lock of the containing bfqd. + */ +struct bfq_sched_data { + struct bfq_entity *active_entity; + struct bfq_entity *next_active; + struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES]; +}; + +/** + * struct bfq_entity - schedulable entity. + * @rb_node: service_tree member. + * @on_st: flag, true if the entity is on a tree (either the active or + * the idle one of its service_tree). + * @finish: B-WF2Q+ finish timestamp (aka F_i). + * @start: B-WF2Q+ start timestamp (aka S_i). + * @tree: tree the entity is enqueued into; %NULL if not on a tree. + * @min_start: minimum start time of the (active) subtree rooted at + * this entity; used for O(log N) lookups into active trees. + * @service: service received during the last round of service. + * @budget: budget used to calculate F_i; F_i = S_i + @budget / @weight. + * @weight: weight of the queue + * @parent: parent entity, for hierarchical scheduling. + * @my_sched_data: for non-leaf nodes in the cgroup hierarchy, the + * associated scheduler queue, %NULL on leaf nodes. + * @sched_data: the scheduler queue this entity belongs to. + * @ioprio: the ioprio in use. + * @new_weight: when a weight change is requested, the new weight value. + * @orig_weight: original weight, used to implement weight boosting + * @new_ioprio: when an ioprio change is requested, the new ioprio value. + * @ioprio_class: the ioprio_class in use. + * @new_ioprio_class: when an ioprio_class change is requested, the new + * ioprio_class value. + * @ioprio_changed: flag, true when the user requested a weight, ioprio or + * ioprio_class change. + * + * A bfq_entity is used to represent either a bfq_queue (leaf node in the + * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each + * entity belongs to the sched_data of the parent group in the cgroup + * hierarchy. Non-leaf entities have also their own sched_data, stored + * in @my_sched_data. + * + * Each entity stores independently its priority values; this would + * allow different weights on different devices, but this + * functionality is not exported to userspace by now. Priorities and + * weights are updated lazily, first storing the new values into the + * new_* fields, then setting the @ioprio_changed flag. As soon as + * there is a transition in the entity state that allows the priority + * update to take place the effective and the requested priority + * values are synchronized. + * + * Unless cgroups are used, the weight value is calculated from the + * ioprio to export the same interface as CFQ. When dealing with + * ``well-behaved'' queues (i.e., queues that do not spend too much + * time to consume their budget and have true sequential behavior, and + * when there are no external factors breaking anticipation) the + * relative weights at each level of the cgroups hierarchy should be + * guaranteed. All the fields are protected by the queue lock of the + * containing bfqd. 
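+ *
+ * Purely illustrative example of the timestamp arithmetic: a best-effort
+ * queue with ioprio 4 gets weight IOPRIO_BE_NR - 4 = 4 (see
+ * bfq_ioprio_to_weight()), so activating it with a budget of 512 sectors
+ * yields F_i = S_i + 512 / 4, i.e., a finish time 128 units of virtual
+ * time past its start time.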
+ */ +struct bfq_entity { + struct rb_node rb_node; + + int on_st; + + bfq_timestamp_t finish; + bfq_timestamp_t start; + + struct rb_root *tree; + + bfq_timestamp_t min_start; + + bfq_service_t service, budget; + unsigned short weight, new_weight; + unsigned short orig_weight; + + struct bfq_entity *parent; + + struct bfq_sched_data *my_sched_data; + struct bfq_sched_data *sched_data; + + unsigned short ioprio, new_ioprio; + unsigned short ioprio_class, new_ioprio_class; + + int ioprio_changed; +}; + +struct bfq_group; + +/** + * struct bfq_data - per device data structure. + * @queue: request queue for the managed device. + * @root_group: root bfq_group for the device. + * @busy_queues: number of bfq_queues containing requests (including the + * queue under service, even if it is idling). + * @queued: number of queued requests. + * @rq_in_driver: number of requests dispatched and waiting for completion. + * @sync_flight: number of sync requests in the driver. + * @max_rq_in_driver: max number of reqs in driver in the last @hw_tag_samples + * completed requests . + * @hw_tag_samples: nr of samples used to calculate hw_tag. + * @hw_tag: flag set to one if the driver is showing a queueing behavior. + * @budgets_assigned: number of budgets assigned. + * @idle_slice_timer: timer set when idling for the next sequential request + * from the queue under service. + * @unplug_work: delayed work to restart dispatching on the request queue. + * @active_queue: bfq_queue under service. + * @active_cic: cfq_io_context (cic) associated with the @active_queue. + * @last_position: on-disk position of the last served request. + * @last_budget_start: beginning of the last budget. + * @last_idling_start: beginning of the last idle slice. + * @peak_rate: peak transfer rate observed for a budget. + * @peak_rate_samples: number of samples used to calculate @peak_rate. + * @bfq_max_budget: maximum budget allotted to a bfq_queue before rescheduling. + * @cic_index: use small consequent indexes as radix tree keys to reduce depth + * @cic_list: list of all the cics active on the bfq_data device. + * @group_list: list of all the bfq_groups active on the device. + * @active_list: list of all the bfq_queues active on the device. + * @idle_list: list of all the bfq_queues idle on the device. + * @bfq_quantum: max number of requests dispatched per dispatch round. + * @bfq_fifo_expire: timeout for async/sync requests; when it expires + * requests are served in fifo order. + * @bfq_back_penalty: weight of backward seeks wrt forward ones. + * @bfq_back_max: maximum allowed backward seek. + * @bfq_slice_idle: maximum idling time. + * @bfq_user_max_budget: user-configured max budget value (0 for auto-tuning). + * @bfq_max_budget_async_rq: maximum budget (in nr of requests) allotted to + * async queues. + * @bfq_timeout: timeout for bfq_queues to consume their budget; used to + * to prevent seeky queues to impose long latencies to well + * behaved ones (this also implies that seeky queues cannot + * receive guarantees in the service domain; after a timeout + * they are charged for the whole allocated budget, to try + * to preserve a behavior reasonably fair among them, but + * without service-domain guarantees). + * + * All the fields are protected by the @queue lock. 
+ */ +struct bfq_data { + struct request_queue *queue; + + struct bfq_group *root_group; + + int busy_queues; + int queued; + int rq_in_driver; + int sync_flight; + + int max_rq_in_driver; + int hw_tag_samples; + int hw_tag; + + int budgets_assigned; + + struct timer_list idle_slice_timer; + struct work_struct unplug_work; + + struct bfq_queue *active_queue; + struct cfq_io_context *active_cic; + + sector_t last_position; + + ktime_t last_budget_start; + ktime_t last_idling_start; + int peak_rate_samples; + u64 peak_rate; + bfq_service_t bfq_max_budget; + + unsigned int cic_index; + struct list_head cic_list; + struct hlist_head group_list; + struct list_head active_list; + struct list_head idle_list; + + unsigned int bfq_quantum; + unsigned int bfq_fifo_expire[2]; + unsigned int bfq_back_penalty; + unsigned int bfq_back_max; + unsigned int bfq_slice_idle; + + unsigned int bfq_user_max_budget; + unsigned int bfq_max_budget_async_rq; + unsigned int bfq_timeout[2]; + + bool low_latency; +}; + +/** + * struct bfq_queue - leaf schedulable entity. + * @ref: reference counter. + * @bfqd: parent bfq_data. + * @sort_list: sorted list of pending requests. + * @next_rq: if fifo isn't expired, next request to serve. + * @queued: nr of requests queued in @sort_list. + * @allocated: currently allocated requests. + * @meta_pending: pending metadata requests. + * @fifo: fifo list of requests in sort_list. + * @entity: entity representing this queue in the scheduler. + * @max_budget: maximum budget allowed from the feedback mechanism. + * @budget_timeout: budget expiration (in jiffies). + * @dispatched: number of requests on the dispatch list or inside driver. + * @org_ioprio: saved ioprio during boosted periods. + * @org_ioprio_class: saved ioprio_class during boosted periods. + * @flags: status flags. + * @bfqq_list: node for active/idle bfqq list inside our bfqd. + * @seek_samples: number of seeks sampled + * @seek_total: sum of the distances of the seeks sampled + * @seek_mean: mean seek distance + * @last_request_pos: position of the last request enqueued + * @pid: pid of the process owning the queue, used for logging purposes. + * @last_activation_time: time of the last (idle -> backlogged) transition + * @high_weight_budget: number of sectors left to serve with boosted weight + * + * A bfq_queue is a leaf request queue; it can be associated to an io_context + * or more (if it is an async one). @cgroup holds a reference to the + * cgroup, to be sure that it does not disappear while a bfqq still + * references it (mostly to avoid races between request issuing and task + * migration followed by cgroup distruction). + * All the fields are protected by the queue lock of the containing bfqd. 
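+ *
+ * @ref is an atomic reference count: bfq_get_entity() takes a reference
+ * whenever the queue is enqueued into the scheduler and
+ * bfq_forget_entity() drops it through bfq_put_queue(), so the queue is
+ * only freed once its last holder lets go of it.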
+ */ +struct bfq_queue { + atomic_t ref; + struct bfq_data *bfqd; + + struct rb_root sort_list; + struct request *next_rq; + int queued[2]; + int allocated[2]; + int meta_pending; + struct list_head fifo; + + struct bfq_entity entity; + + bfq_service_t max_budget; + unsigned long budget_timeout; + + int dispatched; + + unsigned short org_ioprio; + unsigned short org_ioprio_class; + + unsigned int flags; + + struct list_head bfqq_list; + + unsigned int seek_samples; + u64 seek_total; + sector_t seek_mean; + sector_t last_request_pos; + + pid_t pid; + + u64 last_activation_time; + bfq_service_t high_weight_budget; +}; + +enum bfqq_state_flags { + BFQ_BFQQ_FLAG_busy = 0, /* has requests or is under service */ + BFQ_BFQQ_FLAG_wait_request, /* waiting for a request */ + BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */ + BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */ + BFQ_BFQQ_FLAG_idle_window, /* slice idling enabled */ + BFQ_BFQQ_FLAG_prio_changed, /* task priority has changed */ + BFQ_BFQQ_FLAG_sync, /* synchronous queue */ + BFQ_BFQQ_FLAG_budget_new, /* no completion with this budget */ +}; + +#define BFQ_BFQQ_FNS(name) \ +static inline void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \ +{ \ + (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name); \ +} \ +static inline void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \ +{ \ + (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name); \ +} \ +static inline int bfq_bfqq_##name(const struct bfq_queue *bfqq) \ +{ \ + return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \ +} + +BFQ_BFQQ_FNS(busy); +BFQ_BFQQ_FNS(wait_request); +BFQ_BFQQ_FNS(must_alloc); +BFQ_BFQQ_FNS(fifo_expire); +BFQ_BFQQ_FNS(idle_window); +BFQ_BFQQ_FNS(prio_changed); +BFQ_BFQQ_FNS(sync); +BFQ_BFQQ_FNS(budget_new); +#undef BFQ_BFQQ_FNS + +/* Logging facilities. */ +#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \ + blk_add_trace_msg((bfqd)->queue, "bfq%d " fmt, (bfqq)->pid, ##args) + +#define bfq_log(bfqd, fmt, args...) \ + blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args) + +/* Expiration reasons. */ +enum bfqq_expiration { + BFQ_BFQQ_TOO_IDLE = 0, /* queue has been idling for too long */ + BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */ + BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */ + BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */ +}; + +#ifdef CONFIG_CGROUP_BFQIO +/** + * struct bfq_group - per (device, cgroup) data structure. + * @entity: schedulable entity to insert into the parent group sched_data. + * @sched_data: own sched_data, to contain child entities (they may be + * both bfq_queues and bfq_groups). + * @group_node: node to be inserted into the bfqio_cgroup->group_data + * list of the containing cgroup's bfqio_cgroup. + * @bfqd_node: node to be inserted into the @bfqd->group_list list + * of the groups active on the same device; used for cleanup. + * @bfqd: the bfq_data for the device this group acts upon. + * @async_bfqq: array of async queues for all the tasks belonging to + * the group, one queue per ioprio value per ioprio_class, + * except for the idle class that has only one queue. + * @async_idle_bfqq: async queue for the idle class (ioprio is ignored). + * @my_entity: pointer to @entity, %NULL for the toplevel group; used + * to avoid too many special cases during group creation/migration. + * + * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup + * there is a set of bfq_groups, each one collecting the lower-level + * entities belonging to the group that are acting on the same device. 
+ * + * Locking works as follows: + * o @group_node is protected by the bfqio_cgroup lock, and is accessed + * via RCU from its readers. + * o @bfqd is protected by the queue lock, RCU is used to access it + * from the readers. + * o All the other fields are protected by the @bfqd queue lock. + */ +struct bfq_group { + struct bfq_entity entity; + struct bfq_sched_data sched_data; + + struct hlist_node group_node; + struct hlist_node bfqd_node; + + void *bfqd; + + struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; + struct bfq_queue *async_idle_bfqq; + + struct bfq_entity *my_entity; +}; + +/** + * struct bfqio_cgroup - bfq cgroup data structure. + * @css: subsystem state for bfq in the containing cgroup. + * @weight: cgroup weight. + * @ioprio: cgroup ioprio. + * @ioprio_class: cgroup ioprio_class. + * @lock: spinlock that protects @ioprio, @ioprio_class and @group_data. + * @group_data: list containing the bfq_group belonging to this cgroup. + * + * @group_data is accessed using RCU, with @lock protecting the updates, + * @ioprio and @ioprio_class are protected by @lock. + */ +struct bfqio_cgroup { + struct cgroup_subsys_state css; + + unsigned short weight, ioprio, ioprio_class; + + spinlock_t lock; + struct hlist_head group_data; +}; +#else +struct bfq_group { + struct bfq_sched_data sched_data; + + struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR]; + struct bfq_queue *async_idle_bfqq; +}; +#endif + +static inline struct bfq_service_tree * +bfq_entity_service_tree(struct bfq_entity *entity) +{ + struct bfq_sched_data *sched_data = entity->sched_data; + unsigned int idx = entity->ioprio_class - 1; + + BUG_ON(idx >= BFQ_IOPRIO_CLASSES); + BUG_ON(sched_data == NULL); + + return sched_data->service_tree + idx; +} + +static inline struct bfq_queue *cic_to_bfqq(struct cfq_io_context *cic, + int is_sync) +{ + return cic->cfqq[!!is_sync]; +} + +static inline void cic_set_bfqq(struct cfq_io_context *cic, + struct bfq_queue *bfqq, int is_sync) +{ + cic->cfqq[!!is_sync] = bfqq; +} + +static inline void call_for_each_cic(struct io_context *ioc, + void (*func)(struct io_context *, + struct cfq_io_context *)) +{ + struct cfq_io_context *cic; + struct hlist_node *n; + + rcu_read_lock(); + hlist_for_each_entry_rcu(cic, n, &ioc->bfq_cic_list, cic_list) + func(ioc, cic); + rcu_read_unlock(); +} + +#define CIC_DEAD_KEY 1ul +#define CIC_DEAD_INDEX_SHIFT 1 + +static inline void *bfqd_dead_key(struct bfq_data *bfqd) +{ + return (void *)(bfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY); +} + +/** + * bfq_get_bfqd_locked - get a lock to a bfqd using a RCU protected pointer. + * @ptr: a pointer to a bfqd. + * @flags: storage for the flags to be saved. + * + * This function allows cic->key and bfqg->bfqd to be protected by the + * queue lock of the bfqd they reference; the pointer is dereferenced + * under RCU, so the storage for bfqd is assured to be safe as long + * as the RCU read side critical section does not end. After the + * bfqd->queue->queue_lock is taken the pointer is rechecked, to be + * sure that no other writer accessed it. If we raced with a writer, + * the function returns NULL, with the queue unlocked, otherwise it + * returns the dereferenced pointer, with the queue locked. + */ +static inline struct bfq_data *bfq_get_bfqd_locked(void **ptr, + unsigned long *flags) +{ + struct bfq_data *bfqd; + + rcu_read_lock(); + bfqd = rcu_dereference(*(struct bfq_data **)ptr); + + if (bfqd != NULL && ! 
((unsigned long) bfqd & CIC_DEAD_KEY)) { + spin_lock_irqsave(bfqd->queue->queue_lock, *flags); + if (*ptr == bfqd) + goto out; + spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags); + } + + bfqd = NULL; +out: + rcu_read_unlock(); + return bfqd; +} + +static inline void bfq_put_bfqd_unlock(struct bfq_data *bfqd, + unsigned long *flags) +{ + spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags); +} + +static void bfq_changed_ioprio(struct io_context *ioc, + struct cfq_io_context *cic); +static void bfq_put_queue(struct bfq_queue *bfqq); +static void bfq_dispatch_insert(struct request_queue *q, struct request *rq); +static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, + struct bfq_group *bfqg, int is_sync, + struct io_context *ioc, gfp_t gfp_mask); +static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg); +static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq); +#endif diff --git a/block/blk-ioc.c b/block/blk-ioc.c index d22c4c55c..52ebea9cd 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include /* for max_pfn/max_low_pfn */ #include @@ -16,13 +17,12 @@ */ static struct kmem_cache *iocontext_cachep; -static void cfq_dtor(struct io_context *ioc) +static void hlist_sched_dtor(struct io_context *ioc, struct hlist_head *list) { - if (!hlist_empty(&ioc->cic_list)) { + if (!hlist_empty(list)) { struct cfq_io_context *cic; - cic = list_entry(ioc->cic_list.first, struct cfq_io_context, - cic_list); + cic = list_entry(list->first, struct cfq_io_context, cic_list); cic->dtor(ioc); } } @@ -40,7 +40,9 @@ int put_io_context(struct io_context *ioc) if (atomic_long_dec_and_test(&ioc->refcount)) { rcu_read_lock(); - cfq_dtor(ioc); + + hlist_sched_dtor(ioc, &ioc->cic_list); + hlist_sched_dtor(ioc, &ioc->bfq_cic_list); rcu_read_unlock(); kmem_cache_free(iocontext_cachep, ioc); @@ -50,15 +52,14 @@ int put_io_context(struct io_context *ioc) } EXPORT_SYMBOL(put_io_context); -static void cfq_exit(struct io_context *ioc) +static void hlist_sched_exit(struct io_context *ioc, struct hlist_head *list) { rcu_read_lock(); - if (!hlist_empty(&ioc->cic_list)) { + if (!hlist_empty(list)) { struct cfq_io_context *cic; - cic = list_entry(ioc->cic_list.first, struct cfq_io_context, - cic_list); + cic = list_entry(list->first, struct cfq_io_context, cic_list); cic->exit(ioc); } rcu_read_unlock(); @@ -75,8 +76,8 @@ void exit_io_context(struct task_struct *task) task_unlock(task); if (atomic_dec_and_test(&ioc->nr_tasks)) { - cfq_exit(ioc); - + hlist_sched_exit(ioc, &ioc->cic_list); + hlist_sched_exit(ioc, &ioc->bfq_cic_list); } put_io_context(ioc); } @@ -90,12 +91,14 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node) atomic_long_set(&ret->refcount, 1); atomic_set(&ret->nr_tasks, 1); spin_lock_init(&ret->lock); - ret->ioprio_changed = 0; + bitmap_zero(ret->ioprio_changed, IOC_IOPRIO_CHANGED_BITS); ret->ioprio = 0; ret->last_waited = 0; /* doesn't matter... 
*/ ret->nr_batch_requests = 0; /* because this is 0 */ INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH); INIT_HLIST_HEAD(&ret->cic_list); + INIT_RADIX_TREE(&ret->bfq_radix_root, GFP_ATOMIC | __GFP_HIGH); + INIT_HLIST_HEAD(&ret->bfq_cic_list); ret->ioc_data = NULL; } diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 7982b830d..e80a8fdd6 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -2755,7 +2755,6 @@ static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic) static void cfq_ioc_set_ioprio(struct io_context *ioc) { call_for_each_cic(ioc, changed_ioprio); - ioc->ioprio_changed = 0; } static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, @@ -3039,8 +3038,13 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) goto err_free; out: - smp_read_barrier_depends(); - if (unlikely(ioc->ioprio_changed)) + /* + * test_and_clear_bit() implies a memory barrier, paired with + * the wmb() in fs/ioprio.c, so the value seen for ioprio is the + * new one. + */ + if (unlikely(test_and_clear_bit(IOC_CFQ_IOPRIO_CHANGED, + ioc->ioprio_changed))) cfq_ioc_set_ioprio(ioc); #ifdef CONFIG_CFQ_GROUP_IOSCHED diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index b547cbca7..b2184bf74 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c @@ -20,7 +20,7 @@ static const int read_expire = HZ / 2; /* max time before a read is submitted. */ static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */ static const int writes_starved = 2; /* max times reads can starve a write */ -static const int fifo_batch = 16; /* # of sequential requests treated as one +static const int fifo_batch = 1; /* # of sequential requests treated as one by the above parameters. For throughput. */ struct deadline_data { diff --git a/block/sio-iosched.c b/block/sio-iosched.c new file mode 100644 index 000000000..aab730e67 --- /dev/null +++ b/block/sio-iosched.c @@ -0,0 +1,346 @@ +/* + * Simple IO scheduler + * Based on Noop, Deadline and V(R) IO schedulers. + * + * Copyright (C) 2010 Miguel Boton + * + * + * This algorithm does not do any kind of sorting, as it is aimed for + * aleatory access devices, but it does some basic merging. We try to + * keep minimum overhead to achieve low latencies. + * + * Asynchronous and synchronous requests are not treated separately, but + * we relay on deadlines to ensure fairness. + * + */ +#include +#include +#include +#include +#include +#include + +enum { + ASYNC, + SYNC, +}; + +/* Tunables */ +static const int sync_expire = HZ / 2; /* max time before a sync is submitted. */ +static const int async_expire = 5 * HZ; /* ditto for async, these limits are SOFT! */ +static const int fifo_batch = 16; /* # of sequential requests treated as one + by the above parameters. For throughput. */ + +/* Elevator data */ +struct sio_data { + /* Request queues */ + struct list_head fifo_list[2]; + + /* Attributes */ + unsigned int batched; + + /* Settings */ + int fifo_expire[2]; + int fifo_batch; +}; + +static void +sio_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + /* + * If next expires before rq, assign its expire time to rq + * and move into next position (next will be deleted) in fifo. 
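+	 * For example, if rq would expire 300 ms from now while next would
+	 * expire in only 50 ms, rq takes over next's fifo position and its
+	 * earlier deadline, so the merged request keeps the stricter expiry.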
+ */ + if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) { + if (time_before(rq_fifo_time(next), rq_fifo_time(rq))) { + list_move(&rq->queuelist, &next->queuelist); + rq_set_fifo_time(rq, rq_fifo_time(next)); + } + } + + /* Delete next request */ + rq_fifo_clear(next); +} + +static void +sio_add_request(struct request_queue *q, struct request *rq) +{ + struct sio_data *sd = q->elevator->elevator_data; + const int sync = rq_is_sync(rq); + + /* + * Add request to the proper fifo list and set its + * expire time. + */ + rq_set_fifo_time(rq, jiffies + sd->fifo_expire[sync]); + list_add_tail(&rq->queuelist, &sd->fifo_list[sync]); +} + +static int +sio_queue_empty(struct request_queue *q) +{ + struct sio_data *sd = q->elevator->elevator_data; + + /* Check if fifo lists are empty */ + return list_empty(&sd->fifo_list[SYNC]) && + list_empty(&sd->fifo_list[ASYNC]); +} + +static struct request * +sio_expired_request(struct sio_data *sd, int sync) +{ + struct request *rq; + + if (list_empty(&sd->fifo_list[sync])) + return NULL; + + /* Retrieve request */ + rq = rq_entry_fifo(sd->fifo_list[sync].next); + + /* Request has expired */ + if (time_after(jiffies, rq_fifo_time(rq))) + return rq; + + return NULL; +} + +static struct request * +sio_choose_expired_request(struct sio_data *sd) +{ + struct request *sync = sio_expired_request(sd, SYNC); + struct request *async = sio_expired_request(sd, ASYNC); + + /* + * Check expired requests. Asynchronous requests have + * priority over synchronous. + */ + if (sync && async) + return async; + if (sync) + return sync; + + return async; + +} + +static struct request * +sio_choose_request(struct sio_data *sd) +{ + /* + * Retrieve request from available fifo list. + * Synchronous requests have priority over asynchronous. + */ + if (!list_empty(&sd->fifo_list[SYNC])) + return rq_entry_fifo(sd->fifo_list[SYNC].next); + + if (!list_empty(&sd->fifo_list[ASYNC])) + return rq_entry_fifo(sd->fifo_list[ASYNC].next); + + return NULL; +} + +static inline void +sio_dispatch_request(struct sio_data *sd, struct request *rq) +{ + /* + * Remove the request from the fifo list + * and dispatch it. + */ + rq_fifo_clear(rq); + elv_dispatch_add_tail(rq->q, rq); + + sd->batched++; +} + +static int +sio_dispatch_requests(struct request_queue *q, int force) +{ + struct sio_data *sd = q->elevator->elevator_data; + struct request *rq = NULL; + + /* + * Retrieve any expired request after a batch of + * sequential requests. 
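+	 * With the default fifo_batch of 16 this check fires roughly once
+	 * per batch of dispatches, bounding how long an already expired
+	 * request can be postponed by a stream of fresh ones.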
+ */ + if (sd->batched > sd->fifo_batch) { + sd->batched = 0; + rq = sio_choose_expired_request(sd); + } + + /* Retrieve request */ + if (!rq) { + rq = sio_choose_request(sd); + if (!rq) + return 0; + } + + /* Dispatch request */ + sio_dispatch_request(sd, rq); + + return 1; +} + +static struct request * +sio_former_request(struct request_queue *q, struct request *rq) +{ + struct sio_data *sd = q->elevator->elevator_data; + const int sync = rq_is_sync(rq); + + if (rq->queuelist.prev == &sd->fifo_list[sync]) + return NULL; + + /* Return former request */ + return list_entry(rq->queuelist.prev, struct request, queuelist); +} + +static struct request * +sio_latter_request(struct request_queue *q, struct request *rq) +{ + struct sio_data *sd = q->elevator->elevator_data; + const int sync = rq_is_sync(rq); + + if (rq->queuelist.next == &sd->fifo_list[sync]) + return NULL; + + /* Return latter request */ + return list_entry(rq->queuelist.next, struct request, queuelist); +} + +static void * +sio_init_queue(struct request_queue *q) +{ + struct sio_data *sd; + + /* Allocate structure */ + sd = kmalloc_node(sizeof(*sd), GFP_KERNEL, q->node); + if (!sd) + return NULL; + + /* Initialize fifo lists */ + INIT_LIST_HEAD(&sd->fifo_list[SYNC]); + INIT_LIST_HEAD(&sd->fifo_list[ASYNC]); + + /* Initialize data */ + sd->batched = 0; + sd->fifo_expire[SYNC] = sync_expire; + sd->fifo_expire[ASYNC] = async_expire; + sd->fifo_batch = fifo_batch; + + return sd; +} + +static void +sio_exit_queue(struct elevator_queue *e) +{ + struct sio_data *sd = e->elevator_data; + + BUG_ON(!list_empty(&sd->fifo_list[SYNC])); + BUG_ON(!list_empty(&sd->fifo_list[ASYNC])); + + /* Free structure */ + kfree(sd); +} + +/* + * sysfs code + */ + +static ssize_t +sio_var_show(int var, char *page) +{ + return sprintf(page, "%d\n", var); +} + +static ssize_t +sio_var_store(int *var, const char *page, size_t count) +{ + char *p = (char *) page; + + *var = simple_strtol(p, &p, 10); + return count; +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct sio_data *sd = e->elevator_data; \ + int __data = __VAR; \ + if (__CONV) \ + __data = jiffies_to_msecs(__data); \ + return sio_var_show(__data, (page)); \ +} +SHOW_FUNCTION(sio_sync_expire_show, sd->fifo_expire[SYNC], 1); +SHOW_FUNCTION(sio_async_expire_show, sd->fifo_expire[ASYNC], 1); +SHOW_FUNCTION(sio_fifo_batch_show, sd->fifo_batch, 0); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ + struct sio_data *sd = e->elevator_data; \ + int __data; \ + int ret = sio_var_store(&__data, (page), count); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ + if (__CONV) \ + *(__PTR) = msecs_to_jiffies(__data); \ + else \ + *(__PTR) = __data; \ + return ret; \ +} +STORE_FUNCTION(sio_sync_expire_store, &sd->fifo_expire[SYNC], 0, INT_MAX, 1); +STORE_FUNCTION(sio_async_expire_store, &sd->fifo_expire[ASYNC], 0, INT_MAX, 1); +STORE_FUNCTION(sio_fifo_batch_store, &sd->fifo_batch, 0, INT_MAX, 0); +#undef STORE_FUNCTION + +#define DD_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, sio_##name##_show, \ + sio_##name##_store) + +static struct elv_fs_entry sio_attrs[] = { + DD_ATTR(sync_expire), + DD_ATTR(async_expire), + DD_ATTR(fifo_batch), + __ATTR_NULL +}; + +static struct elevator_type iosched_sio = { + .ops = { + .elevator_merge_req_fn = sio_merged_requests, + 
.elevator_dispatch_fn = sio_dispatch_requests, + .elevator_add_req_fn = sio_add_request, + .elevator_queue_empty_fn = sio_queue_empty, + .elevator_former_req_fn = sio_former_request, + .elevator_latter_req_fn = sio_latter_request, + .elevator_init_fn = sio_init_queue, + .elevator_exit_fn = sio_exit_queue, + }, + + .elevator_attrs = sio_attrs, + .elevator_name = "sio", + .elevator_owner = THIS_MODULE, +}; + +static int __init sio_init(void) +{ + /* Register elevator */ + elv_register(&iosched_sio); + + return 0; +} + +static void __exit sio_exit(void) +{ + /* Unregister elevator */ + elv_unregister(&iosched_sio); +} + +module_init(sio_init); +module_exit(sio_exit); + +MODULE_AUTHOR("Miguel Boton"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Simple IO scheduler"); diff --git a/block/vr-iosched.c b/block/vr-iosched.c new file mode 100644 index 000000000..44198d20e --- /dev/null +++ b/block/vr-iosched.c @@ -0,0 +1,446 @@ +/* +* V(R) I/O Scheduler +* +* Copyright (C) 2007 Aaron Carroll +* +* +* The algorithm: +* +* The next request is decided based on its distance from the last +* request, with a multiplicative penalty of `rev_penalty' applied +* for reversing the head direction. A rev_penalty of 1 means SSTF +* behaviour. As this variable is increased, the algorithm approaches +* pure SCAN. Setting rev_penalty to 0 forces SCAN. +* +* Async and synch requests are not treated seperately. Instead we +* rely on deadlines to ensure fairness. +* +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +enum vr_data_dir { +ASYNC, +SYNC, +}; + +enum vr_head_dir { +FORWARD, +BACKWARD, +}; + +static const int sync_expire = HZ / 2; /* max time before a sync is submitted. */ +static const int async_expire = 5 * HZ; /* ditto for async, these limits are SOFT! */ +static const int fifo_batch = 16; +static const int rev_penalty = 10; /* penalty for reversing head direction */ + +struct vr_data { +struct rb_root sort_list; +struct list_head fifo_list[2]; + +struct request *next_rq; +struct request *prev_rq; + +unsigned int nbatched; +sector_t last_sector; /* head position */ +int head_dir; + +/* tunables */ +int fifo_expire[2]; +int fifo_batch; +int rev_penalty; +}; + +static void vr_move_request(struct vr_data *, struct request *); + +static inline struct vr_data * +vr_get_data(struct request_queue *q) +{ +return q->elevator->elevator_data; +} + +static void +vr_add_rq_rb(struct vr_data *vd, struct request *rq) +{ +struct request *alias = elv_rb_add(&vd->sort_list, rq); + +if (unlikely(alias)) { +vr_move_request(vd, alias); +alias = elv_rb_add(&vd->sort_list, rq); +BUG_ON(alias); +} + +if (blk_rq_pos(rq) >= vd->last_sector) { +if (!vd->next_rq || blk_rq_pos(vd->next_rq) > blk_rq_pos(rq)) +vd->next_rq = rq; +} +else { +if (!vd->prev_rq || blk_rq_pos(vd->prev_rq) < blk_rq_pos(rq)) +vd->prev_rq = rq; +} + +BUG_ON(vd->next_rq && vd->next_rq == vd->prev_rq); +BUG_ON(vd->next_rq && vd->prev_rq && blk_rq_pos(vd->next_rq) < blk_rq_pos(vd->prev_rq)); +} + +static void +vr_del_rq_rb(struct vr_data *vd, struct request *rq) +{ +/* +* We might be deleting our cached next request. +* If so, find its sucessor. 
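+* (next_rq caches the closest request at or beyond the current head
+* position and prev_rq the closest one behind it, so whichever of the
+* two points at rq must be re-derived before rq leaves the sort tree.)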
+*/ + +if (vd->next_rq == rq) +vd->next_rq = elv_rb_latter_request(NULL, rq); +else if (vd->prev_rq == rq) +vd->prev_rq = elv_rb_former_request(NULL, rq); + +BUG_ON(vd->next_rq && vd->next_rq == vd->prev_rq); +BUG_ON(vd->next_rq && vd->prev_rq && blk_rq_pos(vd->next_rq) < blk_rq_pos(vd->prev_rq)); + +elv_rb_del(&vd->sort_list, rq); +} + +/* +* add rq to rbtree and fifo +*/ +static void +vr_add_request(struct request_queue *q, struct request *rq) +{ +struct vr_data *vd = vr_get_data(q); +const int dir = rq_is_sync(rq); + +vr_add_rq_rb(vd, rq); + +if (vd->fifo_expire[dir]) { +rq_set_fifo_time(rq, jiffies + vd->fifo_expire[dir]); +list_add_tail(&rq->queuelist, &vd->fifo_list[dir]); +} +} + +/* +* remove rq from rbtree and fifo. +*/ +static void +vr_remove_request(struct request_queue *q, struct request *rq) +{ +struct vr_data *vd = vr_get_data(q); + +rq_fifo_clear(rq); +vr_del_rq_rb(vd, rq); +} + +static int +vr_merge(struct request_queue *q, struct request **rqp, struct bio *bio) +{ +sector_t sector = bio->bi_sector + bio_sectors(bio); +struct vr_data *vd = vr_get_data(q); +struct request *rq = elv_rb_find(&vd->sort_list, sector); + +if (rq && elv_rq_merge_ok(rq, bio)) { +*rqp = rq; +return ELEVATOR_FRONT_MERGE; +} +return ELEVATOR_NO_MERGE; +} + +static void +vr_merged_request(struct request_queue *q, struct request *req, int type) +{ +struct vr_data *vd = vr_get_data(q); + +/* +* if the merge was a front merge, we need to reposition request +*/ +if (type == ELEVATOR_FRONT_MERGE) { +vr_del_rq_rb(vd, req); +vr_add_rq_rb(vd, req); +} +} + +static void +vr_merged_requests(struct request_queue *q, struct request *rq, +struct request *next) +{ +/* +* if next expires before rq, assign its expire time to rq +* and move into next position (next will be deleted) in fifo +*/ +if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) { +if (time_before(rq_fifo_time(next), rq_fifo_time(rq))) { +list_move(&rq->queuelist, &next->queuelist); +rq_set_fifo_time(rq, rq_fifo_time(next)); +} +} + +vr_remove_request(q, next); +} + +/* +* move an entry to dispatch queue +*/ +static void +vr_move_request(struct vr_data *vd, struct request *rq) +{ +struct request_queue *q = rq->q; + +if (blk_rq_pos(rq) > vd->last_sector) +vd->head_dir = FORWARD; +else +vd->head_dir = BACKWARD; + +vd->last_sector = blk_rq_pos(rq); +vd->next_rq = elv_rb_latter_request(NULL, rq); +vd->prev_rq = elv_rb_former_request(NULL, rq); + +BUG_ON(vd->next_rq && vd->next_rq == vd->prev_rq); + +vr_remove_request(q, rq); +elv_dispatch_add_tail(q, rq); +vd->nbatched++; +} + +/* +* get the first expired request in direction ddir +*/ +static struct request * +vr_expired_request(struct vr_data *vd, int ddir) +{ +struct request *rq; + +if (list_empty(&vd->fifo_list[ddir])) +return NULL; + +rq = rq_entry_fifo(vd->fifo_list[ddir].next); +if (time_after(jiffies, rq_fifo_time(rq))) +return rq; + +return NULL; +} + +/* +* Returns the oldest expired request +*/ +static struct request * +vr_check_fifo(struct vr_data *vd) +{ +struct request *rq_sync = vr_expired_request(vd, SYNC); +struct request *rq_async = vr_expired_request(vd, ASYNC); + +if (rq_async && rq_sync) { +if (time_after(rq_fifo_time(rq_async), rq_fifo_time(rq_sync))) +return rq_sync; +} +else if (rq_sync) +return rq_sync; + +return rq_async; +} + +/* +* Return the request with the lowest penalty +*/ +static struct request * +vr_choose_request(struct vr_data *vd) +{ +int penalty = (vd->rev_penalty) ? 
: INT_MAX; +struct request *next = vd->next_rq; +struct request *prev = vd->prev_rq; +sector_t next_pen, prev_pen; + +BUG_ON(prev && prev == next); + +if (!prev) +return next; +else if (!next) +return prev; + +/* At this point both prev and next are defined and distinct */ + +next_pen = blk_rq_pos(next) - vd->last_sector; +prev_pen = vd->last_sector - blk_rq_pos(prev); + +if (vd->head_dir == FORWARD) +next_pen = do_div(next_pen, penalty); +else +prev_pen = do_div(prev_pen, penalty); + +if (next_pen <= prev_pen) +return next; + +return prev; +} + +static int +vr_dispatch_requests(struct request_queue *q, int force) +{ +struct vr_data *vd = vr_get_data(q); +struct request *rq = NULL; + +/* Check for and issue expired requests */ +if (vd->nbatched > vd->fifo_batch) { +vd->nbatched = 0; +rq = vr_check_fifo(vd); +} + +if (!rq) { +rq = vr_choose_request(vd); +if (!rq) +return 0; +} + +vr_move_request(vd, rq); + +return 1; +} + +static int +vr_queue_empty(struct request_queue *q) +{ +struct vr_data *vd = vr_get_data(q); +return RB_EMPTY_ROOT(&vd->sort_list); +} + +static void +vr_exit_queue(struct elevator_queue *e) +{ +struct vr_data *vd = e->elevator_data; +BUG_ON(!RB_EMPTY_ROOT(&vd->sort_list)); +kfree(vd); +} + +/* +* initialize elevator private data (vr_data). +*/ +static void *vr_init_queue(struct request_queue *q) +{ +struct vr_data *vd; + +vd = kmalloc_node(sizeof(*vd), GFP_KERNEL | __GFP_ZERO, q->node); +if (!vd) +return NULL; + +INIT_LIST_HEAD(&vd->fifo_list[SYNC]); +INIT_LIST_HEAD(&vd->fifo_list[ASYNC]); +vd->sort_list = RB_ROOT; +vd->fifo_expire[SYNC] = sync_expire; +vd->fifo_expire[ASYNC] = async_expire; +vd->fifo_batch = fifo_batch; +vd->rev_penalty = rev_penalty; +return vd; +} + +/* +* sysfs parts below +*/ + +static ssize_t +vr_var_show(int var, char *page) +{ +return sprintf(page, "%d\n", var); +} + +static ssize_t +vr_var_store(int *var, const char *page, size_t count) +{ +*var = simple_strtol(page, NULL, 10); +return count; +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ +struct vr_data *vd = e->elevator_data; \ +int __data = __VAR; \ +if (__CONV) \ +__data = jiffies_to_msecs(__data); \ +return vr_var_show(__data, (page)); \ +} +SHOW_FUNCTION(vr_sync_expire_show, vd->fifo_expire[SYNC], 1); +SHOW_FUNCTION(vr_async_expire_show, vd->fifo_expire[ASYNC], 1); +SHOW_FUNCTION(vr_fifo_batch_show, vd->fifo_batch, 0); +SHOW_FUNCTION(vr_rev_penalty_show, vd->rev_penalty, 0); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ +struct vr_data *vd = e->elevator_data; \ +int __data; \ +int ret = vr_var_store(&__data, (page), count); \ +if (__data < (MIN)) \ +__data = (MIN); \ +else if (__data > (MAX)) \ +__data = (MAX); \ +if (__CONV) \ +*(__PTR) = msecs_to_jiffies(__data); \ +else \ +*(__PTR) = __data; \ +return ret; \ +} +STORE_FUNCTION(vr_sync_expire_store, &vd->fifo_expire[SYNC], 0, INT_MAX, 1); +STORE_FUNCTION(vr_async_expire_store, &vd->fifo_expire[ASYNC], 0, INT_MAX, 1); +STORE_FUNCTION(vr_fifo_batch_store, &vd->fifo_batch, 0, INT_MAX, 0); +STORE_FUNCTION(vr_rev_penalty_store, &vd->rev_penalty, 0, INT_MAX, 0); +#undef STORE_FUNCTION + +#define DD_ATTR(name) \ +__ATTR(name, S_IRUGO|S_IWUSR, vr_##name##_show, \ +vr_##name##_store) + +static struct elv_fs_entry vr_attrs[] = { +DD_ATTR(sync_expire), +DD_ATTR(async_expire), +DD_ATTR(fifo_batch), +DD_ATTR(rev_penalty), +__ATTR_NULL +}; + +static struct 
elevator_type iosched_vr = { +.ops = { +.elevator_merge_fn = vr_merge, +.elevator_merged_fn = vr_merged_request, +.elevator_merge_req_fn = vr_merged_requests, +.elevator_dispatch_fn = vr_dispatch_requests, +.elevator_add_req_fn = vr_add_request, +.elevator_queue_empty_fn = vr_queue_empty, +.elevator_former_req_fn = elv_rb_former_request, +.elevator_latter_req_fn = elv_rb_latter_request, +.elevator_init_fn = vr_init_queue, +.elevator_exit_fn = vr_exit_queue, +}, + +.elevator_attrs = vr_attrs, +.elevator_name = "vr", +.elevator_owner = THIS_MODULE, +}; + +static int __init vr_init(void) +{ +elv_register(&iosched_vr); + +return 0; +} + +static void __exit vr_exit(void) +{ +elv_unregister(&iosched_vr); +} + +module_init(vr_init); +module_exit(vr_exit); + +MODULE_AUTHOR("Aaron Carroll"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("V(R) IO scheduler"); diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 60ba0161b..d7a8df0aa 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -610,16 +610,20 @@ static void mmc_detect(struct mmc_host *host) */ static int mmc_suspend(struct mmc_host *host) { + int err = 0; + BUG_ON(!host); BUG_ON(!host->card); mmc_claim_host(host); - if (!mmc_host_is_spi(host)) + if (mmc_card_can_sleep(host)) + err = mmc_card_sleep(host); + else if (!mmc_host_is_spi(host)) mmc_deselect_cards(host); host->card->state &= ~MMC_STATE_HIGHSPEED; mmc_release_host(host); - return 0; + return err; } /* diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c index 2b49a9cea..4d370dce7 100644 --- a/drivers/mmc/host/msm_sdcc.c +++ b/drivers/mmc/host/msm_sdcc.c @@ -884,6 +884,13 @@ msmsdcc_irq(int irq, void *dev_id) #if IRQ_DEBUG msmsdcc_print_status(host, "irq0-p", status); #endif +#ifdef CONFIG_MMC_MSM_SDIO_SUPPORT + if (status & MCI_SDIOINTROPE) { + if (host->sdcc_suspending) + wake_lock(&host->sdio_suspend_wlock); + mmc_signal_sdio_irq(host->mmc); + } +#endif if ((host->plat->dummy52_required) && (host->dummy_52_state == DUMMY_52_STATE_SENT)) { @@ -905,13 +912,6 @@ msmsdcc_irq(int irq, void *dev_id) } data = host->curr.data; -#ifdef CONFIG_MMC_MSM_SDIO_SUPPORT - if (status & MCI_SDIOINTROPE) { - if (host->sdcc_suspending) - wake_lock(&host->sdio_suspend_wlock); - mmc_signal_sdio_irq(host->mmc); - } -#endif /* * Check for proper command response */ @@ -2054,18 +2054,11 @@ static int msmsdcc_pm_resume(struct device *dev) struct msmsdcc_host *host = mmc_priv(mmc); int rc = 0; - rc = msmsdcc_runtime_resume(dev); + if (!pm_runtime_suspended(dev)) + rc = msmsdcc_runtime_resume(dev); if (host->plat->status_irq) enable_irq(host->plat->status_irq); - /* Update the run-time PM status */ - pm_runtime_disable(dev); - rc = pm_runtime_set_active(dev); - if (rc < 0) - pr_info("%s: %s: failed with error %d", mmc_hostname(mmc), - __func__, rc); - pm_runtime_enable(dev); - return rc; } diff --git a/fs/ioprio.c b/fs/ioprio.c index 748cfb92d..3232045e0 100644 --- a/fs/ioprio.c +++ b/fs/ioprio.c @@ -30,7 +30,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio) { - int err; + int err, i; struct io_context *ioc; const struct cred *cred = current_cred(), *tcred; @@ -60,12 +60,17 @@ int set_task_ioprio(struct task_struct *task, int ioprio) err = -ENOMEM; break; } + /* let other ioc users see the new values */ + smp_wmb(); task->io_context = ioc; } while (1); if (!err) { ioc->ioprio = ioprio; - ioc->ioprio_changed = 1; + /* make sure schedulers see the new ioprio value */ + wmb(); + for (i = 0; i < IOC_IOPRIO_CHANGED_BITS; i++) + set_bit(i, 
ioc->ioprio_changed); } task_unlock(task); diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index ccefff02b..37f523b85 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h @@ -66,3 +66,9 @@ SUBSYS(blkio) #endif /* */ + +#ifdef CONFIG_CGROUP_BFQIO +SUBSYS(bfqio) +#endif + +/* */ diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index 64d529133..8c7e25461 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -1,14 +1,14 @@ #ifndef IOCONTEXT_H #define IOCONTEXT_H +#include #include #include -struct cfq_queue; struct cfq_io_context { void *key; - struct cfq_queue *cfqq[2]; + void *cfqq[2]; struct io_context *ioc; @@ -27,6 +27,16 @@ struct cfq_io_context { struct rcu_head rcu_head; }; +/* + * Indexes into the ioprio_changed bitmap. A bit set indicates that + * the corresponding I/O scheduler needs to see a ioprio update. + */ +enum { + IOC_CFQ_IOPRIO_CHANGED, + IOC_BFQ_IOPRIO_CHANGED, + IOC_IOPRIO_CHANGED_BITS +}; + /* * I/O subsystem state of the associated processes. It is refcounted * and kmalloc'ed. These could be shared between processes. @@ -39,7 +49,7 @@ struct io_context { spinlock_t lock; unsigned short ioprio; - unsigned short ioprio_changed; + DECLARE_BITMAP(ioprio_changed, IOC_IOPRIO_CHANGED_BITS); #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE) unsigned short cgroup_changed; @@ -53,6 +63,8 @@ struct io_context { struct radix_tree_root radix_root; struct hlist_head cic_list; + struct radix_tree_root bfq_radix_root; + struct hlist_head bfq_cic_list; void *ioc_data; }; From 2a08a6a7491663ce37b37b6323d539303ca6332d Mon Sep 17 00:00:00 2001 From: uNiXpSyChO Date: Sun, 1 Jan 2012 20:18:18 -0500 Subject: [PATCH 04/19] CPU HW based alignment --- arch/arm/Kconfig | 10 ++++++++++ arch/arm/kernel/head-nommu.S | 6 +++--- arch/arm/kernel/head.S | 6 +++--- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 538724723..250198906 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1078,6 +1078,16 @@ config KSAPI Scorpion processor supported hardware performance counters on a per thread basis or AXI counters on an overall system basis. +config ALLOW_CPU_ALIGNMENT + bool "Allow CPU-based alignment handling" + default y + help + Advanced ARM processors, such as the Cortex series and ARMv7-based + CPUS are capable of performing unaligned accesses for many types of + memory accesses. Typically, using a cpu-based alignment fixup is + faster than doing such a fixup in software. For best performance + on advanced CPUs, say Y here. + endmenu source "arch/arm/common/Kconfig" diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 573b803dc..788361a90 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -65,10 +65,10 @@ __after_proc_init: * CP15 system control register value returned in r0 from * the CPU init function. */ -#ifdef CONFIG_ALIGNMENT_TRAP - orr r0, r0, #CR_A -#else +#ifdef CONFIG_ALLOW_CPU_ALIGNMENT bic r0, r0, #CR_A +#else + orr r0, r0, #CR_A #endif #ifdef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CR_C diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 40f47af7f..47eb2cd42 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -160,10 +160,10 @@ __secondary_data: * registers. 
*/ __enable_mmu: -#ifdef CONFIG_ALIGNMENT_TRAP - orr r0, r0, #CR_A -#else +#ifdef CONFIG_ALLOW_CPU_ALIGNMENT bic r0, r0, #CR_A +#else + orr r0, r0, #CR_A #endif #ifdef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CR_C From d6ef9e229b54c9a88f04d5a0854a02b41857f852 Mon Sep 17 00:00:00 2001 From: uNiXpSyChO Date: Sun, 1 Jan 2012 20:22:49 -0500 Subject: [PATCH 05/19] jhash3 --- include/linux/jhash.h | 142 ++++++------------------------- lib/Makefile | 2 +- lib/jhash.c | 128 ++++++++++++++++++++++++++++ net/ipv6/inet6_connection_sock.c | 19 ++--- net/ipv6/reassembly.c | 31 +++---- 5 files changed, 177 insertions(+), 145 deletions(-) create mode 100644 lib/jhash.c diff --git a/include/linux/jhash.h b/include/linux/jhash.h index 2a2f99fbc..4279961c7 100644 --- a/include/linux/jhash.h +++ b/include/linux/jhash.h @@ -1,131 +1,38 @@ #ifndef _LINUX_JHASH_H #define _LINUX_JHASH_H -/* jhash.h: Jenkins hash support. - * - * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net) - * - * http://burtleburtle.net/bob/hash/ - * - * These are the credits from Bob's sources: - * - * lookup2.c, by Bob Jenkins, December 1996, Public Domain. - * hash(), hash2(), hash3, and mix() are externally useful functions. - * Routines to test the hash are included if SELF_TEST is defined. - * You can use this free for any purpose. It has no warranty. - * - * Copyright (C) 2003 David S. Miller (davem@redhat.com) - * - * I've modified Bob's hash to be useful in the Linux kernel, and - * any bugs present are surely my fault. -DaveM - */ - -/* NOTE: Arguments are modified. */ -#define __jhash_mix(a, b, c) \ -{ \ - a -= b; a -= c; a ^= (c>>13); \ - b -= c; b -= a; b ^= (a<<8); \ - c -= a; c -= b; c ^= (b>>13); \ - a -= b; a -= c; a ^= (c>>12); \ - b -= c; b -= a; b ^= (a<<16); \ - c -= a; c -= b; c ^= (b>>5); \ - a -= b; a -= c; a ^= (c>>3); \ - b -= c; b -= a; b ^= (a<<10); \ - c -= a; c -= b; c ^= (b>>15); \ +/* Best hash sizes are of power of two */ +#define jhash_size(n) ((u32)1<<(n)) +/* Mask the hash value, i.e (value & jhash_mask(n)) instead of (value % n) */ +#define jhash_mask(n) (jhash_size(n)-1) + +/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */ +#define __jhash_final(a, b, c) \ +{ \ + c ^= b; c -= rol32(b, 14); \ + a ^= c; a -= rol32(c, 11); \ + b ^= a; b -= rol32(a, 25); \ + c ^= b; c -= rol32(b, 16); \ + a ^= c; a -= rol32(c, 4); \ + b ^= a; b -= rol32(a, 14); \ + c ^= b; c -= rol32(b, 24); \ } -/* The golden ration: an arbitrary value */ -#define JHASH_GOLDEN_RATIO 0x9e3779b9 - -/* The most generic version, hashes an arbitrary sequence - * of bytes. No alignment or length assumptions are made about - * the input key. 
- */ -static inline u32 jhash(const void *key, u32 length, u32 initval) -{ - u32 a, b, c, len; - const u8 *k = key; - - len = length; - a = b = JHASH_GOLDEN_RATIO; - c = initval; - - while (len >= 12) { - a += (k[0] +((u32)k[1]<<8) +((u32)k[2]<<16) +((u32)k[3]<<24)); - b += (k[4] +((u32)k[5]<<8) +((u32)k[6]<<16) +((u32)k[7]<<24)); - c += (k[8] +((u32)k[9]<<8) +((u32)k[10]<<16)+((u32)k[11]<<24)); - - __jhash_mix(a,b,c); +/* An arbitrary initial parameter */ +#define JHASH_INITVAL 0xdeadbeef + +extern u32 jhash(const void *key, u32 length, u32 initval); +extern u32 jhash2(const u32 *k, u32 length, u32 initval); - k += 12; - len -= 12; - } - c += length; - switch (len) { - case 11: c += ((u32)k[10]<<24); - case 10: c += ((u32)k[9]<<16); - case 9 : c += ((u32)k[8]<<8); - case 8 : b += ((u32)k[7]<<24); - case 7 : b += ((u32)k[6]<<16); - case 6 : b += ((u32)k[5]<<8); - case 5 : b += k[4]; - case 4 : a += ((u32)k[3]<<24); - case 3 : a += ((u32)k[2]<<16); - case 2 : a += ((u32)k[1]<<8); - case 1 : a += k[0]; - }; - - __jhash_mix(a,b,c); - - return c; -} - -/* A special optimized version that handles 1 or more of u32s. - * The length parameter here is the number of u32s in the key. - */ -static inline u32 jhash2(const u32 *k, u32 length, u32 initval) -{ - u32 a, b, c, len; - - a = b = JHASH_GOLDEN_RATIO; - c = initval; - len = length; - - while (len >= 3) { - a += k[0]; - b += k[1]; - c += k[2]; - __jhash_mix(a, b, c); - k += 3; len -= 3; - } - - c += length * 4; - - switch (len) { - case 2 : b += k[1]; - case 1 : a += k[0]; - }; - - __jhash_mix(a,b,c); - - return c; -} - - -/* A special ultra-optimized versions that knows they are hashing exactly - * 3, 2 or 1 word(s). - * - * NOTE: In partilar the "c += length; __jhash_mix(a,b,c);" normally - * done at the end is not done here. - */ +/* jhash_3words - hash exactly 3, 2 or 1 word(s) */ static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) { - a += JHASH_GOLDEN_RATIO; - b += JHASH_GOLDEN_RATIO; + a += JHASH_INITVAL; + b += JHASH_INITVAL; c += initval; - __jhash_mix(a, b, c); + __jhash_final(a, b, c); return c; } @@ -141,3 +48,4 @@ static inline u32 jhash_1word(u32 a, u32 initval) } #endif /* _LINUX_JHASH_H */ + diff --git a/lib/Makefile b/lib/Makefile index 0f603a42f..28b367f55 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -10,7 +10,7 @@ endif lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o \ idr.o int_sqrt.o extable.o prio_tree.o \ - sha1.o irq_regs.o reciprocal_div.o argv_split.o \ + jhash.o sha1.o irq_regs.o reciprocal_div.o argv_split.o \ proportions.o prio_heap.o ratelimit.o show_mem.o \ is_single_threaded.o plist.o decompress.o flex_array.o diff --git a/lib/jhash.c b/lib/jhash.c new file mode 100644 index 000000000..641b14500 --- /dev/null +++ b/lib/jhash.c @@ -0,0 +1,128 @@ +/* jhash.c: Jenkins hash support. + * + * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net) + * + * http://burtleburtle.net/bob/hash/ + * + * These are the credits from Bob's sources: + * + * lookup3.c, by Bob Jenkins, May 2006, Public Domain. + * + * These are functions for producing 32-bit hashes for hash table lookup. + * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() + * are externally useful functions. Routines to test the hash are included + * if SELF_TEST is defined. You can use this free for any purpose. It's in + * the public domain. It has no warranty. 
+ * + * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu) + * + * I've modified Bob's hash to be useful in the Linux kernel, and + * any bugs present are my fault. + * Jozsef + */ +#include +#include +#include +#include + +/* __jhash_mix -- mix 3 32-bit values reversibly. */ +#define __jhash_mix(a, b, c) \ +{ \ + a -= c; a ^= rol32(c, 4); c += b; \ + b -= a; b ^= rol32(a, 6); a += c; \ + c -= b; c ^= rol32(b, 8); b += a; \ + a -= c; a ^= rol32(c, 16); c += b; \ + b -= a; b ^= rol32(a, 19); a += c; \ + c -= b; c ^= rol32(b, 4); b += a; \ +} + +/* jhash - hash an arbitrary key + * @k: sequence of bytes as key + * @length: the length of the key + * @initval: the previous hash, or an arbitray value + * + * The generic version, hashes an arbitrary sequence of bytes. + * No alignment or length assumptions are made about the input key. + * + * Returns the hash value of the key. The result depends on endianness. + */ +u32 jhash(const void *key, u32 length, u32 initval) +{ + u32 a, b, c; + const u8 *k = key; + + /* Set up the internal state */ + a = b = c = JHASH_INITVAL + length + initval; + + /* All but the last block: affect some 32 bits of (a,b,c) */ + while (length > 12) { + a += __get_unaligned_cpu32(k); + b += __get_unaligned_cpu32(k + 4); + c += __get_unaligned_cpu32(k + 8); + __jhash_mix(a, b, c); + length -= 12; + k += 12; + } + /* Last block: affect all 32 bits of (c) */ + /* All the case statements fall through */ + switch (length) { + case 12: c += (u32)k[11]<<24; + case 11: c += (u32)k[10]<<16; + case 10: c += (u32)k[9]<<8; + case 9: c += k[8]; + case 8: b += (u32)k[7]<<24; + case 7: b += (u32)k[6]<<16; + case 6: b += (u32)k[5]<<8; + case 5: b += k[4]; + case 4: a += (u32)k[3]<<24; + case 3: a += (u32)k[2]<<16; + case 2: a += (u32)k[1]<<8; + case 1: a += k[0]; + __jhash_final(a, b, c); + case 0: /* Nothing left to add */ + break; + } + + return c; +} +EXPORT_SYMBOL(jhash); + +/* jhash2 - hash an array of u32's + * @k: the key which must be an array of u32's + * @length: the number of u32's in the key + * @initval: the previous hash, or an arbitray value + * + * Returns the hash value of the key. 
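+ * + * For example, with a power-of-two table of jhash_size(n) buckets (see + * the macros in jhash.h), a bucket index can be taken as + * jhash2(k, length, initval) & jhash_mask(n).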
+ */ +u32 jhash2(const u32 *k, u32 length, u32 initval) +{ + u32 a, b, c; + + /* Set up the internal state */ + a = b = c = JHASH_INITVAL + (length<<2) + initval; + + /* Handle most of the key */ + while (length > 3) { + a += k[0]; + b += k[1]; + c += k[2]; + __jhash_mix(a, b, c); + length -= 3; + k += 3; + } + + /* Handle the last 3 u32's: all the case statements fall through */ + switch (length) { + case 3: c += k[2]; + case 2: b += k[1]; + case 1: a += k[0]; + __jhash_final(a, b, c); + case 0: /* Nothing left to add */ + break; + } + + return c; +} +EXPORT_SYMBOL(jhash2); + + diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 0c5e3c3b7..7889eb77a 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -60,18 +60,16 @@ EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict); static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport, const u32 rnd, const u16 synq_hsize) { - u32 a = (__force u32)raddr->s6_addr32[0]; - u32 b = (__force u32)raddr->s6_addr32[1]; - u32 c = (__force u32)raddr->s6_addr32[2]; + u32 c; - a += JHASH_GOLDEN_RATIO; - b += JHASH_GOLDEN_RATIO; - c += rnd; - __jhash_mix(a, b, c); + c = jhash_3words((__force u32)raddr->s6_addr32[0], + (__force u32)raddr->s6_addr32[1], + (__force u32)raddr->s6_addr32[2], + rnd); - a += (__force u32)raddr->s6_addr32[3]; - b += (__force u32)rport; - __jhash_mix(a, b, c); + c = jhash_2words((__force u32)raddr->s6_addr32[3], + (__force u32)rport, + c); return c & (synq_hsize - 1); } @@ -238,3 +236,4 @@ int inet6_csk_xmit(struct sk_buff *skb) } EXPORT_SYMBOL_GPL(inet6_csk_xmit); + diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 6d4292ff5..c731bd0d5 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -104,26 +104,22 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr, const struct in6_addr *daddr, u32 rnd) { - u32 a, b, c; + u32 c; - a = (__force u32)saddr->s6_addr32[0]; - b = (__force u32)saddr->s6_addr32[1]; - c = (__force u32)saddr->s6_addr32[2]; + c = jhash_3words((__force u32)saddr->s6_addr32[0], + (__force u32)saddr->s6_addr32[1], + (__force u32)saddr->s6_addr32[2], + rnd); - a += JHASH_GOLDEN_RATIO; - b += JHASH_GOLDEN_RATIO; - c += rnd; - __jhash_mix(a, b, c); + c = jhash_3words((__force u32)saddr->s6_addr32[3], + (__force u32)daddr->s6_addr32[0], + (__force u32)daddr->s6_addr32[1], + c); - a += (__force u32)saddr->s6_addr32[3]; - b += (__force u32)daddr->s6_addr32[0]; - c += (__force u32)daddr->s6_addr32[1]; - __jhash_mix(a, b, c); - - a += (__force u32)daddr->s6_addr32[2]; - b += (__force u32)daddr->s6_addr32[3]; - c += (__force u32)id; - __jhash_mix(a, b, c); + c = jhash_3words((__force u32)daddr->s6_addr32[2], + (__force u32)daddr->s6_addr32[3], + (__force u32)id, + c); return c & (INETFRAGS_HASHSZ - 1); } @@ -805,3 +801,4 @@ void ipv6_frag_exit(void) unregister_pernet_subsys(&ip6_frags_ops); inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT); } + From 916cdb62a92ef080fea11015fc932367de898932 Mon Sep 17 00:00:00 2001 From: uNiXpSyChO Date: Sun, 1 Jan 2012 20:25:16 -0500 Subject: [PATCH 06/19] autogroup sched --- drivers/char/tty_io.c | 1 + include/linux/sched.h | 19 ++++++ init/Kconfig | 12 ++++ kernel/fork.c | 5 +- kernel/sched.c | 20 ++++-- kernel/sched_autogroup.c | 140 +++++++++++++++++++++++++++++++++++++++ kernel/sched_autogroup.h | 18 +++++ kernel/sysctl.c | 11 +++ 8 files changed, 218 insertions(+), 8 deletions(-) create mode 
100644 kernel/sched_autogroup.c create mode 100644 kernel/sched_autogroup.h diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 94bb4403e..4d912f9f1 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -3051,6 +3051,7 @@ static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty) put_pid(tsk->signal->tty_old_pgrp); tsk->signal->tty = tty_kref_get(tty); tsk->signal->tty_old_pgrp = NULL; + sched_autogroup_create_attach(tsk); } static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty) diff --git a/include/linux/sched.h b/include/linux/sched.h index ecf342828..754ec0a40 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -513,6 +513,8 @@ struct thread_group_cputimer { spinlock_t lock; }; +struct autogroup; + /* * NOTE! "signal_struct" does not have it's own * locking, because a shared signal_struct always @@ -580,6 +582,9 @@ struct signal_struct { struct tty_struct *tty; /* NULL if no tty */ +#ifdef CONFIG_SCHED_AUTOGROUP + struct autogroup *autogroup; +#endif /* * Cumulative resource counters for dead threads in the group, * and for reaped dead child processes forked by this group. @@ -1906,6 +1911,20 @@ int sched_rt_handler(struct ctl_table *table, int write, extern unsigned int sysctl_sched_compat_yield; +#ifdef CONFIG_SCHED_AUTOGROUP +extern unsigned int sysctl_sched_autogroup_enabled; + +extern void sched_autogroup_create_attach(struct task_struct *p); +extern void sched_autogroup_detach(struct task_struct *p); +extern void sched_autogroup_fork(struct signal_struct *sig); +extern void sched_autogroup_exit(struct signal_struct *sig); +#else +static inline void sched_autogroup_create_attach(struct task_struct *p) { } +static inline void sched_autogroup_detach(struct task_struct *p) { } +static inline void sched_autogroup_fork(struct signal_struct *sig) { } +static inline void sched_autogroup_exit(struct signal_struct *sig) { } +#endif + #ifdef CONFIG_RT_MUTEXES extern int rt_mutex_getprio(struct task_struct *p); extern void rt_mutex_setprio(struct task_struct *p, int prio); diff --git a/init/Kconfig b/init/Kconfig index 3900fd571..ddd3896e2 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -648,6 +648,18 @@ config DEBUG_BLK_CGROUP endif # CGROUPS +config SCHED_AUTOGROUP + bool "Automatic process group scheduling" + select CGROUPS + select CGROUP_SCHED + select FAIR_GROUP_SCHED + help + This option optimizes the scheduler for common desktop workloads by + automatically creating and populating task groups. This separation + of workloads isolates aggressive CPU burners (like build jobs) from + desktop applications. Task group autogeneration is currently based + upon task tty association. 
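+ + The feature can be toggled at run time through the + kernel.sched_autogroup_enabled sysctl added by this patch, or + disabled at boot with the "noautogroup" kernel parameter.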
+ config MM_OWNER bool diff --git a/kernel/fork.c b/kernel/fork.c index 95402acde..6a5e8d83c 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -176,8 +176,10 @@ static inline void free_signal_struct(struct signal_struct *sig) static inline void put_signal_struct(struct signal_struct *sig) { - if (atomic_dec_and_test(&sig->sigcnt)) + if (atomic_dec_and_test(&sig->sigcnt)) { + sched_autogroup_exit(sig); free_signal_struct(sig); + } } int task_free_register(struct notifier_block *n) @@ -916,6 +918,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) posix_cpu_timers_init_group(sig); tty_audit_fork(sig); + sched_autogroup_fork(sig); sig->oom_adj = current->signal->oom_adj; diff --git a/kernel/sched.c b/kernel/sched.c index 153870c88..b9fe24429 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -78,6 +78,7 @@ #include #include "sched_cpupri.h" +#include "sched_autogroup.h" #define CREATE_TRACE_POINTS #include @@ -1896,6 +1897,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int flags) #include "sched_idletask.c" #include "sched_fair.c" #include "sched_rt.c" +#include "sched_autogroup.c" #ifdef CONFIG_SCHED_DEBUG # include "sched_debug.c" #endif @@ -7581,7 +7583,7 @@ void __init sched_init(void) #ifdef CONFIG_CGROUP_SCHED list_add(&init_task_group.list, &task_groups); INIT_LIST_HEAD(&init_task_group.children); - + autogroup_init(&init_task); #endif /* CONFIG_CGROUP_SCHED */ #if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP @@ -8112,15 +8114,11 @@ void sched_destroy_group(struct task_group *tg) /* change task's runqueue when it moves between groups. * The caller of this function should have put the task in its new group * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to - * reflect its new group. + * reflect its new group. Called with the runqueue lock held. 
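+ * Callers that do not already hold the lock should use sched_move_task(), + * which acquires it via task_rq_lock() before calling this helper.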
*/ -void sched_move_task(struct task_struct *tsk) +void __sched_move_task(struct task_struct *tsk, struct rq *rq) { int on_rq, running; - unsigned long flags; - struct rq *rq; - - rq = task_rq_lock(tsk, &flags); running = task_current(rq, tsk); on_rq = tsk->se.on_rq; @@ -8146,7 +8144,15 @@ void sched_move_task(struct task_struct *tsk) tsk->sched_class->set_curr_task(rq); if (on_rq) enqueue_task(rq, tsk, 0); +} +void sched_move_task(struct task_struct *tsk) +{ + struct rq *rq; + unsigned long flags; + + rq = task_rq_lock(tsk, &flags); + __sched_move_task(tsk, rq); task_rq_unlock(rq, &flags); } #endif /* CONFIG_CGROUP_SCHED */ diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c new file mode 100644 index 000000000..62f1d0e90 --- /dev/null +++ b/kernel/sched_autogroup.c @@ -0,0 +1,140 @@ +#ifdef CONFIG_SCHED_AUTOGROUP + +unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1; + +struct autogroup { + struct kref kref; + struct task_group *tg; +}; + +static struct autogroup autogroup_default; + +static void autogroup_init(struct task_struct *init_task) +{ + autogroup_default.tg = &init_task_group; + kref_init(&autogroup_default.kref); + init_task->signal->autogroup = &autogroup_default; +} + +static inline void autogroup_destroy(struct kref *kref) +{ + struct autogroup *ag = container_of(kref, struct autogroup, kref); + struct task_group *tg = ag->tg; + + kfree(ag); + sched_destroy_group(tg); +} + +static inline void autogroup_kref_put(struct autogroup *ag) +{ + kref_put(&ag->kref, autogroup_destroy); +} + +static inline struct autogroup *autogroup_kref_get(struct autogroup *ag) +{ + kref_get(&ag->kref); + return ag; +} + +static inline struct autogroup *autogroup_create(void) +{ + struct autogroup *ag = kmalloc(sizeof(*ag), GFP_KERNEL); + + if (!ag) + goto out_fail; + + ag->tg = sched_create_group(&init_task_group); + kref_init(&ag->kref); + + if (!(IS_ERR(ag->tg))) + return ag; + +out_fail: + if (ag) { + kfree(ag); + WARN_ON(1); + } else + WARN_ON(1); + + return autogroup_kref_get(&autogroup_default); +} + +static inline struct task_group * +autogroup_task_group(struct task_struct *p, struct task_group *tg) +{ + int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled); + + enabled &= (tg == &root_task_group); + enabled &= (p->sched_class == &fair_sched_class); + enabled &= (!(p->flags & PF_EXITING)); + + if (enabled) + return p->signal->autogroup->tg; + + return tg; +} + +static void +autogroup_move_group(struct task_struct *p, struct autogroup *ag) +{ + struct autogroup *prev; + struct task_struct *t; + struct rq *rq; + unsigned long flags; + + rq = task_rq_lock(p, &flags); + prev = p->signal->autogroup; + if (prev == ag) { + task_rq_unlock(rq, &flags); + return; + } + + p->signal->autogroup = autogroup_kref_get(ag); + __sched_move_task(p, rq); + task_rq_unlock(rq, &flags); + + rcu_read_lock(); + list_for_each_entry_rcu(t, &p->thread_group, thread_group) { + sched_move_task(t); + } + rcu_read_unlock(); + + autogroup_kref_put(prev); +} + +void sched_autogroup_create_attach(struct task_struct *p) +{ + struct autogroup *ag = autogroup_create(); + + autogroup_move_group(p, ag); + /* drop extra refrence added by autogroup_create() */ + autogroup_kref_put(ag); +} +EXPORT_SYMBOL(sched_autogroup_create_attach); + +/* currently has no users */ +void sched_autogroup_detach(struct task_struct *p) +{ + autogroup_move_group(p, &autogroup_default); +} +EXPORT_SYMBOL(sched_autogroup_detach); + +void sched_autogroup_fork(struct signal_struct *sig) +{ + sig->autogroup = 
autogroup_kref_get(current->signal->autogroup); +} + +void sched_autogroup_exit(struct signal_struct *sig) +{ + autogroup_kref_put(sig->autogroup); +} + +static int __init setup_autogroup(char *str) +{ + sysctl_sched_autogroup_enabled = 0; + + return 1; +} + +__setup("noautogroup", setup_autogroup); +#endif diff --git a/kernel/sched_autogroup.h b/kernel/sched_autogroup.h new file mode 100644 index 000000000..6048f5d36 --- /dev/null +++ b/kernel/sched_autogroup.h @@ -0,0 +1,18 @@ +#ifdef CONFIG_SCHED_AUTOGROUP + +static void __sched_move_task(struct task_struct *tsk, struct rq *rq); + +static inline struct task_group * +autogroup_task_group(struct task_struct *p, struct task_group *tg); + +#else /* !CONFIG_SCHED_AUTOGROUP */ + +static inline void autogroup_init(struct task_struct *init_task) { } + +static inline struct task_group * +autogroup_task_group(struct task_struct *p, struct task_group *tg) +{ + return tg; +} + +#endif /* CONFIG_SCHED_AUTOGROUP */ diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 7420e8704..6b244d8d1 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -387,6 +387,17 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, +#ifdef CONFIG_SCHED_AUTOGROUP + { + .procname = "sched_autogroup_enabled", + .data = &sysctl_sched_autogroup_enabled, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec, + .extra1 = &zero, + .extra2 = &one, + }, +#endif #ifdef CONFIG_PROVE_LOCKING { .procname = "prove_locking", From 9df6c52538fa0d99de25735ff0dcaec3513bb406 Mon Sep 17 00:00:00 2001 From: uNiXpSyChO Date: Sun, 1 Jan 2012 20:31:32 -0500 Subject: [PATCH 07/19] CPU alignment and sched patches --- arch/arm/mm/alignment.c | 19 +++++++++++++++++-- arch/arm/mm/cache-v7.S | 35 +++++++++++++++++++++-------------- arch/arm/mm/proc-macros.S | 10 ++++++++++ kernel/sched_fair.c | 18 +++++++++--------- 4 files changed, 57 insertions(+), 25 deletions(-) diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 6f98c3589..9527ce66a 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -885,8 +885,23 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (ai_usermode & UM_SIGNAL) force_sig(SIGBUS, current); - else - set_cr(cr_no_alignment); + else { + /* + * We're about to disable the alignment trap and return to + * user space. But if an interrupt occurs before actually + * reaching user space, then the IRQ vector entry code will + * notice that we were still in kernel space and therefore + * the alignment trap won't be re-enabled in that case as it + * is presumed to be always on from kernel space. + * Let's prevent that race by disabling interrupts here (they + * are disabled on the way back to user space anyway in + * entry-common.S) and disable the alignment trap only if + * there is no work pending for this thread. 
+ */ + raw_local_irq_disable(); + if (!(current_thread_info()->flags & _TIF_WORK_MASK)) + set_cr(cr_no_alignment); + } return 0; } diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index b39c1a8af..0d008a6c6 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -161,15 +161,15 @@ ENTRY(v7_coherent_user_range) UNWIND(.fnstart ) dcache_line_size r2, r3 sub r3, r2, #1 - bic r0, r0, r3 + bic r12, r0, r3 #ifdef CONFIG_SCORPION_Uni_45nm_BUG 1: - USER( mcr p15, 0, r0, c7, c11, 1 ) @ clean D line to PoU (DCCMVAU) - add r0, r0, r2 -2: - cmp r0, r1 + USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to PoU (DCCMVAU) + add r12, r12, r2 + cmp r12, r1 blo 1b dsb +3: mov r0, #0 mcr p15, 0, r0, c7, c5, 0 @ invalidate I-Cache and BTB dsb @@ -177,13 +177,20 @@ ENTRY(v7_coherent_user_range) mov pc, lr #else 1: - USER( mcr p15, 0, r0, c7, c11, 1 ) @ clean D line to the point of unification + USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification + add r12, r12, r2 + cmp r12, r1 + blo 1b dsb - USER( mcr p15, 0, r0, c7, c5, 1 ) @ invalidate I line - add r0, r0, r2 + icache_line_size r2, r3 + sub r3, r2, #1 + bic r12, r0, r3 2: - cmp r0, r1 - blo 1b + USER( mcr p15, 0, r12, c7, c5, 1 ) @ invalidate I line + add r12, r12, r2 + cmp r12, r1 + blo 2b +3: mov r0, #0 #ifdef CONFIG_SMP mcr p15, 0, r0, c7, c1, 6 @ invalidate BTB Inner Shareable @@ -200,10 +207,10 @@ ENTRY(v7_coherent_user_range) * isn't mapped, just try the next page. */ 9001: - mov r0, r0, lsr #12 - mov r0, r0, lsl #12 - add r0, r0, #4096 - b 2b + mov r12, r12, lsr #12 + mov r12, r12, lsl #12 + add r12, r12, #4096 + b 3b UNWIND(.fnend ) ENDPROC(v7_coherent_kern_range) ENDPROC(v7_coherent_user_range) diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index 321555b89..b795afd0a 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -72,6 +72,16 @@ mov \reg, \reg, lsl \tmp @ actual cache line size .endm +/* + * icache_line_size - get the minimum I-cache line size from the CTR register + * on ARMv7. + */ + .macro icache_line_size, reg, tmp + mrc p15, 0, \tmp, c0, c0, 1 @ read ctr + and \tmp, \tmp, #0xf @ cache line size encoding + mov \reg, #4 @ bytes per word + mov \reg, \reg, lsl \tmp @ actual cache line size + .endm /* * Sanity check the PTE configuration for the code below - which makes diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 363585d4c..33e92cf1d 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1864,21 +1864,20 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, enum cpu_idle_type idle, int *all_pinned, int *this_best_prio, struct cfs_rq *busiest_cfs_rq) { - int loops = 0, pulled = 0, pinned = 0; + int loops = 0, pulled = 0; long rem_load_move = max_load_move; struct task_struct *p, *n; if (max_load_move == 0) goto out; - pinned = 1; - list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) { if (loops++ > sysctl_sched_nr_migrate) break; if ((p->se.load.weight >> 1) > rem_load_move || - !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) + !can_migrate_task(p, busiest, this_cpu, sd, idle, + all_pinned)) continue; pull_task(busiest, p, this_rq, this_cpu); @@ -1913,9 +1912,6 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, */ schedstat_add(sd, lb_gained[idle], pulled); - if (all_pinned) - *all_pinned = pinned; - return max_load_move - rem_load_move; } @@ -2876,6 +2872,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, * still unbalanced. 
ld_moved simply stays zero, so it is * correctly treated as an imbalance. */ + all_pinned = 1; local_irq_save(flags); double_rq_lock(this_rq, busiest); ld_moved = move_tasks(this_rq, this_cpu, busiest, @@ -3253,9 +3250,10 @@ int select_nohz_load_balancer(int stop_tick) cpu_rq(cpu)->in_nohz_recently = 1; if (!cpu_active(cpu)) { - if (atomic_read(&nohz.load_balancer) != cpu) + if (atomic_read(&nohz.load_balancer) != cpu) { + cpumask_clear_cpu(cpu, nohz.cpu_mask); return 0; - + } /* * If we are going offline and still the leader, * give up! @@ -3263,6 +3261,8 @@ int select_nohz_load_balancer(int stop_tick) if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) BUG(); + cpumask_clear_cpu(cpu, nohz.cpu_mask); + return 0; } From 61dce8c3211f5e7790923bd57e20fa8d51f8ccee Mon Sep 17 00:00:00 2001 From: Rod Whitby Date: Wed, 4 Jan 2012 15:14:29 +1030 Subject: [PATCH 08/19] Modification of the lowmemnotify threshold calculation to take into account free swap space --- drivers/misc/lowmemnotify.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/misc/lowmemnotify.c b/drivers/misc/lowmemnotify.c index 1cce6ebe5..7d3f60126 100644 --- a/drivers/misc/lowmemnotify.c +++ b/drivers/misc/lowmemnotify.c @@ -190,7 +190,7 @@ unsigned long memnotify_get_free(void) if (other_free > totalreserve_pages) free += other_free - totalreserve_pages; - return free; + return free + nr_swap_pages; } EXPORT_SYMBOL(memnotify_get_free); @@ -202,15 +202,13 @@ EXPORT_SYMBOL(memnotify_get_free); unsigned long memnotify_get_used(void) { unsigned long used_mem; - unsigned long used_swap; unsigned long free_mem; free_mem = memnotify_get_free(); - used_swap = total_swap_pages - nr_swap_pages; - used_mem = totalram_pages - free_mem; + used_mem = totalram_pages + total_swap_pages - free_mem; - return used_mem + used_swap; + return used_mem; } /** @@ -227,7 +225,7 @@ int memnotify_threshold(void) int i; used = memnotify_get_used(); - used_ratio = used * 100 / totalram_pages; + used_ratio = used * 100 / (totalram_pages + total_swap_pages); threshold = THRESHOLD_NORMAL; last_threshold = atomic_read(&memnotify_last_threshold); @@ -346,7 +344,7 @@ meminfo_show(struct class *class, struct class_attribute *attr, char *buf) int i; used = memnotify_get_used(); - total_mem = totalram_pages; + total_mem = totalram_pages + total_swap_pages; threshold = memnotify_threshold(); last_threshold = atomic_read(&memnotify_last_threshold); @@ -357,7 +355,7 @@ meminfo_show(struct class *class, struct class_attribute *attr, char *buf) "Used (Mem+Swap): %ldMB\n", MB(used)); len += scnprintf(buf+len, PAGE_SIZE - len, - "Used (Mem): %ldMB\n", MB(totalram_pages-memnotify_get_free())); + "Used (Mem): %ldMB\n", MB(totalram_pages + nr_swap_pages - memnotify_get_free())); len += scnprintf(buf+len, PAGE_SIZE - len, "Used (Swap): %ldMB\n", MB(total_swap_pages - nr_swap_pages)); From bf361b989ac4f7c71d467b26b70e95270a424dcf Mon Sep 17 00:00:00 2001 From: Rod Whitby Date: Wed, 4 Jan 2012 15:20:21 +1030 Subject: [PATCH 09/19] NFS Support --- arch/arm/configs/tenderloin_defconfig | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/arch/arm/configs/tenderloin_defconfig b/arch/arm/configs/tenderloin_defconfig index 4748fa81c..b7a5e3157 100644 --- a/arch/arm/configs/tenderloin_defconfig +++ b/arch/arm/configs/tenderloin_defconfig @@ -2473,8 +2473,20 @@ CONFIG_MISC_FILESYSTEMS=y # CONFIG_SYSV_FS is not set # CONFIG_UFS_FS is not set CONFIG_NETWORK_FILESYSTEMS=y -# CONFIG_NFS_FS is not set +CONFIG_NFS_FS=y 
+CONFIG_NFS_V3=y +# CONFIG_NFS_V3_ACL is not set +CONFIG_NFS_V4=y +# CONFIG_NFS_V4_1 is not set +# CONFIG_ROOT_NFS is not set # CONFIG_NFSD is not set +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +CONFIG_RPCSEC_GSS_KRB5=y +# CONFIG_RPCSEC_GSS_SPKM3 is not set # CONFIG_SMB_FS is not set # CONFIG_CEPH_FS is not set # CONFIG_CIFS is not set From 8e8db634a0e044c1b87120a90b000ca1623f7f81 Mon Sep 17 00:00:00 2001 From: Rod Whitby Date: Wed, 4 Jan 2012 15:20:31 +1030 Subject: [PATCH 10/19] CIFS Support --- arch/arm/configs/tenderloin_defconfig | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/arch/arm/configs/tenderloin_defconfig b/arch/arm/configs/tenderloin_defconfig index b7a5e3157..36b30474f 100644 --- a/arch/arm/configs/tenderloin_defconfig +++ b/arch/arm/configs/tenderloin_defconfig @@ -2489,7 +2489,12 @@ CONFIG_RPCSEC_GSS_KRB5=y # CONFIG_RPCSEC_GSS_SPKM3 is not set # CONFIG_SMB_FS is not set # CONFIG_CEPH_FS is not set -# CONFIG_CIFS is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS is not set +CONFIG_CIFS_WEAK_PW_HASH=y +# CONFIG_CIFS_XATTR is not set +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_EXPERIMENTAL is not set # CONFIG_NCP_FS is not set # CONFIG_CODA_FS is not set # CONFIG_AFS_FS is not set @@ -2554,7 +2559,7 @@ CONFIG_NLS_ISO8859_1=y # CONFIG_NLS_ISO8859_15 is not set # CONFIG_NLS_KOI8_R is not set # CONFIG_NLS_KOI8_U is not set -# CONFIG_NLS_UTF8 is not set +CONFIG_NLS_UTF8=y # CONFIG_DLM is not set # From b3fe47f85a4bf7bb82dabe8f7e7c20607ebced08 Mon Sep 17 00:00:00 2001 From: Rod Whitby Date: Wed, 4 Jan 2012 15:20:59 +1030 Subject: [PATCH 11/19] EXT4 Support --- arch/arm/configs/tenderloin_defconfig | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/arm/configs/tenderloin_defconfig b/arch/arm/configs/tenderloin_defconfig index 36b30474f..16b586bc5 100644 --- a/arch/arm/configs/tenderloin_defconfig +++ b/arch/arm/configs/tenderloin_defconfig @@ -2398,13 +2398,19 @@ CONFIG_EXT3_FS=y CONFIG_EXT3_FS_XATTR=y # CONFIG_EXT3_FS_POSIX_ACL is not set # CONFIG_EXT3_FS_SECURITY is not set -# CONFIG_EXT4_FS is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_XATTR=y +# CONFIG_EXT4_FS_POSIX_ACL is not set +# CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_DEBUG is not set CONFIG_JBD=y # CONFIG_JBD_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set CONFIG_FS_MBCACHE=y # CONFIG_REISERFS_FS is not set # CONFIG_JFS_FS is not set -# CONFIG_FS_POSIX_ACL is not set +CONFIG_FS_POSIX_ACL=y # CONFIG_XFS_FS is not set # CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set From 423cd39447c28fe3b9be980f5eb890ef59b3e55f Mon Sep 17 00:00:00 2001 From: Rod Whitby Date: Wed, 4 Jan 2012 15:21:08 +1030 Subject: [PATCH 12/19] iSCSI Support --- arch/arm/configs/tenderloin_defconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/configs/tenderloin_defconfig b/arch/arm/configs/tenderloin_defconfig index 16b586bc5..5b5ae88dc 100644 --- a/arch/arm/configs/tenderloin_defconfig +++ b/arch/arm/configs/tenderloin_defconfig @@ -1027,12 +1027,12 @@ CONFIG_SCSI_WAIT_SCAN=m # # CONFIG_SCSI_SPI_ATTRS is not set # CONFIG_SCSI_FC_ATTRS is not set -# CONFIG_SCSI_ISCSI_ATTRS is not set +CONFIG_SCSI_ISCSI_ATTRS=y # CONFIG_SCSI_SAS_ATTRS is not set # CONFIG_SCSI_SAS_LIBSAS is not set # CONFIG_SCSI_SRP_ATTRS is not set CONFIG_SCSI_LOWLEVEL=y -# CONFIG_ISCSI_TCP is not set +CONFIG_ISCSI_TCP=y # CONFIG_LIBFC is not set # CONFIG_LIBFCOE is not set # CONFIG_SCSI_DEBUG is not set From 
edc57fe0d9779f02b5f2878aefbdbfacbe4f2423 Mon Sep 17 00:00:00 2001 From: Rod Whitby Date: Wed, 4 Jan 2012 15:21:19 +1030 Subject: [PATCH 13/19] ATAOE Support --- arch/arm/configs/tenderloin_defconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/configs/tenderloin_defconfig b/arch/arm/configs/tenderloin_defconfig index 5b5ae88dc..579248b8a 100644 --- a/arch/arm/configs/tenderloin_defconfig +++ b/arch/arm/configs/tenderloin_defconfig @@ -944,7 +944,7 @@ CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=65536 # CONFIG_BLK_DEV_XIP is not set # CONFIG_CDROM_PKTCDVD is not set -# CONFIG_ATA_OVER_ETH is not set +CONFIG_ATA_OVER_ETH=y # CONFIG_MG_DISK is not set CONFIG_MISC_DEVICES=y # CONFIG_AD525X_DPOT is not set From 02cec9850f3b93495f496c6be68483e043bf3d68 Mon Sep 17 00:00:00 2001 From: uNiXpSyChO Date: Sun, 1 Jan 2012 20:26:48 -0500 Subject: [PATCH 14/19] override module --- arch/arm/mach-msm/acpuclock-8x60.c | 2 + arch/arm/mach-msm/override_plug.c | 86 +++++ drivers/base/cpu.c | 33 +- drivers/cpufreq/Kconfig | 44 +++ drivers/cpufreq/Makefile | 1 + drivers/cpufreq/cpufreq.c | 14 + drivers/cpufreq/cpufreq_override.c | 601 +++++++++++++++++++++++++++++ drivers/power/max8903b_charger.c | 12 + drivers/video/msm_pe/msm_fb.c | 23 ++ 9 files changed, 812 insertions(+), 4 deletions(-) create mode 100644 arch/arm/mach-msm/override_plug.c create mode 100644 drivers/cpufreq/cpufreq_override.c diff --git a/arch/arm/mach-msm/acpuclock-8x60.c b/arch/arm/mach-msm/acpuclock-8x60.c index e3604a093..5fa212a5a 100644 --- a/arch/arm/mach-msm/acpuclock-8x60.c +++ b/arch/arm/mach-msm/acpuclock-8x60.c @@ -231,6 +231,8 @@ static struct clkctl_acpu_speed *acpu_freq_tbl; static struct clkctl_l2_speed *l2_freq_tbl; static unsigned int l2_freq_tbl_size; +#include "override_plug.c" + unsigned long acpuclk_get_rate(int cpu) { return drv_state.current_speed[cpu]->acpuclk_khz; diff --git a/arch/arm/mach-msm/override_plug.c b/arch/arm/mach-msm/override_plug.c new file mode 100644 index 000000000..e607710af --- /dev/null +++ b/arch/arm/mach-msm/override_plug.c @@ -0,0 +1,86 @@ +/* + * override_plug.c + * + * Marco Benton . + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_L2_HACK + +// define L2 overclock table indexes from l2_freq_tbl_v2 L2(x) +// set it the same to render it useless +#define L2_OCLK_IDX 17 +#define L2_NORM_IDX 16 + +// Minimum LVAL freq for L2 overclock 54 MHz * L_VAL +#define MIN_LVAL_L2 0x20 + +void acpuclk_set_l2_hack(bool state) +{ + struct clkctl_acpu_speed *f; + + for (f = acpu_freq_tbl_v2 + 7; f->acpuclk_khz != 0; f++) { + if(f->use_for_scaling[0] == 0) continue; + if(f->l_val < MIN_LVAL_L2) continue; + f->l2_level = (state) ? 
L2(L2_OCLK_IDX) : L2(L2_NORM_IDX); + } +} +EXPORT_SYMBOL(acpuclk_set_l2_hack); +#endif + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_VOLT_CONFIG +void acpuclk_get_voltages(unsigned int acpu_freq_vlt_tbl[]) +{ + int i=0; + struct clkctl_acpu_speed *f; + + for (f = acpu_freq_tbl_v2; f->acpuclk_khz != 0; f++) { + if(f->use_for_scaling[0] == 0) continue; + acpu_freq_vlt_tbl[i] = f->vdd_sc; + i++; + } + +} +EXPORT_SYMBOL(acpuclk_get_voltages); + +void acpuclk_set_voltages(unsigned int acpu_freq_vlt_tbl[]) +{ + int i=0; + struct clkctl_acpu_speed *f; + + for (f = acpu_freq_tbl_v2; f->acpuclk_khz != 0; f++) { + if(f->use_for_scaling[0] == 0) continue; + f->vdd_sc = acpu_freq_vlt_tbl[i]; + i++; + } +} +EXPORT_SYMBOL(acpuclk_set_voltages); +#endif + +unsigned int acpuclk_get_freqs(unsigned int acpu_freq_tbl[]) +{ + int i=0; + struct clkctl_acpu_speed *f; + + for (f = acpu_freq_tbl_v2; f->acpuclk_khz != 0; f++) { + if(!f->use_for_scaling[0]) continue; + acpu_freq_tbl[i] = f->acpuclk_khz; + i++; + } + + return i; +} +EXPORT_SYMBOL(acpuclk_get_freqs); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 251acea3d..07b06993c 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -14,6 +14,10 @@ #include "base.h" +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE +bool cpufreq_override_get_state(void); +#endif + static struct sysdev_class_attribute *cpu_sysdev_class_attrs[]; struct sysdev_class cpu_sysdev_class = { @@ -37,23 +41,44 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut const char *buf, size_t count) { struct cpu *cpu = container_of(dev, struct cpu, sysdev); - ssize_t ret; + ssize_t ret = 0; cpu_hotplug_driver_lock(); + // uncomment to prove to lusers what actually shuts off the CPU! + //printk("CPU SNOOP: process \"%s\" set cpu: %s\n",current->comm,buf[0] == '0' ? 
"off" : "on"); switch (buf[0]) { case '0': +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE + if(cpufreq_override_get_state()) goto out; +#endif ret = cpu_down(cpu->sysdev.id); if (!ret) kobject_uevent(&dev->kobj, KOBJ_OFFLINE); break; case '1': - ret = cpu_up(cpu->sysdev.id); - if (!ret) - kobject_uevent(&dev->kobj, KOBJ_ONLINE); +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE + if(cpufreq_override_get_state()) { + if(strcmp("powerd",current->comm) != 0) { +#endif + ret = cpu_up(cpu->sysdev.id); + if (!ret) + kobject_uevent(&dev->kobj, KOBJ_ONLINE); +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE + } + } + else { + ret = cpu_up(cpu->sysdev.id); + if (!ret) + kobject_uevent(&dev->kobj, KOBJ_ONLINE); + } +#endif break; default: ret = -EINVAL; } +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE +out: +#endif cpu_hotplug_driver_unlock(); if (ret >= 0) diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 02274b55f..6b15b490a 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -18,6 +18,50 @@ if CPU_FREQ config CPU_FREQ_TABLE tristate +config CPU_FREQ_OVERRIDE + bool "Enable CPU options override" + default y + help + Say Y here + +config CPU_FREQ_OVERRIDE_L2_HACK + bool "Enable overclock L2 freqs" + depends on CPU_FREQ_OVERRIDE + default n + help + Say Y here + +config CPU_FREQ_OVERRIDE_VOLT_CONFIG + bool "Enable CPU voltage tweaking" + depends on CPU_FREQ_OVERRIDE + default y + help + Say Y here + +config CPU_FREQ_OVERRIDE_TURBO_MODE + bool "Enable static dual-core" + depends on CPU_FREQ_OVERRIDE + default n + help + Say Y here + +config CPU_FREQ_OVERRIDE_TURBO_MODE_ENABLE + bool "Enable static dual-core at boot" + depends on CPU_FREQ_OVERRIDE_TURBO_MODE + default n + +config CPU_FREQ_OVERRIDE_POWERSAVER + bool "Enable dual-core power saving mode" + depends on CPU_FREQ_OVERRIDE_TURBO_MODE + default n + help + Say Y here + +config CPU_FREQ_OVERRIDE_POWERSAVER_ENABLE + bool "Enable dual-core power saving mode at boot" + depends on CPU_FREQ_OVERRIDE_POWERSAVER + default n + config CPU_FREQ_DEBUG bool "Enable CPUfreq debugging" help diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index a59dca8b1..96ee6f880 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND_TICKLE) += cpufreq_ondemand_tickle.o obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o +obj-$(CONFIG_CPU_FREQ_OVERRIDE) += cpufreq_override.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index bf7d095d0..2c292e341 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1816,6 +1816,20 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, return ret; } +#ifdef CONFIG_CPU_FREQ_OVERRIDE +void cpufreq_set_policy(struct cpufreq_policy *policy, unsigned int cpu) +{ + struct cpufreq_policy *data = cpufreq_cpu_get(cpu); + lock_policy_rwsem_write(cpu); + __cpufreq_set_policy(data, policy); + unlock_policy_rwsem_write(cpu); + data->user_policy.min = data->min; + data->user_policy.max = data->max; + cpufreq_cpu_put(data); +} +EXPORT_SYMBOL(cpufreq_set_policy); +#endif + /** * cpufreq_update_policy - re-evaluate an existing cpufreq policy * @cpu: CPU which shall be re-evaluated diff --git a/drivers/cpufreq/cpufreq_override.c b/drivers/cpufreq/cpufreq_override.c new file mode 100644 index 
000000000..97b48e20b --- /dev/null +++ b/drivers/cpufreq/cpufreq_override.c @@ -0,0 +1,601 @@ +/* + * drivers/cpufreq/cpufreq_override.c + * + * Marco Benton . + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Voltage min +#define VDD_MIN 800000 + +// Voltage max +#define VDD_MAX 1600000 + +// Max Freq count. Not the actual number of freqs +#define MAX_FREQS 35 + +// L2 Boost mode. This requires mods in acpuclock-8x60.c +#define L2_BOOST 0 + +// CPU load ramp up percent +#define RAMPUP_PERCENT 20 + +// CPU load poll jiffies +#define POLL_FREQ 100 + +// Max charging freq default +#define CHARGING_MAX 1188000 + +/* ************* end of tunables ***************************************** */ + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_VOLT_CONFIG +void acpuclk_get_voltages(unsigned int acpu_freq_vlt_tbl[]); +void acpuclk_set_voltages(unsigned int acpu_freq_vlt_tbl[]); +#endif + +unsigned int acpuclk_get_freqs(unsigned int acpu_freq_tbl[]); +static unsigned int freq_table[MAX_FREQS]; +static unsigned int nr_freqs; + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_L2_HACK +void acpuclk_set_l2_hack(bool state); +static bool l2boost = L2_BOOST; +#endif + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE +static bool lcd_state = 1; +static bool chrg_state = 0; +static bool chrg_override = 1; +static unsigned int chrg_max = CHARGING_MAX, chrg_prevmax = 0; +void cpufreq_set_policy(struct cpufreq_policy *policy, unsigned int cpu); +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE_ENABLE +static bool turbomode = 1; +#else +static bool turbomode = 0; +#endif +#endif + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_POWERSAVER +static cputime64_t prev_cpu_wall = 0, prev_cpu_idle = 0; +static unsigned int time_in_state = 0; +#ifdef CONFIG_CPU_FREQ_OVERRIDE_POWERSAVER_ENABLE +static bool power_save = 1; +#else +static bool power_save = 0; +#endif + +static inline void check_load(struct work_struct *work); +static DEFINE_MUTEX(override_mutex); +static DECLARE_DELAYED_WORK(worker, check_load); +#endif + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE +static void cpu_state(bool state) +{ + struct sys_device *dev = get_cpu_sysdev(1); + + cpu_hotplug_driver_lock(); + + if(!state) { + if(!cpu_down(1)) kobject_uevent(&dev->kobj, KOBJ_OFFLINE); + } + else { + if(!cpu_up(1)) kobject_uevent(&dev->kobj, KOBJ_ONLINE); + } + + cpu_hotplug_driver_unlock(); +} + +bool cpufreq_override_get_state(void) +{ + unsigned int ret = 0; + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_POWERSAVER + if(lcd_state) ret = (power_save || turbomode); +#else + if(lcd_state) ret = turbomode; +#endif + + return ret; +} +EXPORT_SYMBOL(cpufreq_override_get_state); + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_POWERSAVER +static inline cputime64_t get_cpu_idle_time(unsigned int cpu) +{ + cputime64_t idle_time; + cputime64_t cur_jiffies; + cputime64_t busy_time; + + cur_jiffies = 
jiffies64_to_cputime64(get_jiffies_64()); + busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user, + kstat_cpu(cpu).cpustat.system); + + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq); + busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal); + + idle_time = cputime64_sub(cur_jiffies, busy_time); + return idle_time; +} + +static inline unsigned int cur_load(void) +{ + unsigned int tmp_idle_ticks, idle_ticks, total_ticks, load = 0, ret = 0; + cputime64_t total_idle_ticks, cur_jiffies; + + idle_ticks = UINT_MAX; + cur_jiffies = jiffies64_to_cputime64(get_jiffies_64()); + total_ticks = (unsigned int)cputime64_sub(cur_jiffies, prev_cpu_wall); + prev_cpu_wall = get_jiffies_64(); + + if (!total_ticks) + goto out; + + total_idle_ticks = get_cpu_idle_time(0); + tmp_idle_ticks = (unsigned int)cputime64_sub(total_idle_ticks, + prev_cpu_idle); + prev_cpu_idle = total_idle_ticks; + + if (tmp_idle_ticks < idle_ticks) + idle_ticks = tmp_idle_ticks; + if (likely(total_ticks > idle_ticks)) + load = (100 * (total_ticks - idle_ticks)) / total_ticks; + + ret = load; + +out: + return ret; +} + +static inline void check_load(struct work_struct *work) +{ + mutex_lock(&override_mutex); + + BUG_ON(!power_save); + + if(!lcd_state) goto out; + + // only help out cpu0, not overall load + if(cur_load() > RAMPUP_PERCENT) { + if(!cpu_online(1)) + cpu_state(1); + + time_in_state = 0; + } + else { + if(cpu_online(1) && (time_in_state > 9)) { + cpu_state(0); + time_in_state = 0; + } + else + if(cpu_online(1)) time_in_state++; + } + +out: + mutex_unlock(&override_mutex); + schedule_delayed_work_on(0,&worker, POLL_FREQ); +} +#endif + +void cpufreq_override_set_lcd_state(bool state) +{ +#ifdef CONFIG_CPU_FREQ_OVERRIDE_POWERSAVER + mutex_lock(&override_mutex); +#endif + + lcd_state = state; + printk("override: lcd state=%u\n",state); + + // If screen is off, take cpu offline if in power save mode +#ifdef CONFIG_CPU_FREQ_OVERRIDE_POWERSAVER + if(power_save && !lcd_state) cpu_state(0); +#else + if(!lcd_state) cpu_state(0); +#endif + + // force CPU online regardless + if(lcd_state) cpu_state(1); + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_POWERSAVER + mutex_unlock(&override_mutex); +#endif +} +EXPORT_SYMBOL(cpufreq_override_set_lcd_state); + +bool cpufreq_override_get_lcd_state(void) +{ + return lcd_state; +} +EXPORT_SYMBOL(cpufreq_override_get_lcd_state); +#endif + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE +void cpufreq_override_set_chrg(bool state) +{ + struct cpufreq_policy policy; + int ttmp, i; + + if(chrg_override) goto out; + + mutex_lock(&override_mutex); + + if(state) { + if(!chrg_state) { + ttmp = turbomode; + turbomode=1; + cpu_state(1); + for_each_online_cpu(i) { + msleep(200); + cpufreq_get_policy(&policy, i); + chrg_prevmax = policy.max; + policy.max = chrg_max; + cpufreq_set_policy(&policy, i); + } + turbomode = ttmp; + } + } + else { + if(chrg_state) { + ttmp = turbomode; + turbomode=1; + cpu_state(1); + for_each_online_cpu(i) { + msleep(200); + cpufreq_get_policy(&policy, i); + policy.max = (chrg_prevmax) ? chrg_prevmax : + freq_table[nr_freqs-1]; + cpufreq_set_policy(&policy, i); + } + turbomode = ttmp; + } + } + + mutex_unlock(&override_mutex); +out: + chrg_state = state; + + printk("override: charger %s!\n", (chrg_state) ? 
"plugged in" \ + : "unplugged"); +} +EXPORT_SYMBOL(cpufreq_override_set_chrg); + +static ssize_t show_override_charger(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf,"%u\n", chrg_override); +} + +static ssize_t store_override_charger(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + struct cpufreq_policy policy; + unsigned int tmp, ttmp, i; + + if(sscanf(buf, "%u", &tmp) == 1) { + tmp = (tmp != 0 && tmp != 1) ? chrg_override : tmp; + if(tmp && (!chrg_override && chrg_state)) { + ttmp = turbomode; + turbomode=1; + cpu_state(1); + for_each_online_cpu(i) { + msleep(200); + cpufreq_get_policy(&policy, i); + policy.max = (chrg_prevmax) ? chrg_prevmax : + freq_table[nr_freqs-1]; + cpufreq_set_policy(&policy, i); + } + turbomode = ttmp; + } + if(!tmp && (chrg_override && chrg_state)) { + ttmp = turbomode; + turbomode=1; + cpu_state(1); + for_each_online_cpu(i) { + msleep(200); + cpufreq_get_policy(&policy, i); + chrg_prevmax = policy.max; + policy.max = chrg_max; + cpufreq_set_policy(&policy, i); + } + turbomode = ttmp; + } + chrg_override = tmp; + + printk("override: set chrg_override: %u\n", chrg_override); + } + else + printk("override: invalid chrg_override mode\n"); + + return count; +} +#endif + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_VOLT_CONFIG +static ssize_t show_vdd_max(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", VDD_MAX); +} + +static ssize_t show_vdd_min(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", VDD_MIN); +} + +static ssize_t show_vdd(struct kobject *kobj, struct attribute *attr, char *buf) +{ + unsigned int i, acpu_freq_vlt_tbl[MAX_FREQS]; + char tmp[250]; + + acpuclk_get_voltages(acpu_freq_vlt_tbl); + + strcpy(buf,""); + + for(i=0 ; i < nr_freqs ; ++i) { + sprintf(tmp,"%u ",acpu_freq_vlt_tbl[i]); + strcat(buf,tmp); + } + + strcpy(tmp,buf); + + return sprintf(buf,"%s\n",tmp); +} + +static ssize_t store_vdd(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int i = 0, acpu_freq_vlt_tbl[MAX_FREQS]; + unsigned int *c = acpu_freq_vlt_tbl; + const char *wp = buf; + + for(i = 0; i < nr_freqs; i++) { + wp=skip_spaces(wp); + sscanf(wp,"%u",&c[i]); + if(c[i] < VDD_MIN || c[i] > VDD_MAX) break; + wp=strchr(wp,' ')+1; + } + + if(i != nr_freqs) + printk("override: store_vdd invalid\n"); + else { + acpuclk_set_voltages(acpu_freq_vlt_tbl); + + printk("override: set vdd %s\n",buf); + } + + return count; +} +#endif + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_POWERSAVER +static ssize_t show_power_saver(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + return sprintf(buf,"%u\n",power_save); +} + +static ssize_t store_power_saver(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int tmp; + + if(sscanf(buf, "%u", &tmp) == 1) { + tmp = (tmp != 0 && tmp != 1) ? 
0 : tmp; + if(tmp) { + cpu_state(0); +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE + printk("override: disabling turbo mode\n"); + turbomode = 0; +#endif + if(!power_save) schedule_delayed_work_on(0,&worker,1); + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_L2_HACK + printk("override: disabling L2 boost...\n"); + l2boost = 0; + acpuclk_set_l2_hack(0); +#endif + + } + else { + cpu_state(1); + if(power_save) cancel_delayed_work_sync(&worker); + } + + power_save = tmp; + + printk("override: set power_save: %u\n",power_save); + } + else + printk("override: invalid power save mode\n"); + + return count; +} +#endif + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE +static ssize_t show_turbo_mode(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + return sprintf(buf,"%u\n",turbomode); +} + +static ssize_t store_turbo_mode(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int tmp; + + if(sscanf(buf, "%u", &tmp) == 1) { + turbomode = (tmp != 0 && tmp != 1) ? turbomode : tmp; + + if(turbomode) { + cpu_state(1); +#ifdef CONFIG_CPU_FREQ_OVERRIDE_POWERSAVER + if(power_save) { + power_save = 0; + printk("override: disabling power saver\n"); + cancel_delayed_work_sync(&worker); + } +#endif + } + else cpu_state(0); + + printk("override: set turbo_mode: %u\n",turbomode); + } + else + printk("override: invalid turbo_mode\n"); + + return count; +} +#endif + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_L2_HACK +static ssize_t show_l2_boost(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + return sprintf(buf,"%u\n",l2boost); +} + +static ssize_t store_l2_boost(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int tmp; + + if(sscanf(buf, "%u", &tmp) == 1) { + l2boost = (tmp != 0 && tmp != 1) ? l2boost : tmp; + if(l2boost) acpuclk_set_l2_hack(1); + else acpuclk_set_l2_hack(0); + + printk("override: set l2 boost: %u\n",l2boost); + } + else + printk("override: invalid l2 boost mode\n"); + + return count; +} +#endif + +static ssize_t show_vdd_freqs(struct kobject *kobj, struct attribute *attr, char *buf) +{ + unsigned int i; + char tmp[250]; + + strcpy(buf,""); + + for(i=0 ; i < nr_freqs ; ++i) { + sprintf(tmp,"%u ", freq_table[i]); + strcat(buf,tmp); + } + + strcpy(tmp,buf); + + return sprintf(buf,"%s\n",tmp); +} + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_VOLT_CONFIG +define_one_global_ro(vdd_min); +define_one_global_ro(vdd_max); +define_one_global_rw(vdd); +#endif +define_one_global_ro(vdd_freqs); +#ifdef CONFIG_CPU_FREQ_OVERRIDE_POWERSAVER +define_one_global_rw(power_saver); +#endif +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE +define_one_global_rw(turbo_mode); +define_one_global_rw(override_charger); +#endif +#ifdef CONFIG_CPU_FREQ_OVERRIDE_L2_HACK +define_one_global_rw(l2_boost); +#endif + +static struct attribute *default_attrs[] = { +#ifdef CONFIG_CPU_FREQ_OVERRIDE_VOLT_CONFIG + &vdd.attr, + &vdd_min.attr, + &vdd_max.attr, +#endif + &vdd_freqs.attr, +#ifdef CONFIG_CPU_FREQ_OVERRIDE_POWERSAVER + &power_saver.attr, +#endif +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE + &turbo_mode.attr, + &override_charger.attr, +#endif +#ifdef CONFIG_CPU_FREQ_OVERRIDE_L2_HACK + &l2_boost.attr, +#endif + NULL +}; + +static struct attribute_group override_attr_group = { + .attrs = default_attrs, + .name = "override" +}; + +static int __init cpufreq_override_driver_init(void) +{ + int ret = 0; + + nr_freqs = acpuclk_get_freqs(freq_table); + printk("override: freqs configured: %u\n",nr_freqs); + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_POWERSAVER + if(power_save) { + 
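+ // power_save defaults on here: start the CPU load poller and keep turbo mode off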
schedule_delayed_work_on(0,&worker, 10); + turbomode = 0; + } +#endif + + if((ret = sysfs_create_group(cpufreq_global_kobject,&override_attr_group))) + printk("override: failed!\n"); + else + printk("override: initialized!\n"); + + return ret; +} + +static void __exit cpufreq_override_driver_exit(void) +{ +#ifdef CONFIG_CPU_FREQ_OVERRIDE_POWERSAVER + cancel_delayed_work(&worker); + flush_scheduled_work(); +#endif + + sysfs_remove_group(cpufreq_global_kobject, &override_attr_group); +} + +MODULE_AUTHOR("marco@unixpsycho.com"); +MODULE_DESCRIPTION("'cpufreq_override' - A driver to do cool stuff "); +MODULE_LICENSE("GPL"); + +module_init(cpufreq_override_driver_init); +module_exit(cpufreq_override_driver_exit); + diff --git a/drivers/power/max8903b_charger.c b/drivers/power/max8903b_charger.c index 275c49fe1..a7bdc6e49 100644 --- a/drivers/power/max8903b_charger.c +++ b/drivers/power/max8903b_charger.c @@ -35,6 +35,9 @@ #include #include +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE +void cpufreq_override_set_chrg(bool state); +#endif static struct max8903b_platform_data *pdevice_resource; static enum max8903b_current current_limit; @@ -50,12 +53,18 @@ static int max8903b_current_setup(enum max8903b_current value) /* disable charging */ gpio_set_value(pdevice_resource->CEN_N_in, pdevice_resource->CEN_N_in_polarity ? 0 : 1); /* charger disable */ printk(KERN_INFO "%s: ### CHARGE_DISABLE\n", __func__); +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE + cpufreq_override_set_chrg(0); +#endif break; case CURRENT_ZERO: // this is for no charger connection. gpio_set_value(pdevice_resource->DCM_in, pdevice_resource->DCM_in_polarity ? 1 : 0); /* usb mode */ gpio_set_value(pdevice_resource->USUS_in, pdevice_resource->USUS_in_polarity ? 0 : 1); /* usb suspend */ pdevice_resource->suspend_gpio_config(); printk(KERN_INFO "%s: CURRENT_ZERO\n", __func__); +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE + cpufreq_override_set_chrg(0); +#endif break; case CURRENT_100MA: gpio_set_value(pdevice_resource->DCM_in, pdevice_resource->DCM_in_polarity? 1 : 0); /* usb mode */ @@ -81,6 +90,9 @@ static int max8903b_current_setup(enum max8903b_current value) pdevice_resource->set_DC_CHG_Mode_current(value); gpio_set_value(pdevice_resource->CEN_N_in, pdevice_resource->CEN_N_in_polarity ? 
1 : 0); /* charger enable */ printk(KERN_INFO "%s: CURRENT_750(4), 900(5), 1000(6), 1400(7), 2000MA(9): %d\n", __func__, value); +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE + cpufreq_override_set_chrg(1); +#endif break; default: printk(KERN_INFO "%s: Not supported current setting\n", __func__); diff --git a/drivers/video/msm_pe/msm_fb.c b/drivers/video/msm_pe/msm_fb.c index 9898c5d52..b4f469f70 100644 --- a/drivers/video/msm_pe/msm_fb.c +++ b/drivers/video/msm_pe/msm_fb.c @@ -51,6 +51,10 @@ #include "mdp.h" #include "mdp4.h" +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE +void cpufreq_override_set_lcd_state(bool state); +#endif + #ifdef CONFIG_FB_MSM_LOGO #define INIT_IMAGE_FILE "/initlogo.rle" extern int load_565rle_image(char *filename); @@ -68,6 +72,11 @@ static int pdev_list_cnt; int vsync_mode = 1; +#ifdef CONFIG_CPU_FREQ_GOV_ONDEMAND_TICKLE +bool msm_fb_state=1; +EXPORT_SYMBOL(msm_fb_state); +#endif + #define MAX_BLIT_REQ 256 #define MAX_FBI_LIST 32 @@ -347,6 +356,9 @@ static ssize_t msm_fb_store_state(struct device *dev, } else { printk(KERN_INFO "msmfb: Resuming msmfb\n"); +#ifdef CONFIG_CPU_FREQ_GOV_ONDEMAND_TICKLE + msm_fb_state=1; +#endif acquire_console_sem(); ret = msm_fb_resume_sub(mfd); release_console_sem(); @@ -354,6 +366,10 @@ static ssize_t msm_fb_store_state(struct device *dev, //fb_set_suspend(mfd->fbi[0], FBINFO_STATE_RUNNING); mfd->suspended = false; + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE + cpufreq_override_set_lcd_state(1); +#endif } } @@ -364,6 +380,9 @@ static ssize_t msm_fb_store_state(struct device *dev, } else { printk(KERN_INFO "msmfb: Suspending msmfb\n"); +#ifdef CONFIG_CPU_FREQ_GOV_ONDEMAND_TICKLE + msm_fb_state=0; +#endif acquire_console_sem(); ret = msm_fb_suspend_sub(mfd); release_console_sem(); @@ -374,6 +393,10 @@ static ssize_t msm_fb_store_state(struct device *dev, //fb_set_suspend(mfd->fbi[0], FBINFO_STATE_SUSPENDED); mfd->pdev->dev.power.power_state = PMSG_SUSPEND; mfd->suspended = true; + +#ifdef CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE + cpufreq_override_set_lcd_state(0); +#endif } } From 4bae2a9c47703376f08e9ef0d6af6aa996a9fe52 Mon Sep 17 00:00:00 2001 From: uNiXpSyChO Date: Sun, 1 Jan 2012 20:32:34 -0500 Subject: [PATCH 15/19] ondemand-ng patch (depends on override mod) --- drivers/cpufreq/cpufreq_ondemand_tickle.c | 109 +++++++++++++++++++--- 1 file changed, 95 insertions(+), 14 deletions(-) diff --git a/drivers/cpufreq/cpufreq_ondemand_tickle.c b/drivers/cpufreq/cpufreq_ondemand_tickle.c index 3748c7444..873283112 100644 --- a/drivers/cpufreq/cpufreq_ondemand_tickle.c +++ b/drivers/cpufreq/cpufreq_ondemand_tickle.c @@ -1,5 +1,5 @@ /* - * drivers/cpufreq/cpufreq_ondemand_tickle.c + * drivers/cpufreq/cpufreq_ondemandtcl.c * * A version of cpufreq_ondemand supporing hinting, or tickling, into * high performance levels based on platform defined events. This governor @@ -13,6 +13,8 @@ * Jun Nakajima * (C) 2009 Palm Inc, Corey Tabaka * + * Screenstate mod by uNiXpXyChO + * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
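The hunks above wire state reporting into the override module: max8903b_charger.c calls cpufreq_override_set_chrg() when charging is enabled or disabled, and msm_fb.c calls cpufreq_override_set_lcd_state() on panel resume/suspend, both guarded by CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE, while the exported msm_fb_state flag is what the screenstate-aware governor in the next hunks reads directly. The override-side body of cpufreq_override_set_lcd_state() is not part of this excerpt, so the snippet below is only a rough sketch of what such a hook could look like, reusing the driver's existing cpu_state() and turbomode helpers; the exact policy (parking the second core while the panel is off) is an assumption, not the author's implementation.

/*
 * Illustrative sketch only: the real cpufreq_override_set_lcd_state()
 * is defined elsewhere in the override driver and may behave differently.
 * Assumed policy: park the second core while the screen is off and bring
 * it back on resume if turbo mode is still enabled.
 */
static bool lcd_state = 1;

void cpufreq_override_set_lcd_state(bool state)
{
	lcd_state = state;

	if (!state)
		cpu_state(0);	/* screen off: drop to a single core */
	else if (turbomode)
		cpu_state(1);	/* screen on: restore SMP if turbo is set */

	printk("override: lcd state: %s\n", state ? "on" : "off");
}
EXPORT_SYMBOL(cpufreq_override_set_lcd_state);

Called from the framebuffer suspend/resume paths shown above, a hook along these lines keeps the override driver's CPU policy in sync with the display without the governor having to poll.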
@@ -315,6 +317,30 @@ static int get_num_sample_records(void) { return count; } +static bool ss_enabled = 0; +static unsigned int sleep_max_freq = 432000; +extern bool msm_fb_state; + +static int cpufreq_target(struct cpufreq_policy *policy, unsigned int freq, + unsigned int relation) +{ + int retval = -EINVAL; + + if(msm_fb_state || !ss_enabled) { + retval = __cpufreq_driver_target(policy, freq, relation); + } + else { + if(freq <= sleep_max_freq) + retval = __cpufreq_driver_target(policy, freq, + relation); + else + retval = __cpufreq_driver_target(policy, sleep_max_freq, + relation); + } + + return retval; +} + static void *stats_start(struct seq_file *m, loff_t *pos) { int i; @@ -773,6 +799,18 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj, return sprintf(buf, "%u\n", min_sampling_rate); } +static ssize_t show_screen_off_max_freq(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", sleep_max_freq); +} + +static ssize_t show_screenstate_enable(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", ss_enabled); +} + define_one_global_ro(sampling_rate_max); define_one_global_ro(sampling_rate_min); @@ -783,6 +821,7 @@ static ssize_t show_##file_name \ { \ return sprintf(buf, "%u\n", dbs_tuners_ins.object); \ } + show_one(sampling_rate, sampling_rate); show_one(io_is_busy, io_is_busy); show_one(up_threshold, up_threshold); @@ -993,6 +1032,44 @@ static ssize_t store_max_tickle_window(struct kobject *a, struct attribute *b, return count; } +static ssize_t store_screen_off_max_freq(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input < 122880 || input > 2000000) { + printk("ondemandtcl: invalid sleep freq\n"); + return -EINVAL; + } + + sleep_max_freq = input; + + return count; +} + +static ssize_t store_screenstate_enable(struct kobject *a, struct attribute *b, + const char *buf, size_t count) +{ + unsigned int input; + int ret; + ret = sscanf(buf, "%u", &input); + + if (ret != 1) + return -EINVAL; + + if (input != 0 && input != 1) { + return -EINVAL; + } + + ss_enabled = input; + + return count; +} define_one_global_rw(sampling_rate); define_one_global_rw(io_is_busy); @@ -1002,6 +1079,8 @@ define_one_global_rw(sampling_down_factor); define_one_global_rw(ignore_nice_load); define_one_global_rw(powersave_bias); define_one_global_rw(max_tickle_window); +define_one_global_rw(screen_off_max_freq); +define_one_global_rw(screenstate_enable); static struct attribute *dbs_attributes[] = { &sampling_rate_max.attr, @@ -1014,6 +1093,8 @@ static struct attribute *dbs_attributes[] = { &powersave_bias.attr, &io_is_busy.attr, &max_tickle_window.attr, + &screen_off_max_freq.attr, + &screenstate_enable.attr, NULL }; @@ -1075,7 +1156,7 @@ dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) else if (p->cur == p->max) return; - __cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ? + cpufreq_target(p, freq, dbs_tuners_ins.powersave_bias ? 
CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); } @@ -1173,7 +1254,7 @@ static void do_tickle_state_change(struct work_struct *work) if (policy->cur < policy->max) { record_sample(policy->cur, policy->max, -21, policy->cpu, NULL); - __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); + cpufreq_target(policy, policy->max, CPUFREQ_RELATION_H); } dbs_info->tickle_active = 1; @@ -1383,7 +1464,7 @@ static void do_floor_state_change(struct work_struct *work) if (policy->cur < f) { record_sample(policy->cur, f, -31, policy->cpu, NULL); - __cpufreq_driver_target(policy, f, + cpufreq_target(policy, f, CPUFREQ_RELATION_L); } @@ -1697,7 +1778,7 @@ static void adjust_for_load(struct cpu_dbs_info_s *this_dbs_info) record_sample(policy->cur, policy->max, load, policy->cpu, &this_dbs_info->rlv); - __cpufreq_driver_target(policy, policy->max, + cpufreq_target(policy, policy->max, CPUFREQ_RELATION_H); } else { int freq = powersave_bias_target(policy, policy->max, @@ -1711,7 +1792,7 @@ static void adjust_for_load(struct cpu_dbs_info_s *this_dbs_info) if (policy->cur != freq) record_sample(policy->cur, freq, load, policy->cpu, &this_dbs_info->rlv); - __cpufreq_driver_target(policy, freq, + cpufreq_target(policy, freq, CPUFREQ_RELATION_L); } return; @@ -1750,7 +1831,7 @@ static void adjust_for_load(struct cpu_dbs_info_s *this_dbs_info) if (policy->cur != freq_next) record_sample(policy->cur, freq_next, load, policy->cpu, &this_dbs_info->rlv); - __cpufreq_driver_target(policy, freq_next, + cpufreq_target(policy, freq_next, CPUFREQ_RELATION_L); } else { int freq = powersave_bias_target(policy, freq_next, @@ -1763,7 +1844,7 @@ static void adjust_for_load(struct cpu_dbs_info_s *this_dbs_info) if (policy->cur != freq) record_sample(policy->cur, freq, load, policy->cpu, &this_dbs_info->rlv); - __cpufreq_driver_target(policy, freq, + cpufreq_target(policy, freq, CPUFREQ_RELATION_L); } } @@ -1799,7 +1880,7 @@ static void do_dbs_timer(struct work_struct *work) } else { record_sample(dbs_info->cur_policy->cur, dbs_info->freq_lo, -1, cpu, NULL); - __cpufreq_driver_target(dbs_info->cur_policy, + cpufreq_target(dbs_info->cur_policy, dbs_info->freq_lo, CPUFREQ_RELATION_H); } queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); @@ -1894,8 +1975,7 @@ static void dbs_refresh_callback(struct work_struct *unused) record_sample(policy->cur, policy->max, -51, policy->cpu, NULL); policy->cur = policy->max; - __cpufreq_driver_target(policy, policy->max, - CPUFREQ_RELATION_L); + cpufreq_target(policy, policy->max, CPUFREQ_RELATION_L); this_dbs_info->prev_cpu_idle = get_cpu_idle_time(0, &this_dbs_info->prev_cpu_wall); } @@ -2165,13 +2245,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, if (policy->max < this_dbs_info->cur_policy->cur) { record_sample(policy->cur, policy->max, -40, policy->cpu, NULL); - __cpufreq_driver_target(this_dbs_info->cur_policy, + cpufreq_target(this_dbs_info->cur_policy, policy->max, CPUFREQ_RELATION_H); } else if (policy->min > this_dbs_info->cur_policy->cur) { record_sample(policy->cur, policy->min, -41, policy->cpu, NULL); - __cpufreq_driver_target(this_dbs_info->cur_policy, + cpufreq_target(this_dbs_info->cur_policy, policy->min, CPUFREQ_RELATION_L); } @@ -2260,7 +2340,8 @@ MODULE_AUTHOR("Venkatesh Pallipadi "); MODULE_AUTHOR("Alexey Starikovskiy "); MODULE_AUTHOR("Corey Tabaka "); MODULE_DESCRIPTION("'cpufreq_ondemand_tickle' - A dynamic cpufreq governor for " - "Low Latency Frequency Transition capable processors"); + "Low Latency Frequency Transition capable 
processors" + "With Screenstate"); MODULE_LICENSE("GPL"); #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND_TICKLE From c533dc20ca5d34ad8a0036cd2b2b8f923be487c8 Mon Sep 17 00:00:00 2001 From: uNiXpSyChO Date: Sun, 1 Jan 2012 20:20:56 -0500 Subject: [PATCH 16/19] Overclock to 1.8GHz, default to 1.2GHz --- arch/arm/configs/tenderloin_defconfig | 11 ++- arch/arm/mach-msm/acpuclock-8x60.c | 101 ++++++++++---------------- arch/arm/mach-msm/board-tenderloin.c | 8 +- 3 files changed, 52 insertions(+), 68 deletions(-) diff --git a/arch/arm/configs/tenderloin_defconfig b/arch/arm/configs/tenderloin_defconfig index 579248b8a..878411e94 100644 --- a/arch/arm/configs/tenderloin_defconfig +++ b/arch/arm/configs/tenderloin_defconfig @@ -392,7 +392,9 @@ CONFIG_MSM_RPC_OEM_RAPI=y # CONFIG_MSM_RPCSERVER_HANDSET is not set CONFIG_MSM_RMT_STORAGE_CLIENT=y # CONFIG_MSM_RMT_STORAGE_CLIENT_STATS is not set -# CONFIG_MSM_CPU_FREQ_SET_MIN_MAX is not set +CONFIG_MSM_CPU_FREQ_SET_MIN_MAX=y +CONFIG_MSM_CPU_FREQ_MIN=192000 +CONFIG_MSM_CPU_FREQ_MAX=1188000 # CONFIG_MSM_AVS_HW is not set # CONFIG_MSM_HW3D is not set CONFIG_AMSS_7X25_VERSION_2009=y @@ -537,6 +539,13 @@ CONFIG_CMDLINE="" # CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_TABLE=y +CONFIG_CPU_FREQ_OVERRIDE=y +# CONFIG_CPU_FREQ_OVERRIDE_L2_HACK is not set +CONFIG_CPU_FREQ_OVERRIDE_VOLT_CONFIG=y +CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE=y +# CONFIG_CPU_FREQ_OVERRIDE_TURBO_MODE_ENABLE is not set +CONFIG_CPU_FREQ_OVERRIDE_POWERSAVER=y +# CONFIG_CPU_FREQ_OVERRIDE_POWERSAVER_ENABLE is not set # CONFIG_CPU_FREQ_DEBUG is not set CONFIG_CPU_FREQ_STAT=y # CONFIG_CPU_FREQ_STAT_DETAILS is not set diff --git a/arch/arm/mach-msm/acpuclock-8x60.c b/arch/arm/mach-msm/acpuclock-8x60.c index 5fa212a5a..709cb0aab 100644 --- a/arch/arm/mach-msm/acpuclock-8x60.c +++ b/arch/arm/mach-msm/acpuclock-8x60.c @@ -52,9 +52,9 @@ * The PLL hardware is capable of 384MHz to 1536MHz. The L_VALs * used for calibration should respect these limits. 
*/ #define L_VAL_SCPLL_CAL_MIN 0x08 /* = 432 MHz with 27MHz source */ -#define L_VAL_SCPLL_CAL_MAX 0x1C /* = 1512 MHz with 27MHz source */ +#define L_VAL_SCPLL_CAL_MAX 0x22 /* = 1836 MHz with 27MHz source */ -#define MAX_VDD_SC 1250000 /* uV */ +#define MAX_VDD_SC 1600000 /* uV */ #define MAX_AXI 310500 /* KHz */ #define SCPLL_LOW_VDD_FMAX 594000 /* KHz */ #define SCPLL_LOW_VDD 1000000 /* uV */ @@ -182,21 +182,21 @@ static uint32_t bus_perf_client; static struct clkctl_l2_speed l2_freq_tbl_v2[] = { [0] = { MAX_AXI, 0, 0, 1000000, 1100000, 0}, [1] = { 432000, 1, 0x08, 1000000, 1100000, 0}, - [2] = { 486000, 1, 0x09, 1000000, 1100000, 0}, - [3] = { 540000, 1, 0x0A, 1000000, 1100000, 0}, - [4] = { 594000, 1, 0x0B, 1000000, 1100000, 0}, - [5] = { 648000, 1, 0x0C, 1000000, 1100000, 1}, - [6] = { 702000, 1, 0x0D, 1100000, 1100000, 1}, - [7] = { 756000, 1, 0x0E, 1100000, 1100000, 1}, - [8] = { 810000, 1, 0x0F, 1100000, 1100000, 1}, - [9] = { 864000, 1, 0x10, 1100000, 1100000, 1}, - [10] = { 918000, 1, 0x11, 1100000, 1100000, 2}, - [11] = { 972000, 1, 0x12, 1100000, 1100000, 2}, - [12] = {1026000, 1, 0x13, 1100000, 1100000, 2}, - [13] = {1080000, 1, 0x14, 1100000, 1200000, 2}, - [14] = {1134000, 1, 0x15, 1100000, 1200000, 2}, - [15] = {1188000, 1, 0x16, 1200000, 1200000, 3}, - [16] = {1404000, 1, 0x1A, 1200000, 1250000, 3}, + [2] = { 540000, 1, 0x0A, 1000000, 1100000, 0}, + [3] = { 648000, 1, 0x0C, 1000000, 1100000, 1}, + [4] = { 756000, 1, 0x0E, 1100000, 1100000, 1}, + [5] = { 864000, 1, 0x10, 1100000, 1100000, 1}, + [6] = { 918000, 1, 0x11, 1100000, 1100000, 2}, + [7] = { 972000, 1, 0x12, 1100000, 1100000, 2}, + [8] = {1026000, 1, 0x13, 1100000, 1100000, 2}, + [9] = {1080000, 1, 0x14, 1100000, 1200000, 2}, + [10] = {1134000, 1, 0x15, 1100000, 1200000, 2}, + [11] = {1188000, 1, 0x16, 1200000, 1200000, 3}, + [12] = {1242000, 1, 0x17, 1200000, 1200000, 3}, + [13] = {1350000, 1, 0x19, 1200000, 1250000, 3}, + [14] = {1448000, 1, 0x1A, 1200000, 1250000, 3}, + [15] = {1458000, 1, 0x1B, 1200000, 1250000, 3}, + [16] = {1512000, 1, 0x1C, 1250000, 1300000, 3}, }; #define L2(x) (&l2_freq_tbl_v2[(x)]) @@ -207,21 +207,22 @@ static struct clkctl_acpu_speed acpu_freq_tbl_v2[] = { { {0, 0}, MAX_AXI, ACPU_AFAB, 1, 0, 0, 0, L2(0), 875000, 0x03006000}, { {1, 1}, 384000, ACPU_PLL_8, 3, 0, 0, 0, L2(1), 875000, 0x03006000}, { {1, 1}, 432000, ACPU_SCPLL, 0, 0, 1, 0x08, L2(1), 887500, 0x03006000}, - { {1, 1}, 486000, ACPU_SCPLL, 0, 0, 1, 0x09, L2(2), 912500, 0x03006000}, - { {1, 1}, 540000, ACPU_SCPLL, 0, 0, 1, 0x0A, L2(3), 925000, 0x03006000}, - { {1, 1}, 594000, ACPU_SCPLL, 0, 0, 1, 0x0B, L2(4), 937500, 0x03006000}, - { {1, 1}, 648000, ACPU_SCPLL, 0, 0, 1, 0x0C, L2(5), 950000, 0x03006000}, - { {1, 1}, 702000, ACPU_SCPLL, 0, 0, 1, 0x0D, L2(6), 975000, 0x03006000}, - { {1, 1}, 756000, ACPU_SCPLL, 0, 0, 1, 0x0E, L2(7), 1000000, 0x03006000}, - { {1, 1}, 810000, ACPU_SCPLL, 0, 0, 1, 0x0F, L2(8), 1012500, 0x03006000}, - { {1, 1}, 864000, ACPU_SCPLL, 0, 0, 1, 0x10, L2(9), 1037500, 0x03006000}, - { {1, 1}, 918000, ACPU_SCPLL, 0, 0, 1, 0x11, L2(10), 1062500, 0x03006000}, - { {1, 1}, 972000, ACPU_SCPLL, 0, 0, 1, 0x12, L2(11), 1087500, 0x03006000}, - { {1, 1}, 1026000, ACPU_SCPLL, 0, 0, 1, 0x13, L2(12), 1125000, 0x03006000}, - { {1, 1}, 1080000, ACPU_SCPLL, 0, 0, 1, 0x14, L2(13), 1137500, 0x03006000}, - { {1, 1}, 1134000, ACPU_SCPLL, 0, 0, 1, 0x15, L2(14), 1162500, 0x03006000}, - { {1, 1}, 1188000, ACPU_SCPLL, 0, 0, 1, 0x16, L2(15), 1187500, 0x03006000}, - { {1, 1}, 1512000, ACPU_SCPLL, 0, 0, 1, 0x1C, L2(16), 1250000, 
0x03006000}, + { {1, 1}, 540000, ACPU_SCPLL, 0, 0, 1, 0x0A, L2(2), 925000, 0x03006000}, + { {1, 1}, 648000, ACPU_SCPLL, 0, 0, 1, 0x0C, L2(3), 950000, 0x03006000}, + { {1, 1}, 756000, ACPU_SCPLL, 0, 0, 1, 0x0E, L2(4), 1000000, 0x03006000}, + { {1, 1}, 864000, ACPU_SCPLL, 0, 0, 1, 0x10, L2(5), 1037500, 0x03006000}, + { {1, 1}, 918000, ACPU_SCPLL, 0, 0, 1, 0x11, L2(6), 1062500, 0x03006000}, + { {1, 1}, 972000, ACPU_SCPLL, 0, 0, 1, 0x12, L2(7), 1087500, 0x03006000}, + { {1, 1}, 1026000, ACPU_SCPLL, 0, 0, 1, 0x13, L2(8), 1125000, 0x03006000}, + { {1, 1}, 1080000, ACPU_SCPLL, 0, 0, 1, 0x14, L2(9), 1137500, 0x03006000}, + { {1, 1}, 1134000, ACPU_SCPLL, 0, 0, 1, 0x15, L2(10), 1162500, 0x03006000}, + { {1, 1}, 1188000, ACPU_SCPLL, 0, 0, 1, 0x16, L2(11), 1187500, 0x03006000}, + { {1, 1}, 1242000, ACPU_SCPLL, 0, 0, 1, 0x17, L2(12), 1190000, 0x03006000}, + { {1, 1}, 1350000, ACPU_SCPLL, 0, 0, 1, 0x19, L2(13), 1195000, 0x03006000}, + { {1, 1}, 1458000, ACPU_SCPLL, 0, 0, 1, 0x1B, L2(14), 1200000, 0x03006000}, + { {1, 1}, 1512000, ACPU_SCPLL, 0, 0, 1, 0x1C, L2(15), 1250000, 0x03006000}, + { {1, 1}, 1728000, ACPU_SCPLL, 0, 0, 1, 0x20, L2(16), 1350000, 0x03006000}, + { {1, 1}, 1836000, ACPU_SCPLL, 0, 0, 1, 0x22, L2(16), 1450000, 0x03006000}, { {0, 0}, 0 }, }; /* acpu_freq_tbl row to use when reconfiguring SC/L2 PLLs. */ @@ -751,37 +752,8 @@ static void __init cpufreq_table_init(void) static void __init cpufreq_table_init(void) {} #endif -static unsigned int __init select_freq_plan(void) -{ - uint32_t speed_bin, max_khz; - struct clkctl_acpu_speed *f; - - acpu_freq_tbl = acpu_freq_tbl_v2; - l2_freq_tbl = l2_freq_tbl_v2; - l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl_v2); - - speed_bin = readl(QFPROM_SPEED_BIN_PRI) & 0xF; - if (speed_bin == 0x1) - max_khz = 1512000; - else - max_khz = 1188000; - - /* Truncate the table based to max_khz. */ - for (f = acpu_freq_tbl; f->acpuclk_khz != 0; f++) { - if (f->acpuclk_khz > max_khz) { - f->acpuclk_khz = 0; - break; - } - } - f--; - pr_info("Max ACPU freq: %u KHz\n", f->acpuclk_khz); - - return f->acpuclk_khz; -} - void __init msm_acpu_clock_init(struct msm_acpu_clock_platform_data *clkdata) { - unsigned int max_cpu_khz; int cpu; mutex_init(&drv_state.lock); @@ -790,7 +762,10 @@ void __init msm_acpu_clock_init(struct msm_acpu_clock_platform_data *clkdata) drv_state.vdd_switch_time_us = clkdata->vdd_switch_time_us; /* Configure hardware. */ - max_cpu_khz = select_freq_plan(); + acpu_freq_tbl = acpu_freq_tbl_v2; + l2_freq_tbl = l2_freq_tbl_v2; + l2_freq_tbl_size = ARRAY_SIZE(l2_freq_tbl_v2); + unselect_scplls(); scpll_set_refs(); for_each_possible_cpu(cpu) @@ -801,7 +776,7 @@ void __init msm_acpu_clock_init(struct msm_acpu_clock_platform_data *clkdata) /* Improve boot time by ramping up CPUs immediately. 
*/ for_each_online_cpu(cpu) - acpuclk_set_rate(cpu, max_cpu_khz, SETRATE_INIT); + acpuclk_set_rate(cpu, 1188000, SETRATE_INIT); cpufreq_table_init(); } diff --git a/arch/arm/mach-msm/board-tenderloin.c b/arch/arm/mach-msm/board-tenderloin.c index a4d6771ea..8e6a02ee9 100644 --- a/arch/arm/mach-msm/board-tenderloin.c +++ b/arch/arm/mach-msm/board-tenderloin.c @@ -402,7 +402,7 @@ static struct regulator_init_data saw_s0_init_data = { .constraints = { .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, .min_uV = 840000, - .max_uV = 1250000, + .max_uV = 1600000, }, .num_consumer_supplies = 1, .consumer_supplies = &saw_s0_supply, @@ -412,7 +412,7 @@ static struct regulator_init_data saw_s1_init_data = { .constraints = { .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE, .min_uV = 840000, - .max_uV = 1250000, + .max_uV = 1600000, }, .num_consumer_supplies = 1, .consumer_supplies = &saw_s1_supply, @@ -3005,9 +3005,9 @@ static struct rpm_vreg_pdata rpm_vreg_init_pdata[RPM_VREG_ID_MAX] = { RPM_VREG_INIT_LDO(PM8058_L24, 0, 1, 0, 1200000, 1200000, LDO150HMIN, 0), RPM_VREG_INIT_LDO(PM8058_L25, 0, 1, 0, 1200000, 1200000, LDO150HMIN, 0), - RPM_VREG_INIT_SMPS(PM8058_S0, 0, 1, 1, 500000, 1250000, SMPS_HMIN, 0, + RPM_VREG_INIT_SMPS(PM8058_S0, 0, 1, 1, 500000, 1600000, SMPS_HMIN, 0, RPM_VREG_FREQ_1p60), - RPM_VREG_INIT_SMPS(PM8058_S1, 0, 1, 1, 500000, 1250000, SMPS_HMIN, 0, + RPM_VREG_INIT_SMPS(PM8058_S1, 0, 1, 1, 500000, 1600000, SMPS_HMIN, 0, RPM_VREG_FREQ_1p60), RPM_VREG_INIT_SMPS(PM8058_S2, 0, 1, 0, 1200000, 1400000, SMPS_HMIN, RPM_VREG_PIN_CTRL_A0, RPM_VREG_FREQ_1p60), From fe47a9df3c067acd51e9c7666bab131c1f739ad7 Mon Sep 17 00:00:00 2001 From: Rod Whitby Date: Wed, 4 Jan 2012 15:28:43 +1030 Subject: [PATCH 17/19] Enable additional I/O schedulers --- arch/arm/configs/tenderloin_defconfig | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm/configs/tenderloin_defconfig b/arch/arm/configs/tenderloin_defconfig index 878411e94..6d65f6c09 100644 --- a/arch/arm/configs/tenderloin_defconfig +++ b/arch/arm/configs/tenderloin_defconfig @@ -172,7 +172,12 @@ CONFIG_BLK_DEV_BSG=y # CONFIG_IOSCHED_NOOP=y CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_VR=y CONFIG_IOSCHED_CFQ=y +CONFIG_CFQ_GROUP_IOSCHED=y +CONFIG_IOSCHED_BFQ=y +CONFIG_CGROUP_BFQIO=y +CONFIG_IOSCHED_SIO=y # CONFIG_DEFAULT_DEADLINE is not set CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set From 0cee27edb6220442ed3843c7caea4e3ce8dd17fb Mon Sep 17 00:00:00 2001 From: Rod Whitby Date: Wed, 4 Jan 2012 15:29:16 +1030 Subject: [PATCH 18/19] Enable additional TCP congestion options --- arch/arm/configs/tenderloin_defconfig | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/arm/configs/tenderloin_defconfig b/arch/arm/configs/tenderloin_defconfig index 6d65f6c09..7290b9073 100644 --- a/arch/arm/configs/tenderloin_defconfig +++ b/arch/arm/configs/tenderloin_defconfig @@ -655,16 +655,16 @@ CONFIG_INET_TCP_DIAG=y CONFIG_TCP_CONG_ADVANCED=y CONFIG_TCP_CONG_BIC=m CONFIG_TCP_CONG_CUBIC=y -# CONFIG_TCP_CONG_WESTWOOD is not set -# CONFIG_TCP_CONG_HTCP is not set -# CONFIG_TCP_CONG_HSTCP is not set -# CONFIG_TCP_CONG_HYBLA is not set -# CONFIG_TCP_CONG_VEGAS is not set -# CONFIG_TCP_CONG_SCALABLE is not set +CONFIG_TCP_CONG_WESTWOOD=y +CONFIG_TCP_CONG_HTCP=y +CONFIG_TCP_CONG_HSTCP=y +CONFIG_TCP_CONG_HYBLA=y +CONFIG_TCP_CONG_VEGAS=y +CONFIG_TCP_CONG_SCALABLE=y # CONFIG_TCP_CONG_LP is not set -# CONFIG_TCP_CONG_VENO is not set -# CONFIG_TCP_CONG_YEAH is not set -# CONFIG_TCP_CONG_ILLINOIS is not set +CONFIG_TCP_CONG_VENO=y 
+CONFIG_TCP_CONG_YEAH=y +CONFIG_TCP_CONG_ILLINOIS=y # CONFIG_DEFAULT_BIC is not set CONFIG_DEFAULT_CUBIC=y # CONFIG_DEFAULT_HTCP is not set From 558c8fbd14d4b938259adc12d1c527c82b1471e8 Mon Sep 17 00:00:00 2001 From: Hewball Date: Sun, 15 Jan 2012 08:41:34 +0800 Subject: [PATCH 19/19] Added USB Audio Support --- arch/arm/configs/tenderloin_defconfig | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/arm/configs/tenderloin_defconfig b/arch/arm/configs/tenderloin_defconfig index 7290b9073..dee79c57d 100644 --- a/arch/arm/configs/tenderloin_defconfig +++ b/arch/arm/configs/tenderloin_defconfig @@ -1926,6 +1926,8 @@ CONFIG_SOUND=y CONFIG_SND=y CONFIG_SND_TIMER=y CONFIG_SND_PCM=y +CONFIG_SND_HWDEP=y +CONFIG_SND_RAWMIDI=y CONFIG_SND_JACK=y # CONFIG_SND_SEQUENCER is not set # CONFIG_SND_MIXER_OSS is not set @@ -1947,7 +1949,10 @@ CONFIG_SND_DRIVERS=y # CONFIG_SND_SERIAL_U16550 is not set # CONFIG_SND_MPU401 is not set CONFIG_SND_ARM=y -# CONFIG_SND_USB is not set +CONFIG_SND_USB=y +CONFIG_SND_USB_AUDIO=y +# CONFIG_SND_USB_UA101 is not set +# CONFIG_SND_USB_CAIAQ is not set CONFIG_SND_SOC=y #
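The last three patches are defconfig-only: they build the extra I/O schedulers and TCP congestion-control algorithms into the kernel and enable USB audio class support. Once the additional congestion controls (Westwood, HTCP, Hybla, Veno, YEAH, Illinois and so on) are compiled in, they can be selected system-wide through /proc/sys/net/ipv4/tcp_congestion_control or per socket with the TCP_CONGESTION socket option. The userspace example below is illustrative only; "westwood" is simply one of the names enabled above, and any entry listed in /proc/sys/net/ipv4/tcp_available_congestion_control works the same way.

/* Select one of the newly enabled congestion controls for a single
 * TCP socket. Illustrative userspace example, not part of the patch set. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

int main(void)
{
	const char algo[] = "westwood";
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
		       algo, strlen(algo)) < 0)
		perror("setsockopt(TCP_CONGESTION)");	/* socket keeps the default */
	else
		printf("socket is now using %s\n", algo);

	close(fd);
	return 0;
}

Reading /proc/sys/net/ipv4/tcp_available_congestion_control first is the easy way to confirm which of these algorithms are actually registered on the running kernel.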