author    Linus Torvalds <torvalds@linux-foundation.org>  2009-03-26 16:05:01 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-03-26 16:05:01 -0700
commit    831576fe40f4175e0767623cffa4aeb28157943a (patch)
tree      de54e628e5849d6cf201df4446d760d17f752326 /include
parent    21cdbc1378e8aa96e1ed4a606dce1a8e7daf7fdf (diff)
parent    66fef08f7d5267b2312c4b48a6d2957d2d414105 (diff)
Merge branch 'sched-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (46 commits)
  sched: Add comments to find_busiest_group() function
  sched: Refactor the power savings balance code
  sched: Optimize the !power_savings_balance during fbg()
  sched: Create a helper function to calculate imbalance
  sched: Create helper to calculate small_imbalance in fbg()
  sched: Create a helper function to calculate sched_domain stats for fbg()
  sched: Define structure to store the sched_domain statistics for fbg()
  sched: Create a helper function to calculate sched_group stats for fbg()
  sched: Define structure to store the sched_group statistics for fbg()
  sched: Fix indentations in find_busiest_group() using gotos
  sched: Simple helper functions for find_busiest_group()
  sched: remove unused fields from struct rq
  sched: jiffies not printed per CPU
  sched: small optimisation of can_migrate_task()
  sched: fix typos in documentation
  sched: add avg_overlap decay
  x86, sched_clock(): mark variables read-mostly
  sched: optimize ttwu vs group scheduling
  sched: TIF_NEED_RESCHED -> need_reshed() cleanup
  sched: don't rebalance if attached on NULL domain
  ...
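One of the listed cleanups replaces open-coded tests of the TIF_NEED_RESCHED flag with the existing need_resched() helper; for context only (this helper is not part of the diff below), it is essentially:

	static inline int need_resched(void)
	{
		return unlikely(test_thread_flag(TIF_NEED_RESCHED));
	}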
Diffstat (limited to 'include')
-rw-r--r--  include/linux/init_task.h  |  1
-rw-r--r--  include/linux/latencytop.h | 10
-rw-r--r--  include/linux/plist.h      |  9
-rw-r--r--  include/linux/sched.h      | 17
4 files changed, 32 insertions, 5 deletions
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index e752d973fa21..af1de95e711e 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -147,6 +147,7 @@ extern struct cred init_cred;
.nr_cpus_allowed = NR_CPUS, \
}, \
.tasks = LIST_HEAD_INIT(tsk.tasks), \
+ .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \
.ptraced = LIST_HEAD_INIT(tsk.ptraced), \
.ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
.real_parent = &tsk, \
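For context: pushable_tasks is a plist_node that the RT scheduler uses to keep tasks eligible for pushing on a per-runqueue, priority-sorted list. A rough sketch of how such a node gets queued through the plist API (illustrative only, not taken from this diff):

	/* Illustrative sketch: enqueue a task on the runqueue's pushable
	 * list, ordered by priority through the plist API. */
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);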
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
index 901c2d6377a8..b0e99898527c 100644
--- a/include/linux/latencytop.h
+++ b/include/linux/latencytop.h
@@ -9,6 +9,7 @@
#ifndef _INCLUDE_GUARD_LATENCYTOP_H_
#define _INCLUDE_GUARD_LATENCYTOP_H_
+#include <linux/compiler.h>
#ifdef CONFIG_LATENCYTOP
#define LT_SAVECOUNT 32
@@ -24,7 +25,14 @@ struct latency_record {
struct task_struct;
-void account_scheduler_latency(struct task_struct *task, int usecs, int inter);
+extern int latencytop_enabled;
+void __account_scheduler_latency(struct task_struct *task, int usecs, int inter);
+static inline void
+account_scheduler_latency(struct task_struct *task, int usecs, int inter)
+{
+ if (unlikely(latencytop_enabled))
+ __account_scheduler_latency(task, usecs, inter);
+}
void clear_all_latency_tracing(struct task_struct *p);
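The inline wrapper above keeps the common case cheap: with latencytop disabled, a caller pays only a test of latencytop_enabled rather than an unconditional function call. A hypothetical call site might look like:

	/* Hypothetical caller: record a delay of 'delta' nanoseconds,
	 * converted to microseconds, as an interruptible sleep. */
	account_scheduler_latency(tsk, delta >> 10, 1);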
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 85de2f055874..45926d77d6ac 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -96,6 +96,10 @@ struct plist_node {
# define PLIST_HEAD_LOCK_INIT(_lock)
#endif
+#define _PLIST_HEAD_INIT(head) \
+ .prio_list = LIST_HEAD_INIT((head).prio_list), \
+ .node_list = LIST_HEAD_INIT((head).node_list)
+
/**
* PLIST_HEAD_INIT - static struct plist_head initializer
* @head: struct plist_head variable name
@@ -103,8 +107,7 @@ struct plist_node {
*/
#define PLIST_HEAD_INIT(head, _lock) \
{ \
- .prio_list = LIST_HEAD_INIT((head).prio_list), \
- .node_list = LIST_HEAD_INIT((head).node_list), \
+ _PLIST_HEAD_INIT(head), \
PLIST_HEAD_LOCK_INIT(&(_lock)) \
}
@@ -116,7 +119,7 @@ struct plist_node {
#define PLIST_NODE_INIT(node, __prio) \
{ \
.prio = (__prio), \
- .plist = PLIST_HEAD_INIT((node).plist, NULL), \
+ .plist = { _PLIST_HEAD_INIT((node).plist) }, \
}
/**
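The new _PLIST_HEAD_INIT() helper lets PLIST_NODE_INIT() fill in the node's embedded list heads without requiring a spinlock argument, so a node can now be initialized statically on its own. A minimal usage sketch (the variable name is made up):

	/* Static, lock-less node initialization using the reworked macro. */
	static struct plist_node example_node =
		PLIST_NODE_INIT(example_node, MAX_PRIO);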
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2c36f62e7544..ff904b0606d4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -998,6 +998,7 @@ struct sched_class {
struct rq *busiest, struct sched_domain *sd,
enum cpu_idle_type idle);
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+ int (*needs_post_schedule) (struct rq *this_rq);
void (*post_schedule) (struct rq *this_rq);
void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
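The new needs_post_schedule() hook lets the scheduler core decide, while it still holds the runqueue lock, whether a post_schedule() pass will be needed once the lock is dropped. A rough sketch of the calling pattern (not the exact core code):

	/* Query while rq->lock is still held ... */
	int post_schedule = current->sched_class->needs_post_schedule(rq);

	/* ... context switch happens and rq->lock is released ... */

	if (post_schedule)
		current->sched_class->post_schedule(rq);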
@@ -1052,6 +1053,10 @@ struct sched_entity {
u64 last_wakeup;
u64 avg_overlap;
+ u64 start_runtime;
+ u64 avg_wakeup;
+ u64 nr_migrations;
+
#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
u64 wait_max;
@@ -1067,7 +1072,6 @@ struct sched_entity {
u64 exec_max;
u64 slice_max;
- u64 nr_migrations;
u64 nr_migrations_cold;
u64 nr_failed_migrations_affine;
u64 nr_failed_migrations_running;
@@ -1164,6 +1168,7 @@ struct task_struct {
#endif
struct list_head tasks;
+ struct plist_node pushable_tasks;
struct mm_struct *mm, *active_mm;
@@ -1675,6 +1680,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
return set_cpus_allowed_ptr(p, &new_mask);
}
+/*
+ * Architectures can set this to 1 if they have specified
+ * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
+ * but then during bootup it turns out that sched_clock()
+ * is reliable after all:
+ */
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+extern int sched_clock_stable;
+#endif
+
extern unsigned long long sched_clock(void);
extern void sched_clock_init(void);
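An architecture that selects CONFIG_HAVE_UNSTABLE_SCHED_CLOCK can therefore flip this flag during boot once it finds its clock source trustworthy; on x86 of this era the check looks roughly like the following (illustration only, not part of this diff):

	/* Illustration (x86-style): trust sched_clock() once the TSC is
	 * known to be constant and has not been marked unstable. */
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable())
		sched_clock_stable = 1;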