author	Li Zefan <lizefan@huawei.com>	2013-03-05 16:06:23 +0800
committer	Ingo Molnar <mingo@kernel.org>	2013-03-06 11:24:31 +0100
commit	5e6521eaa1ee581a13b904f35b80c5efeb2baccb (patch)
tree	4a8e82ba57da872636ff432edc036914163249e5 /include
parent	cc1f4b1f3faed9f2040eff2a75f510b424b3cf18 (diff)
sched: Move struct sched_group to kernel/sched/sched.h
Move struct sched_group_power and sched_group and related inline functions
to kernel/sched/sched.h, as they are used internally only.

Signed-off-by: Li Zefan <lizefan@huawei.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/5135A77F.2010705@huawei.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
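For readers skimming the patch, here is a minimal sketch of the header split it performs: the full struct definitions move into the scheduler's private header, while the public header keeps only a forward declaration, which is all outside code needs in order to hold pointers. The struct bodies below are abbreviated and the "example_" names are hypothetical stand-ins for sched_domain/sched_group, not the kernel sources themselves.

/*
 * Illustrative sketch of the split (assumed, simplified layout).
 * Compiles as a standalone C translation unit.
 */

/* Public-header side (mirrors include/linux/sched.h): a forward
 * declaration is enough, because external users only store pointers. */
struct example_group;

struct example_domain {
	struct example_domain *parent;
	struct example_group *groups;	/* incomplete type: pointer use only */
};

/* Private-header side (mirrors kernel/sched/sched.h): the full definition
 * is visible only to the scheduler core, which dereferences it. */
struct example_group {
	struct example_group *next;	/* circular list of groups */
	unsigned int weight;
	unsigned long cpumask[];	/* CPUs covered by this group */
};

Keeping only the forward declaration in the widely included sched.h also means later changes to these internals no longer touch a header that most of the kernel builds against.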
Diffstat (limited to 'include')
-rw-r--r--	include/linux/sched.h	58
1 file changed, 2 insertions(+), 56 deletions(-)
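Continuing the hypothetical example_ sketch above, a rough illustration of the consumer-side effect (not code from the patch): anything built against the public header alone can still declare and pass these pointers, but dereferencing them, as the removed sched_group_cpus()/group_first_cpu() helpers did, now only compiles inside kernel/sched/.

/* Hypothetical user that sees only the public header's forward declaration. */
struct example_group;			/* incomplete type, as in sched.h */

static struct example_group *cached;	/* hypothetical per-file cache */

void example_remember(struct example_group *g)
{
	cached = g;	/* fine: pointer assignment needs no definition */
	/* cached->weight would not compile here; the definition is private now */
}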
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f8826d04fb12..0d641304c0ff 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -780,62 +780,6 @@ enum cpu_idle_type {
extern int __weak arch_sd_sibiling_asym_packing(void);
-struct sched_group_power {
- atomic_t ref;
- /*
- * CPU power of this group, SCHED_LOAD_SCALE being max power for a
- * single CPU.
- */
- unsigned int power, power_orig;
- unsigned long next_update;
- /*
- * Number of busy cpus in this group.
- */
- atomic_t nr_busy_cpus;
-
- unsigned long cpumask[0]; /* iteration mask */
-};
-
-struct sched_group {
- struct sched_group *next; /* Must be a circular list */
- atomic_t ref;
-
- unsigned int group_weight;
- struct sched_group_power *sgp;
-
- /*
- * The CPUs this group covers.
- *
- * NOTE: this field is variable length. (Allocated dynamically
- * by attaching extra space to the end of the structure,
- * depending on how many CPUs the kernel has booted up with)
- */
- unsigned long cpumask[0];
-};
-
-static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
-{
- return to_cpumask(sg->cpumask);
-}
-
-/*
- * cpumask masking which cpus in the group are allowed to iterate up the domain
- * tree.
- */
-static inline struct cpumask *sched_group_mask(struct sched_group *sg)
-{
- return to_cpumask(sg->sgp->cpumask);
-}
-
-/**
- * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
- * @group: The group whose first cpu is to be returned.
- */
-static inline unsigned int group_first_cpu(struct sched_group *group)
-{
- return cpumask_first(sched_group_cpus(group));
-}
-
struct sched_domain_attr {
int relax_domain_level;
};
@@ -846,6 +790,8 @@ struct sched_domain_attr {
extern int sched_domain_level_max;
+struct sched_group;
+
struct sched_domain {
/* These fields must be setup */
struct sched_domain *parent; /* top domain must be null terminated */