Commit 9d30e487 authored by Ollrogge

work in progress

parent 4242685e
@@ -18,7 +18,7 @@ SYSCALL_DEFINE1(pb_set_plan, pb_plan_t*, plan) {
/*
* It is possible to submit a plan only if no plan is currently being executed
*/
int pb_submit_plan(struct pb_init_struct *initstr, struct rq *rq)
int pb_submit_plan(struct rq *rq)
{
/*
* Must be volatile to ensure correct initialization order
@@ -29,10 +29,10 @@ int pb_submit_plan(struct pb_init_struct *initstr, struct rq *rq)
return -1;
}
pb->plan = initstr->plan;
pb->size = initstr->size;
pb->n_pb_cycles = initstr->n_pb_cycles;
pb->n_admin_cycles = initstr->n_admin_cycles;
pb->plan = pb->plan;
pb->size = pb->size;
pb->n_pb_cycles = pb->n_pb_cycles;
pb->n_admin_cycles = pb->n_admin_cycles;
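/*
 * Note: with initstr gone from the signature, the four assignments above are
 * self-assignments and effectively no-ops; the plan data is presumably
 * already stored in pb_rq before submission (e.g. by the pb_set_plan syscall
 * visible in the hunk context above).
 */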
pb->c_entry = 0;
pb->count_pb_cycles = 0;
@@ -44,6 +44,7 @@ int pb_submit_plan(struct pb_init_struct *initstr, struct rq *rq)
return 0;
}
EXPORT_SYMBOL(pb_submit_plan);
/*
* Kelvin's test code
*/
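With the new signature, callers no longer pass a pb_init_struct; the plan is expected to sit in the runqueue's pb_rq before pb_submit_plan() is called. A rough sketch of that flow follows — the helper name and the example cycle counts are assumptions, not part of this commit; the pb_rq fields are the ones declared in the sched.h hunks below.

/* Sketch (hypothetical helper): install a plan into the pb_rq, then submit it. */
static int pb_install_and_submit(struct rq *rq, struct plan_entry *entries,
                                 unsigned int n)
{
        struct pb_rq *pb = &rq->pb;

        pb->plan = entries;      /* array of plan_entry, one per PB task slice */
        pb->size = n;            /* number of entries in the plan */
        pb->n_pb_cycles = 100;   /* example: ticks to spend in PB_EXEC_MODE */
        pb->n_admin_cycles = 20; /* example: ticks to spend in PB_ADMIN_MODE */

        /* Returns -1 if a plan is already being executed, 0 on success. */
        return pb_submit_plan(rq);
}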
@@ -64,7 +64,9 @@ extern long calc_load_fold_active(struct rq *this_rq, long adjust);
#ifdef CONFIG_SMP
extern void cpu_load_update_active(struct rq *this_rq);
#else
static inline void cpu_load_update_active(struct rq *this_rq) { }
static inline void cpu_load_update_active(struct rq *this_rq)
{
}
#endif
/*
@@ -72,8 +74,6 @@ static inline void cpu_load_update_active(struct rq *this_rq) { }
*/
#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
/*
* Increase resolution of nice-level calculations for 64-bit architectures.
* The extra resolution improves shares distribution and load balancing of
@@ -171,12 +171,14 @@ dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
/*
* This is the priority-queue data structure of the RT scheduling class:
*/
struct rt_prio_array {
struct rt_prio_array
{
DECLARE_BITMAP(bitmap, MAX_RT_PRIO + 1); /* include 1 bit for delimiter */
struct list_head queue[MAX_RT_PRIO];
};
struct rt_bandwidth {
struct rt_bandwidth
{
/* nests inside the rq lock: */
raw_spinlock_t rt_runtime_lock;
ktime_t rt_period;
@@ -211,7 +213,8 @@ void __dl_clear_params(struct task_struct *p);
* be red. It on its turn can be changed by writing on its own
* control.
*/
struct dl_bandwidth {
struct dl_bandwidth
{
raw_spinlock_t dl_runtime_lock;
u64 dl_runtime;
u64 dl_period;
@@ -222,29 +225,27 @@ static inline int dl_bandwidth_enabled(void)
return sysctl_sched_rt_runtime >= 0;
}
struct dl_bw {
struct dl_bw
{
raw_spinlock_t lock;
u64 bw, total_bw;
};
static inline void __dl_update(struct dl_bw *dl_b, s64 bw);
static inline
void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
static inline void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
dl_b->total_bw -= tsk_bw;
__dl_update(dl_b, (s32)tsk_bw / cpus);
}
static inline
void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
static inline void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
{
dl_b->total_bw += tsk_bw;
__dl_update(dl_b, -((s32)tsk_bw / cpus));
}
static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
static inline bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
return dl_b->bw != -1 &&
dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
@@ -276,7 +277,8 @@ struct rt_rq;
extern struct list_head task_groups;
struct cfs_bandwidth {
struct cfs_bandwidth
{
#ifdef CONFIG_CFS_BANDWIDTH
raw_spinlock_t lock;
ktime_t period;
@@ -295,7 +297,8 @@ struct cfs_bandwidth {
};
/* task group related information */
struct task_group {
struct task_group
{
struct cgroup_subsys_state css;
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -409,18 +412,23 @@ extern void set_task_rq_fair(struct sched_entity *se,
struct cfs_rq *prev, struct cfs_rq *next);
#else /* !CONFIG_SMP */
static inline void set_task_rq_fair(struct sched_entity *se,
struct cfs_rq *prev, struct cfs_rq *next) { }
struct cfs_rq *prev, struct cfs_rq *next)
{
}
#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */
#else /* CONFIG_CGROUP_SCHED */
struct cfs_bandwidth { };
struct cfs_bandwidth
{
};
#endif /* CONFIG_CGROUP_SCHED */
/* CFS-related fields in a runqueue */
struct cfs_rq {
struct cfs_rq
{
struct load_weight load;
unsigned int nr_running, h_nr_running;
@@ -503,7 +511,8 @@ struct cfs_rq {
/*
* Mode of the PB scheduler
*/
enum pb_mode{
enum pb_mode
{
PB_DISABLED_MODE, // PB scheduler is disabled (no plan available)
PB_EXEC_MODE, // PB task is being executed
PB_ADMIN_MODE // Admin task is being executed
@@ -514,13 +523,15 @@ enum pb_mode{
/*
* A PB-Task consists of one or more plan_entry
*/
struct plan_entry {
struct plan_entry
{
u64 n_instr; // number of instructions in the entry
u64 task_id;
struct task_struct *task_struct; // linux task struct
};
struct pb_init_struct {
struct pb_init_struct
{
struct plan_entry *plan; // plan
unsigned int size; // size of the plan
@@ -528,7 +539,8 @@ struct pb_init_struct {
u64 n_admin_cycles; // number of timer ticks before PB tasks are allowed to run
};
struct pb_rq {
struct pb_rq
{
struct plan_entry *plan; // plan (used to be proxy_task)
unsigned int size; // size of the plan
unsigned int c_entry; // index of currently executed entry
@@ -550,7 +562,7 @@ struct pb_rq {
volatile int is_initialized;
};
int pb_submit_plan(struct pb_init_struct *initstr, struct rq *rq);
int pb_submit_plan(struct rq *rq);
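/*
 * Illustration (not part of this commit): a minimal two-entry plan as
 * described by the structures above. Values are made up; the task_struct
 * pointers would be resolved when the plan is installed.
 *
 *   struct plan_entry demo_plan[] = {
 *           { .n_instr = 1000000, .task_id = 1, .task_struct = NULL },
 *           { .n_instr =  500000, .task_id = 2, .task_struct = NULL },
 *   };
 *
 * pb_rq then holds plan = demo_plan and size = 2; c_entry starts at 0 and
 * advances as entries are completed.
 */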
static inline int rt_bandwidth_enabled(void)
{
@@ -563,12 +575,14 @@ static inline int rt_bandwidth_enabled(void)
#endif
/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
struct rt_rq
{
struct rt_prio_array active;
unsigned int rt_nr_running;
unsigned int rr_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
struct {
struct
{
int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
int next; /* next highest */
@@ -604,7 +618,8 @@ struct rt_rq {
};
/* Deadline class' related fields in a runqueue */
struct dl_rq {
struct dl_rq
{
/* runqueue is an rbtree, ordered by deadline */
struct rb_root rb_root;
struct rb_node *rb_leftmost;
@@ -618,7 +633,8 @@ struct dl_rq {
* the decision whether or not a ready but not running task
* should migrate somewhere else.
*/
struct {
struct
{
u64 curr;
u64 next;
} earliest_dl;
@@ -677,7 +693,8 @@ static inline bool sched_asym_prefer(int a, int b)
* object.
*
*/
struct root_domain {
struct root_domain
{
atomic_t refcount;
atomic_t rto_count;
struct rcu_head rcu;
@@ -722,7 +739,8 @@ extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
* (such as the load balancing or the thread migration code), lock
* acquire operations must be ordered by ascending &runqueue.
*/
struct rq {
struct rq
{
/* runqueue lock: */
raw_spinlock_t lock;
@@ -875,43 +893,56 @@ static inline int determine_next_mode_pb(struct rq *rq)
int mode = PB_DISABLED_MODE;
struct pb_rq *pb = &(rq->pb);
if (pb->c_entry < pb->size) {
if (pb->c_entry < pb->size)
{
// initial switch
if (pb->mode == PB_DISABLED_MODE && pb->is_initialized) {
if (pb->mode == PB_DISABLED_MODE && pb->is_initialized)
{
return PB_EXEC_MODE;
} else {
if (pb->mode == PB_EXEC_MODE) {
}
else
{
if (pb->mode == PB_EXEC_MODE)
{
// stay for n timer interrupt cycles in exec mode
/*
* Is the tick interrupt active in this moment?
*/
if(pb->count_pb_cycles > pb->n_pb_cycles) {
if (pb->count_pb_cycles > pb->n_pb_cycles)
{
mode = PB_ADMIN_MODE;
pb->count_pb_cycles = 0;
} else {
}
else
{
mode = PB_EXEC_MODE;
}
} else if (pb->mode == PB_ADMIN_MODE) {
}
else if (pb->mode == PB_ADMIN_MODE)
{
// stay for n timer interrupt cycles in admin mode for admin tasks
/*
* Is the tick interrupt active in this moment?
*/
if (pb->count_admin_cycles > pb->n_admin_cycles) {
if (pb->count_admin_cycles > pb->n_admin_cycles)
{
/*
* If the current plan_entry's process is blocked, we cannot
* switch to PB_EXEC_MODE. The reason could be that administrative
* tasks were favored by the default scheduler and the entry's I/O
* starved. We have to wait until the process is runnable again.
*/
if (pb->plan[pb->c_entry].task_struct->state >= 0) {
if (pb->plan[pb->c_entry].task_struct->state >= 0)
{
/*
* 0 == Runnable (IO succeeded)
* >0 == stopped (Process finished)
*/
mode = PB_EXEC_MODE;
pb->count_admin_cycles = 0;
} else {
}
else
{
/*
* -1 == unrunnable
*/
@@ -920,8 +951,9 @@ static inline int determine_next_mode_pb(struct rq *rq)
pb->plan[pb->c_entry].task_id,
pb->count_admin_cycles - pb->n_admin_cycles);
}
} else {
}
else
{
mode = PB_ADMIN_MODE;
}
}
@@ -931,7 +963,6 @@ static inline int determine_next_mode_pb(struct rq *rq)
return mode;
}
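/*
 * Summary of the state machine above (the cycle counters are assumed to be
 * incremented elsewhere, e.g. in the scheduler tick; that code is not part
 * of this hunk):
 *
 *   PB_DISABLED_MODE --(is_initialized && entries left)-----------> PB_EXEC_MODE
 *   PB_EXEC_MODE     --(count_pb_cycles > n_pb_cycles)------------> PB_ADMIN_MODE
 *   PB_ADMIN_MODE    --(count_admin_cycles > n_admin_cycles
 *                       && current plan entry is runnable)--------> PB_EXEC_MODE
 *
 * The counter that triggered a transition is reset to 0. If the plan entry's
 * task is still blocked, the switch back to PB_EXEC_MODE is deferred and the
 * delay is reported via printk.
 */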
#ifdef CONFIG_SCHED_SMT
extern struct static_key_false sched_smt_present;
@@ -945,7 +976,9 @@ static inline void update_idle_core(struct rq *rq)
}
#else
static inline void update_idle_core(struct rq *rq) { }
static inline void update_idle_core(struct rq *rq)
{
}
#endif
DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -1022,7 +1055,8 @@ static inline void rq_clock_skip_update(struct rq *rq, bool skip)
rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}
struct rq_flags {
struct rq_flags
{
unsigned long flags;
struct pin_cookie cookie;
#ifdef CONFIG_SCHED_DEBUG
@@ -1068,7 +1102,8 @@ static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
}
#ifdef CONFIG_NUMA
enum numa_topology_type {
enum numa_topology_type
{
NUMA_DIRECT,
NUMA_GLUELESS_MESH,
NUMA_BACKPLANE,
@@ -1083,14 +1118,17 @@ extern void sched_init_numa(void);
extern void sched_domains_numa_masks_set(unsigned int cpu);
extern void sched_domains_numa_masks_clear(unsigned int cpu);
#else
static inline void sched_init_numa(void) { }
static inline void sched_init_numa(void)
{
}
static inline void sched_domains_numa_masks_set(unsigned int cpu) {}
static inline void sched_domains_numa_masks_clear(unsigned int cpu) {}
#endif
#ifdef CONFIG_NUMA_BALANCING
/* The regions in numa_faults array from task_struct */
enum numa_faults_stats {
enum numa_faults_stats
{
NUMA_MEM = 0,
NUMA_CPU,
NUMA_MEMBUF,
@@ -1150,7 +1188,8 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
{
struct sched_domain *sd, *hsd = NULL;
for_each_domain(cpu, sd) {
for_each_domain(cpu, sd)
{
if (!(sd->flags & flag))
break;
hsd = sd;
@@ -1163,7 +1202,8 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
struct sched_domain *sd;
for_each_domain(cpu, sd) {
for_each_domain(cpu, sd)
{
if (sd->flags & flag)
break;
}
@@ -1178,7 +1218,8 @@ DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
DECLARE_PER_CPU(struct sched_domain *, sd_asym);
struct sched_group_capacity {
struct sched_group_capacity
{
atomic_t ref;
/*
* CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
@@ -1196,7 +1237,8 @@ struct sched_group_capacity {
unsigned long cpumask[0]; /* balance mask */
};
struct sched_group {
struct sched_group
{
struct sched_group *next; /* Must be a circular list */
atomic_t ref;
@@ -1252,7 +1294,9 @@ static inline void unregister_sched_domain_sysctl(void)
#else
static inline void sched_ttwu_pending(void) { }
static inline void sched_ttwu_pending(void)
{
}
#endif /* CONFIG_SMP */
@@ -1300,7 +1344,9 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
#else /* CONFIG_CGROUP_SCHED */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
}
static inline struct task_group *task_group(struct task_struct *p)
{
return NULL;
@@ -1342,7 +1388,8 @@ extern const_debug unsigned int sysctl_sched_features;
#define SCHED_FEAT(name, enabled) \
__SCHED_FEAT_##name,
enum {
enum
{
#include "features.h"
__SCHED_FEAT_NR,
};
@@ -1407,10 +1454,16 @@ static inline int task_on_rq_migrating(struct task_struct *p)
}
#ifndef prepare_arch_switch
# define prepare_arch_switch(next) do { } while (0)
#define prepare_arch_switch(next) \
do \
{ \
} while (0)
#endif
#ifndef finish_arch_post_lock_switch
# define finish_arch_post_lock_switch() do { } while (0)
#define finish_arch_post_lock_switch() \
do \
{ \
} while (0)
#endif
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
@@ -1515,7 +1568,8 @@ extern const u32 sched_prio_to_wmult[40];
#define RETRY_TASK ((void *)-1UL)
struct sched_class {
struct sched_class
{
const struct sched_class *next;
void (*enqueue_task)(struct rq *rq, struct task_struct *p, int flags);
@@ -1604,7 +1658,6 @@ extern const struct sched_class pb_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;
#ifdef CONFIG_SMP
extern void update_group_capacity(struct sched_domain *sd, int cpu);
@@ -1695,7 +1748,9 @@ static inline void sched_update_tick_dependency(struct rq *rq)
tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
}
#else
static inline void sched_update_tick_dependency(struct rq *rq) { }
static inline void sched_update_tick_dependency(struct rq *rq)
{
}
#endif
static inline void add_nr_running(struct rq *rq, unsigned count)
@@ -1704,7 +1759,8 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
rq->nr_running = prev_nr + count;
if (prev_nr < 2 && rq->nr_running >= 2) {
if (prev_nr < 2 && rq->nr_running >= 2)
{
#ifdef CONFIG_SMP
if (!rq->rd->overload)
rq->rd->overload = true;
@@ -1775,16 +1831,14 @@ static inline int hrtick_enabled(struct rq *rq)
extern void sched_avg_update(struct rq *rq);
#ifndef arch_scale_freq_capacity
static __always_inline
unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
static __always_inline unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
{
return SCHED_CAPACITY_SCALE;
}
#endif
#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
static __always_inline unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
return sd->smt_gain / sd->span_weight;
@@ -1799,7 +1853,9 @@ static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
}
static inline void sched_avg_update(struct rq *rq) {}
#endif
@@ -1922,14 +1978,17 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
int ret = 0;
if (unlikely(!raw_spin_trylock(&busiest->lock))) {
if (busiest < this_rq) {
if (unlikely(!raw_spin_trylock(&busiest->lock)))
{
if (busiest < this_rq)
{
raw_spin_unlock(&this_rq->lock);
raw_spin_lock(&busiest->lock);
raw_spin_lock_nested(&this_rq->lock,
SINGLE_DEPTH_NESTING);
ret = 1;
} else
}
else
raw_spin_lock_nested(&busiest->lock,
SINGLE_DEPTH_NESTING);
}
@@ -1943,7 +2002,8 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
*/
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
if (unlikely(!irqs_disabled())) {
if (unlikely(!irqs_disabled()))
{
/* printk() doesn't work good under rq->lock */
raw_spin_unlock(&this_rq->lock);
BUG_ON(1);
@@ -1997,14 +2057,20 @@ static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
__acquires(rq2->lock)
{
BUG_ON(!irqs_disabled());
if (rq1 == rq2) {
if (rq1 == rq2)
{
raw_spin_lock(&rq1->lock);
__acquire(rq2->lock); /* Fake it out ;) */
} else {
if (rq1 < rq2) {
}
else
{
if (rq1 < rq2)
{
raw_spin_lock(&rq1->lock);
raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
} else {
}
else
{
raw_spin_lock(&rq2->lock);
raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
}
@@ -2097,7 +2163,8 @@ extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);
#ifdef CONFIG_NO_HZ_COMMON
enum rq_nohz_flag_bits {
enum rq_nohz_flag_bits
{
NOHZ_TICK_STOPPED,
NOHZ_BALANCE_KICK,
};
@@ -2106,28 +2173,28 @@ enum rq_nohz_flag_bits {
extern void nohz_balance_exit_idle(unsigned int cpu);
#else
static inline void nohz_balance_exit_idle(unsigned int cpu) { }
static inline void nohz_balance_exit_idle(unsigned int cpu)
{
}
#endif
#ifdef CONFIG_SMP
static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
static inline void __dl_update(struct dl_bw *dl_b, s64 bw)
{
struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
int i;
RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
"sched RCU must be held");
for_each_cpu_and(i, rd->span, cpu_active_mask) {
for_each_cpu_and(i, rd->span, cpu_active_mask)
{
struct rq *rq = cpu_rq(i);
rq->dl.extra_bw += bw;
}
}
#else
static inline
void __dl_update(struct dl_bw *dl_b, s64 bw)
static inline void __dl_update(struct dl_bw *dl_b, s64 bw)
{
struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
@@ -2135,9 +2202,9 @@ void __dl_update(struct dl_bw *dl_b, s64 bw)
}
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
struct irqtime {
struct irqtime
{
u64 total;
u64 tick_delta;
u64 irq_start_time;
@@ -2157,7 +2224,8 @@ static inline u64 irq_time_read(int cpu)
unsigned int seq;
u64 total;
do {
do
{
seq = __u64_stats_fetch_begin(&irqtime->sync);
total = irqtime->total;
} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
@@ -2206,7 +2274,9 @@ static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
cpufreq_update_util(rq, flags);
}
#else
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
}
static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
#endif /* CONFIG_CPU_FREQ */
@@ -12,7 +12,7 @@ COMMAND+=$IMAGE_OPTIONS
COMMAND+=" -append \"root=/dev/sda rootwait rw single console=ttyS0 nokaslr\"" # disable kaslr for better gdb debugging
COMMAND+=" --enable-kvm"
COMMAND+=" --nographic"
COMMAND+=" --smp 1"
COMMAND+=" --smp 2"
COMMAND+=" -net nic -net user"
COMMAND+=" -monitor none"