Commit 5b195ea2 authored by FKHals

[DO NOT MERGE] Properly init number of exited procs
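Initialize pb_rq->num_exited_procs from the reference model's child_count when the plan is installed, rework the fork handling to replace the pre-fork stack entry with the parent node before pushing the child, and reset only the root process back to the fair scheduler on exit.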

parent f888bd96
@@ -41,6 +41,8 @@ SYSCALL_DEFINE1(pb_set_plan, pb_plan_t __user*, plan) {
     unsigned int i;
     int res;
+    PBM* pbm;
+
     copied = copy_from_user(&_plan, plan, sizeof(pb_plan_t));
     if (copied != 0) {
@@ -68,8 +70,10 @@ SYSCALL_DEFINE1(pb_set_plan, pb_plan_t __user*, plan) {
         return -1;
     }
 
+    pbm = get_pbm_by_pid(last_plan_pid/*_plan.ref_pid (FIXME: THIS IS JUST FOR TESTING PURPOSES since it would be better to be able to insert the wanted pid as an argument)*/);
+
     // prepare the plan runtime stack by pushing the root node of the reference model/plan
-    plan_rt_state_push(get_pbm_by_pid(last_plan_pid/*_plan.ref_pid (FIXME: THIS IS JUST FOR TESTING PURPOSES)*/)->root, task);
+    plan_rt_state_push(pbm->root, task);
 
     rq = this_rq();
@@ -77,12 +81,18 @@ SYSCALL_DEFINE1(pb_set_plan, pb_plan_t __user*, plan) {
     // it to continue running instead of getting switched
     reset_triggering_syscall_info();
 
-    // FIXME All new processes need to be initialized with the same initial settings as this one!
+    // Only the root process has its scheduling class set manually here: it is already initialized
+    // for the fair scheduler by the fork() syscall in pb_submitter, which must happen before the
+    // plan is installed, since the PID of the root process is needed to initialize the
+    // performance counting as early as possible.
+    // BEWARE: This also means that the scheduling class of this root process needs to be reset to
+    // the fair scheduler on exit so that it can do the necessary cleanup in its data structures.
     task->sched_class = &pb_sched_class;
 
     pb_rq = &rq->pb;
     pb_rq->root_proc = task;
+    pb_rq->num_exited_procs = pbm->child_count;
     set_pb_plan_size(pb_rq, _plan.num_tasks);
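Since pb_rq->num_exited_procs is seeded from pbm->child_count, the following sketch shows one way such a count could be derived from the process behavior model: every FORK node in the reference tree corresponds to one spawned process that must eventually exit. count_forks() is hypothetical and uses only the pbm_NODE fields (type, children, next_sib) that appear in this commit; how child_count is actually maintained in behave.c is not shown here.

/* Hypothetical helper, not part of this patch: derive the number of
 * processes spawned in a reference model by counting its FORK nodes. */
static u64 count_forks(const pbm_NODE* node)
{
    u64 count = 0;

    /* walk the sibling chain, recursing into each node's children */
    for (; node; node = node->next_sib) {
        if (node->type == FORK)
            count++;
        count += count_forks(node->children);
    }
    return count;
}

/* e.g.: pbm->child_count = count_forks(pbm->root); */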
@@ -223,15 +233,12 @@ static void dequeue_task_pb(struct rq *rq, struct task_struct *p, int flags)
     u64 perf_counter;
     u64 counter_diff;
     u64 read_error;
-    bool premature_finish = false;
     u64 expected_instr_count;
+    u64 diff_from_expected;
     pbm_NODE* cur_node;
     struct task_struct* cur_proc;
     struct perf_event* pevent;
-    int fork_node_type;
-    pbm_NODE* fork_node;
     bool process_exited = false;
 
     printk("Dequeue task: %u\n", p->pid);
@@ -242,6 +249,7 @@ static void dequeue_task_pb(struct rq *rq, struct task_struct *p, int flags)
     pb->waiting_on_io = 1;
     c_entry_curr = pb->c_entry;
+    // save the current process for later use since the plan_rt_state might get modified
     plan_rt_state_peek(&cur_node, &cur_proc);
     pevent = get_pevent_by_pid(cur_proc->pid);
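For orientation while reading the hunks below: the plan runtime state behaves like a stack of (model node, process) pairs. The declarations below are inferred from the call sites in this commit rather than copied from a header, so the exact signatures are assumptions.

/* inferred from call sites in this commit; actual prototypes may differ */
void plan_rt_state_push(pbm_NODE* node, struct task_struct* proc);
void plan_rt_state_pop(void);
void plan_rt_state_peek(pbm_NODE** node, struct task_struct** proc);
u64 plan_rt_state_num_exited_procs(void);
void plan_rt_state_debug_print(void);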
@@ -258,24 +266,40 @@ static void dequeue_task_pb(struct rq *rq, struct task_struct *p, int flags)
     switch(pb->triggering_syscall.type) {
         case sched_trig_FORK:
-            // TODO If we do a fork: At which point is this called? before, during (between
-            // parent and child) or after switching to the child?
+        {
+            int fork_node_type;
+            pbm_NODE* fork_node = cur_node->children;
+            fork_node_type = fork_node->type;
+            /**
+             * the scheduling class (pb) of the forked child is set in kernel/sched/core.c:sched_fork()
+             */
             printk(KERN_WARNING Bold Red "FORK TRIGGERED THIS!!!" End "\n");
             // if a fork occurred then the next node should be a fork node
-            fork_node = cur_node->children;
-            fork_node_type = fork_node->type;
             if (FORK != fork_node_type) {
                 printk(KERN_WARNING "ERROR: Fork node expected but got: %i\n", fork_node_type);
+                //TODO: Delegate to higher instance
             }
-            // set the scheduling policy
-            pb->triggering_syscall.target->sched_class = &pb_sched_class;
-            /* FIXME This does not work properly in case of the first (root) fork */
-            // since we prepend the child node in pbm_fork() (see behave.c) the child of a
-            // fork-node is the child node while the parent is the next sibling (next_sib).
-            plan_rt_state_push(fork_node->children,
-                               pb->triggering_syscall.target);
+            /**
+             * since we prepend the child node in pbm_fork() (see behave.c) the child of a
+             * fork-node is the child node (->children) while the parent is the next sibling
+             * (->next_sib).
+             */
+            // update the parent entry: keep the process but replace the node before the fork
+            // with the _parent_ node after it
+            // Precondition: the plan_rt_state is not empty (since pb_set_plan() initialized it)
+            plan_rt_state_pop();
+            plan_rt_state_push(fork_node->children->next_sib, pb->triggering_syscall.origin); // .origin (for parent)
+            // add the child
+            plan_rt_state_push(fork_node->children, pb->triggering_syscall.target);
             plan_rt_state_debug_print();
             break;
+        }
         case sched_trig_EXIT:
             printk(KERN_WARNING Bold Red "EXIT TRIGGERED THIS!!!" End "\n");
@@ -317,15 +341,10 @@ static void dequeue_task_pb(struct rq *rq, struct task_struct *p, int flags)
      */
             break;
     }
-    // reset the info so that the relevant syscalls can be detected if they are the trigger
+    // reset the info so that the next relevant triggering syscall can be detected again
     reset_triggering_syscall_info();
 
     // printk(KERN_ALERT "DEBUG: Passed %s %d \n",__FUNCTION__,__LINE__);
-    /*
-     * dequeue_task() is called in __schedule() by deactivate_task() BEFORE pick_next_task() which
-     * means the state of the plan (e.g. in case of an exit) is not yet updated so we can assume
-     * that plan_rt_state still contains the current process as the head even if it is exiting!
-     */
     read_error = get_perf_counter(pevent, &perf_counter);
     if (read_error) {
         printk(KERN_WARNING "FETCHING PERFORMANCE COUNTER IN PB SCHEDULER FAILED WITH %llu\n", read_error);
@@ -334,30 +353,24 @@ static void dequeue_task_pb(struct rq *rq, struct task_struct *p, int flags)
     printk(KERN_WARNING "COUNTER DIFF: %llu\n", counter_diff);
     pb->plan[c_entry_curr].n_instr_counted = counter_diff;
     pb->total_instr = perf_counter;
-    if (counter_diff < expected_instr_count) {
-        u64 under_time = expected_instr_count - counter_diff;
-        printk(KERN_WARNING "PB TASK %llu RAN %llu INSTRUCTIONS TOO SHORT\n", pb->plan[pb->c_entry].task_id, under_time);
-    } else if (counter_diff > expected_instr_count) {
-        //TODO: Check if actually an overflow occurs and an another calculation is necessary
-        // (setting a flag in the perf overflow_handler could be a solution)
-        u64 over_time = counter_diff - expected_instr_count;
-        printk(KERN_WARNING "PB TASK %llu RAN %llu INSTRUCTIONS TOO LONG\n", pb->plan[pb->c_entry].task_id, over_time);
-    }
+    // note: abs() is a no-op on unsigned types, so compute the difference branch-wise
+    diff_from_expected = counter_diff > expected_instr_count ?
+            counter_diff - expected_instr_count : expected_instr_count - counter_diff;
+    //TODO: Set proper threshold for significance (relative values would probably be better than absolutes)
+    if (diff_from_expected > 0) {
+        printk(KERN_WARNING "PB TASK %u RAN %llu INSTRUCTIONS TOO %s\n", cur_proc->pid, diff_from_expected,
+               counter_diff < expected_instr_count ? "SHORT" : "LONG");
+    }
 
     pb->c_entry++;
 
-    /**
-     * Don't schedule a task that is dead. (e.g. plan was incorrect and program finished quicker)
-     * TODO: if we have multiple tasks structs try the next plan entry
-     */
-    if (!is_plan_finished(pb) && plan_rt_state_peek_proc()->state == TASK_DEAD) {
-        premature_finish = true;
+    //TODO: Can this actually happen? Can a process die without calling exit?
+    // remove a dead process which has not called exit from the plan
+    if (!process_exited && cur_proc->state == TASK_DEAD) {
+        plan_rt_state_pop();
     }
 
-    if (is_plan_finished(pb) || premature_finish) {
-        if (premature_finish) {
+    if (is_plan_finished(pb)) {
+        if (!is_plan_successful(pb)) {
             printk(KERN_WARNING "PLAN TERMINATED PREMATURELY \n");
         }
         else {
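The TODO in the hunk above asks for a proper significance threshold, preferably a relative one. A minimal sketch of what such a check could look like, assuming kernel u64/bool types; the function name and the 5% tolerance are placeholders, not part of this patch:

#include <linux/types.h>

#define PB_INSTR_TOLERANCE_PCT 5 /* arbitrary placeholder tolerance */

/* hypothetical: report a deviation only if it exceeds a relative bound */
static inline bool pb_deviation_significant(u64 counted, u64 expected)
{
    /* overflow-safe absolute difference of two u64 values */
    u64 diff = counted > expected ? counted - expected
                                  : expected - counted;

    /* integer-only comparison of diff/expected against PCT/100 */
    return diff * 100 > expected * PB_INSTR_TOLERANCE_PCT;
}

The multiplications can overflow for extremely large instruction counts; a production version would need to guard against that.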
@@ -369,21 +382,12 @@ static void dequeue_task_pb(struct rq *rq, struct task_struct *p, int flags)
     }
 
     printk(KERN_WARNING Bold Yellow "Exited: %i .Proc state: %li (?= %i)" End "\n", process_exited, cur_proc->state, TASK_DEAD);
-    if (process_exited /*|| cur_proc->state == TASK_DEAD*/) {
+    if (process_exited && pb->root_proc == cur_proc) {
         cur_proc->sched_class = &fair_sched_class;
-        /*
-        // set scheduler to CFS (see #normalize_rt_tasks(void))
-        struct sched_attr attr = {
-            .sched_policy = SCHED_NORMAL,
-            //.sched_priority = 0,
-            //.sched_nice = 0
-        };
-        // this also calls set_curr_task() and check_class_changed() which is important (?)
-        sched_setattr(cur_proc, &attr);
-        */
         resched_curr(rq);
+    }
 
+    /*
     // show all current processes (source: https://unix.stackexchange.com/questions/299140/linux-is-there-a-way-to-dump-the-task-run-queue/336663#336663)
     {
         struct task_struct *process, *thread;
@@ -405,7 +409,7 @@ static void dequeue_task_pb(struct rq *rq, struct task_struct *p, int flags)
         }
         rcu_read_unlock();
     }
-}
+    */
 }
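These two hunks implement the BEWARE note from pb_set_plan() above: only the root process (pb->root_proc), whose scheduling class was set manually rather than through sched_fork(), is handed back to the fair scheduler when it exits.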
 static struct task_struct * pick_next_task_pb(struct rq *rq,
...
@@ -926,6 +926,7 @@ static inline int is_plan_finished(struct pb_rq* pb)
  */
 static inline int is_plan_successful(struct pb_rq* pb)
 {
+    printk(KERN_WARNING "Plan successful? Exited processes: actual: %llu, expected: %llu\n", pb->num_exited_procs, plan_rt_state_num_exited_procs());
     return is_plan_finished(pb) && pb->num_exited_procs == plan_rt_state_num_exited_procs();
 }
...