Commit a6fb2554 authored by mandersch

WIP Make use of Dequeue and Enqueue functions to properly handle long tasks and syscalls

parent 60ad08fb
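The idea behind the change, in short: instruction accounting and plan advancement no longer happen inside pick_next_task_pb. dequeue_task_pb fires when the current task leaves the runnable state (it issued a blocking syscall / started IO), so that is where the performance counter is read and waiting_on_io is set; enqueue_task_pb fires when the task becomes runnable again, so that is where c_entry is advanced and plan completion is detected. Below is a self-contained user-space sketch of that bookkeeping. It is not the kernel code from this commit: plan_entry is reduced to three fields, read_instruction_counter() is a hypothetical stand-in for get_perf_counter(), and c_entry is a plain int so it can start at -1.

/*
 * User-space sketch of the enqueue/dequeue bookkeeping (assumptions as noted above).
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

struct plan_entry { uint64_t task_id; uint64_t n_instr; uint64_t n_instr_counted; };

struct pb_sim {
    struct plan_entry plan[2];
    int size;
    int c_entry;          /* index of the current plan entry, -1 before the first enqueue */
    int waiting_on_io;
    uint64_t total_instr; /* counter value when the previous entry was accounted */
};

/* hypothetical perf-counter read; returns a monotonically growing instruction count */
static uint64_t read_instruction_counter(void)
{
    static uint64_t v = 0;
    return v += 900;
}

/* dequeue: the task left the runnable state, i.e. it started IO -> account it */
static void on_dequeue(struct pb_sim *pb)
{
    if (pb->waiting_on_io)
        return;
    pb->waiting_on_io = 1;

    uint64_t counter = read_instruction_counter();
    uint64_t diff = counter - pb->total_instr;
    pb->plan[pb->c_entry].n_instr_counted = diff;
    pb->total_instr = counter;

    if (diff < pb->plan[pb->c_entry].n_instr)
        printf("task %" PRIu64 " ran %" PRIu64 " instructions too short\n",
               pb->plan[pb->c_entry].task_id, pb->plan[pb->c_entry].n_instr - diff);
    else if (diff > pb->plan[pb->c_entry].n_instr)
        printf("task %" PRIu64 " ran %" PRIu64 " instructions too long\n",
               pb->plan[pb->c_entry].task_id, diff - pb->plan[pb->c_entry].n_instr);
}

/* enqueue: the IO finished, the task is runnable again -> advance the plan */
static void on_enqueue(struct pb_sim *pb)
{
    pb->waiting_on_io = 0;
    pb->c_entry++;
    if (pb->c_entry >= pb->size)
        printf("plan done\n");
}

int main(void)
{
    struct pb_sim pb = { { {1, 1000, 0}, {2, 800, 0} }, 2, -1, 0, 0 };

    on_enqueue(&pb);   /* plan entry 0 becomes current */
    on_dequeue(&pb);   /* entry 0 starts IO, its instructions are accounted */
    on_enqueue(&pb);   /* IO done, entry 1 becomes current */
    on_dequeue(&pb);   /* entry 1 accounted */
    on_enqueue(&pb);   /* plan exhausted */
    return 0;
}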
@@ -143,27 +143,83 @@ void init_pb_rq(struct pb_rq *pb_rq)
     pb_rq->n_admin_cycles = 20;
     pb_rq->count_admin_cycles = 0;
     pb_rq->mode = PB_DISABLED_MODE;
-    pb_rq->c_entry = 0;
+    pb_rq->c_entry = -1;
     pb_rq->size = 0;
     pb_rq->pevent = NULL;
     pb_rq->is_initialized = 0;
-    // pb_rq->need_mode_change = 0;
-    pb_rq->is_preempted = false;
-    pb_rq->is_in_critical = false;
+    pb_rq->waiting_on_io = 0;
     raw_spin_lock_init(pb_rq->pb_lock);
 }
 EXPORT_SYMBOL(init_pb_rq);
-// task enters the runnable state
+// IO has finished, we can schedule the next task
 static void enqueue_task_pb(struct rq *rq, struct task_struct *p, int flags)
 {
-    // NOP
+    struct pb_rq *pb = &(rq->pb);
+    bool premature_finish = false;
+    pb->waiting_on_io = 0;
+    pb->c_entry++;
+    /**
+     * Don't schedule a task that is dead. (e.g. plan was incorrect and program finished quicker)
+     * todo: if we have multiple tasks structs just try the next plan entry
+     */
+    if (pb->c_entry < pb->size && pb->plan[pb->c_entry].task_struct->state == TASK_DEAD) {
+        premature_finish = true;
+        pb->is_initialized = 0;
+    }
+    if (pb->c_entry >= pb->size || premature_finish) {
+        if (premature_finish) {
+            printk(KERN_WARNING "PLAN TERMINATED PREMATURELY \n");
+        }
+        else {
+            printk(KERN_WARNING "PLAN DONE \n");
+        }
+    }
+    printk("DONE");
 }
-// task exists the runnable state
+// task started IO and thus it is finished
 static void dequeue_task_pb(struct rq *rq, struct task_struct *p, int flags)
 {
-    // NOP
+    struct pb_rq *pb = &(rq->pb);
+    unsigned int c_entry_curr;
+    u64 perf_counter;
+    u64 counter_diff;
+    u64 read_error;
+    if (pb->waiting_on_io) {
+        return;
+    }
+    pb->waiting_on_io = 1;
+    c_entry_curr = pb->c_entry;
+    if(!pb->pevent) {
+        printk("WARNING: PERF EVENT IS NULL");
+    }
+    // printk(KERN_ALERT "DEBUG: Passed %s %d \n",__FUNCTION__,__LINE__);
+    read_error = get_perf_counter(pb->pevent, &perf_counter);
+    if (read_error) {
+        printk(KERN_WARNING "FETCHING PERFORMANCE COUNTER IN PB SCHEDULER FAILED WITH %llu\n", read_error);
+    }
+    counter_diff = perf_counter - pb->total_instr;
+    pb->plan[c_entry_curr].n_instr_counted = counter_diff;
+    pb->total_instr = perf_counter;
+    if (counter_diff < pb->plan[c_entry_curr].n_instr) {
+        u64 under_time = pb->plan[c_entry_curr].n_instr - counter_diff;
+        printk(KERN_WARNING "PB TASK %llu RAN %llu INSTRUCTIONS TOO SHORT\n", pb->plan[pb->c_entry].task_id, under_time);
+    } else if (counter_diff > pb->plan[c_entry_curr].n_instr) {
+        //TODO: Check if actually an overflow occurs and an another calculation is necessary
+        // (setting a flag in the perf overflow_handler could be a solution)
+        u64 over_time = counter_diff - pb->plan[c_entry_curr].n_instr;
+        printk(KERN_WARNING "PB TASK %llu RAN %llu INSTRUCTIONS TOO LONG\n", pb->plan[pb->c_entry].task_id, over_time);
+    }
 }
 static void yield_task_pb(struct rq *rq)
@@ -184,15 +240,21 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
     struct task_struct *picked = NULL;
     enum pb_mode current_mode, next_mode;
     struct pb_rq *pb = &(rq->pb);
-    bool premature_finish = false;
-    unsigned long flags;
-    pb->l_entry = pb->c_entry;
+    unsigned long lock_flags;
     current_mode = pb->mode;
     next_mode = determine_next_mode_pb(rq);
     pb->mode = next_mode;
+    if (next_mode == PB_DISABLED_MODE && current_mode == PB_EXEC_MODE) {
+        // After Plan is done do the cleanup
+        raw_spin_lock_irqsave(pb->pb_lock, lock_flags);
+        terminate_perf_event(pb->pevent);
+        raw_spin_unlock_irqrestore(pb->pb_lock, lock_flags);
+        pb->pevent = NULL;
+        // TODO: Check if we have to free the memory or if perf takes care of it
+        // see 'perf_event_release_kernel(struct perf_event *event)' in core.c
+    }
     /**
      * This handles the case where the program to be run is dead before the
      * pb scheduler starts executing
@@ -219,72 +281,6 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
         }
     }
-    if (current_mode == PB_EXEC_MODE && !pb->is_preempted) {
-        unsigned int c_entry_curr;
-        u64 perf_counter;
-        u64 counter_diff;
-        u64 read_error;
-        pb->is_in_critical = true;
-        c_entry_curr = pb->c_entry;
-        if(!pb->pevent) {
-            printk("WARNING: PERF EVENT IS NULL");
-        }
-        // printk(KERN_ALERT "DEBUG: Passed %s %d \n",__FUNCTION__,__LINE__);
-        read_error = get_perf_counter(pb->pevent, &perf_counter);
-        if (read_error) {
-            printk(KERN_WARNING "FETCHING PERFORMANCE COUNTER IN PB SCHEDULER FAILED WITH %llu\n", read_error);
-        }
-        counter_diff = perf_counter - pb->total_instr;
-        pb->plan[c_entry_curr].n_instr_counted = counter_diff;
-        pb->total_instr = perf_counter;
-        if (counter_diff < pb->plan[c_entry_curr].n_instr) {
-            u64 under_time = pb->plan[c_entry_curr].n_instr - counter_diff;
-            printk(KERN_WARNING "PB TASK %llu RAN %llu INSTRUCTIONS TOO SHORT\n", pb->plan[pb->c_entry].task_id, under_time);
-        } else if (counter_diff > pb->plan[c_entry_curr].n_instr) {
-            //TODO: Check if actually an overflow occurs and an another calculation is necessary
-            // (setting a flag in the perf overflow_handler could be a solution)
-            u64 over_time = counter_diff - pb->plan[c_entry_curr].n_instr;
-            printk(KERN_WARNING "PB TASK %llu RAN %llu INSTRUCTIONS TOO LONG\n", pb->plan[pb->c_entry].task_id, over_time);
-        }
-        pb->c_entry++;
-        /**
-         * Don't schedule a task that is dead. (e.g. plan was incorrect and program finished quicker)
-         * todo: if we have multiple tasks structs just try the next plan entry
-         */
-        if (pb->c_entry < pb->size && pb->plan[pb->c_entry].task_struct->state == TASK_DEAD) {
-            premature_finish = true;
-        }
-        if (pb->c_entry >= pb->size || premature_finish) {
-            raw_spin_lock_irqsave(pb->pb_lock, flags);
-            terminate_perf_event(pb->pevent);
-            raw_spin_unlock_irqrestore(pb->pb_lock, flags);
-            pb->pevent = NULL;
-            // TODO: Check if we have to free the memory or if perf takes care of it
-            // see 'perf_event_release_kernel(struct perf_event *event)' in core.c
-            pb->mode = PB_DISABLED_MODE;
-            next_mode = PB_DISABLED_MODE;
-            picked = NULL;
-            pb->is_initialized = 0;
-            if (premature_finish) {
-                printk(KERN_WARNING "PLAN TERMINATED PREMATURELY \n");
-            }
-            else {
-                printk(KERN_WARNING "PLAN DONE \n");
-            }
-        }
-        pb->is_in_critical = false;
-        printk("DONE");
-    }
     // EXEC Mode is next, so we return our next task to be executed
     if (next_mode == PB_EXEC_MODE) {
         // printk(KERN_ALERT "DEBUG: Passed %s %d \n",__FUNCTION__,__LINE__);
@@ -293,7 +289,6 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
         } else if (current_mode == PB_DISABLED_MODE) {
             printk("Switching from disabled to EXEC\n");
         }
-        pb->is_preempted = false;
         picked = pb->plan[pb->c_entry].task_struct;
     }
@@ -325,12 +320,8 @@ static void task_tick_pb(struct rq *rq, struct task_struct *p, int queued)
     // printk("TICK #%d\n",pb->count_pb_cycles);
-    if (determine_next_mode_pb(rq) != PB_EXEC_MODE && pb->mode == PB_EXEC_MODE && !pb->is_preempted && !pb->is_in_critical) {
+    if (determine_next_mode_pb(rq) != PB_EXEC_MODE && pb->mode == PB_EXEC_MODE) {
         //printk("Reschudling in task_tick_pb");
-        if (pb->l_entry != pb->c_entry){
-            // If the currrent task is not the last finished one, that means its unfinished and thus we set the preemtped flag
-            pb->is_preempted = true;
-        }
         resched_curr(rq);
     }
 }
...
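On the TODO about freeing the perf event once the plan is done: the diff does not show the body of terminate_perf_event(), but if the event was created with perf_event_create_kernel_counter(), then perf_event_release_kernel() both detaches and frees it, so no additional kfree() is needed. A plausible shape, offered as a sketch rather than the actual implementation from this tree:

#include <linux/perf_event.h>

/* sketch only: assumes pevent came from perf_event_create_kernel_counter() */
static void terminate_perf_event(struct perf_event *pevent)
{
    perf_event_disable(pevent);          /* stop counting */
    perf_event_release_kernel(pevent);   /* detaches and frees the event */
}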
@@ -75,10 +75,10 @@ void overflow_handler(
     cpu = smp_processor_id();
     pb_rq = &cpu_rq(cpu)->pb;
-    if(pb_rq->is_initialized)
-        printk(KERN_WARNING "OH: PB TASK %llu RAN TOO LONG\n",pb_rq->plan[pb_rq->c_entry].task_id);
-    else
-        printk(KERN_WARNING "OH: PB TASK RAN TOO LONG\n");
+    // if(pb_rq->is_initialized)
+    // printk(KERN_WARNING "OH: PB TASK %llu RAN TOO LONG\n",pb_rq->plan[pb_rq->c_entry].task_id);
+    // else
+    // printk(KERN_WARNING "OH: PB TASK RAN TOO LONG\n");
 }
 struct perf_event* perf_event_create(struct perf_event_attr *hw_event_uptr, int cpu, struct task_struct *task_struct)
...
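The warnings commented out above tie into the TODO in dequeue_task_pb ("setting a flag in the perf overflow_handler could be a solution"). One way that could look, assuming the standard perf overflow-handler signature and a hypothetical counter_overflows field that is not part of this commit:

/* sketch: record overflows so dequeue_task_pb could tell a genuine overrun
 * from a counter wrap (counter_overflows is a hypothetical new pb_rq field) */
void overflow_handler(struct perf_event *event,
                      struct perf_sample_data *data,
                      struct pt_regs *regs)
{
    int cpu = smp_processor_id();
    struct pb_rq *pb_rq = &cpu_rq(cpu)->pb;

    pb_rq->counter_overflows++;
}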
@@ -551,7 +551,6 @@ struct pb_rq
     struct plan_entry *plan;     // plan (used to be proxy_task)
     unsigned int size;           // size of the plan
     unsigned int c_entry;        // index of currently executed entry
-    unsigned int l_entry;        // index of last finished task, neccessary or preemption management
     u64 n_pb_cycles;             // amount of timer ticks before admin tasks are allowed to run
     u64 count_pb_cycles;         // current timer tick count for PB tasks
@@ -570,8 +569,7 @@ struct pb_rq
      * this variable must be initialized last
      */
     volatile int is_initialized;
-    volatile int is_preempted;   // flag determining whether the last task has been prematurely preempted during last mode switch
-    volatile int is_in_critical; // flag determining whether the scheduler is in the critical section in pick_next_task_pb
+    volatile int waiting_on_io;
     raw_spinlock_t *pb_lock;     // spinlock used to deactivate interrupts especially when handling perf-events
 };
@@ -947,7 +945,7 @@ static inline int determine_next_mode_pb(struct rq *rq)
          * tasks were pushed forward by the default scheduler and the IO
          * starved. We have to wait until the process is runnable.
          */
-        if (pb->plan[pb->c_entry].task_struct->state >= 0)
+        if (pb->plan[pb->c_entry].task_struct->state == 0)
         {
             /*
              * 0 == Runnable (IO succeeded)
...
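Regarding the tightened state check above: task_struct->state is 0 only for TASK_RUNNING, while sleeping states such as TASK_INTERRUPTIBLE (1) and TASK_UNINTERRUPTIBLE (2) are positive, so the old "state >= 0" condition also accepted tasks still blocked in IO. A sketch of the same test written with the kernel's named constant instead of the literal:

/* sketch: equivalent to the '== 0' test above; TASK_RUNNING is defined as 0 */
if (pb->plan[pb->c_entry].task_struct->state == TASK_RUNNING) {
    /* IO finished, the planned task can run again */
}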
@@ -8,7 +8,9 @@ fi
 cd /mnt/pb_utils/pb_submitter
 gcc -static -o pb_submitter pb_submitter.c
 gcc -static -o test_prog test_prog.c
+gcc -static -o task_long task_long_test.c
+gcc -static -o sysc_long syscall_long_test.c
-cp pb_submitter test_prog example_run.sh example_plan /root
+cp pb_submitter test_prog task_long sysc_long example_run.sh example_plan /root
 echo "All done. Run '/root/example_run.sh' within ./run_qemu.sh now"
File added
#include <stdio.h>
#include <unistd.h>
int main(void)
{
    // Make sure program is not finished before pb scheduler takes control
    sleep(1);
    int b = 0;
    for (;b < 100; b++) {
        int a = 0;
        int c = 0;
        for (;a < 100000; a++){c = c + a;}
        // check if program runs && syscall to switch tasks
        printf("loop run: %d, c = %d \n", b, c);
        usleep(10000);
    }
    return 0;
}
File added
#include <stdio.h>
#include <unistd.h>
int main(void)
{
    // Make sure program is not finished before pb scheduler takes control
    sleep(1);
    int b = 0;
    for (;b < 100; b++) {
        int a = 0;
        int c = 0;
        for (;a < 10000000; a++){c = c + a;}
        // check if program runs && syscall to switch tasks
        printf("loop run: %d, c = %d \n", b, c);
        usleep(1);
    }
    return 0;
}