diff --git a/kernel/sched/pb.c b/kernel/sched/pb.c
index fe3b3f6d833b94a85ba6f7eb75df968368f5e83e..f320071e32375aa779c1baca2309c08b1400bba2 100644
--- a/kernel/sched/pb.c
+++ b/kernel/sched/pb.c
@@ -143,27 +143,83 @@ void init_pb_rq(struct pb_rq *pb_rq)
 	pb_rq->n_admin_cycles = 20;
 	pb_rq->count_admin_cycles = 0;
 	pb_rq->mode = PB_DISABLED_MODE;
-	pb_rq->c_entry = 0;
+	pb_rq->c_entry = -1;
 	pb_rq->size = 0;
 	pb_rq->pevent = NULL;
 	pb_rq->is_initialized = 0;
-	// pb_rq->need_mode_change = 0;
-	pb_rq->is_preempted = false;
-	pb_rq->is_in_critical = false;
+	pb_rq->waiting_on_io = 0;
 	raw_spin_lock_init(pb_rq->pb_lock);
 }
 EXPORT_SYMBOL(init_pb_rq);
 
-// task enters the runnable state
+// IO has finished; we can schedule the next task
 static void enqueue_task_pb(struct rq *rq, struct task_struct *p, int flags)
 {
-	// NOP
+	struct pb_rq *pb = &(rq->pb);
+	bool premature_finish = false;
+
+	pb->waiting_on_io = 0;
+
+	pb->c_entry++;
+
+	/**
+	 * Don't schedule a task that is dead (e.g. the plan was incorrect and the program finished earlier than expected).
+	 * TODO: if we have multiple task structs, just try the next plan entry
+	 */
+	if (pb->c_entry < pb->size && pb->plan[pb->c_entry].task_struct->state == TASK_DEAD) {
+		premature_finish = true;
+		pb->is_initialized = 0;
+	}
+
+	if (pb->c_entry >= pb->size || premature_finish) {
+		if (premature_finish) {
+			printk(KERN_WARNING "PLAN TERMINATED PREMATURELY\n");
+		}
+		else {
+			printk(KERN_WARNING "PLAN DONE\n");
+		}
+	}
+	printk(KERN_DEBUG "DONE\n");
 }
 
-// task exists the runnable state
+// task has started IO and is thus finished for now
 static void dequeue_task_pb(struct rq *rq, struct task_struct *p, int flags)
 {
-	// NOP
+	struct pb_rq *pb = &(rq->pb);
+	unsigned int c_entry_curr;
+	u64 perf_counter;
+	u64 counter_diff;
+	u64 read_error;
+
+	if (pb->waiting_on_io) {
+		return;
+	}
+	pb->waiting_on_io = 1;
+	c_entry_curr = pb->c_entry;
+
+	if (!pb->pevent) {
+		printk(KERN_WARNING "PERF EVENT IS NULL\n");
+	}
+
+	// printk(KERN_ALERT "DEBUG: Passed %s %d \n",__FUNCTION__,__LINE__);
+	read_error = get_perf_counter(pb->pevent, &perf_counter);
+	if (read_error) {
+		printk(KERN_WARNING "FETCHING PERFORMANCE COUNTER IN PB SCHEDULER FAILED WITH %llu\n", read_error);
+	}
+	counter_diff = perf_counter - pb->total_instr;
+	pb->plan[c_entry_curr].n_instr_counted = counter_diff;
+	pb->total_instr = perf_counter;
+	if (counter_diff < pb->plan[c_entry_curr].n_instr) {
+		u64 under_time = pb->plan[c_entry_curr].n_instr - counter_diff;
+
+		printk(KERN_WARNING "PB TASK %llu RAN %llu INSTRUCTIONS TOO SHORT\n", pb->plan[pb->c_entry].task_id, under_time);
+	} else if (counter_diff > pb->plan[c_entry_curr].n_instr) {
+		// TODO: Check whether an overflow actually occurred and another calculation is necessary
+		// (setting a flag in the perf overflow_handler could be a solution)
+		u64 over_time = counter_diff - pb->plan[c_entry_curr].n_instr;
+
+		printk(KERN_WARNING "PB TASK %llu RAN %llu INSTRUCTIONS TOO LONG\n", pb->plan[pb->c_entry].task_id, over_time);
+	}
 }
 
 static void yield_task_pb(struct rq *rq)
@@ -184,15 +240,21 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
 	struct task_struct *picked = NULL;
 	enum pb_mode current_mode, next_mode;
 	struct pb_rq *pb = &(rq->pb);
-	bool premature_finish = false;
-
-	unsigned long flags;
-	pb->l_entry = pb->c_entry;
+	unsigned long lock_flags;
 
 	current_mode = pb->mode;
 	next_mode = determine_next_mode_pb(rq);
 	pb->mode = next_mode;
 
+	if (next_mode == PB_DISABLED_MODE && current_mode == PB_EXEC_MODE) {
+		// After the plan is done, do the cleanup
+		raw_spin_lock_irqsave(pb->pb_lock, lock_flags);
+		terminate_perf_event(pb->pevent);
+		raw_spin_unlock_irqrestore(pb->pb_lock, lock_flags);
+		pb->pevent = NULL;
+		// TODO: Check if we have to free the memory or if perf takes care of it
+		// see 'perf_event_release_kernel(struct perf_event *event)' in core.c
+	}
 	/**
 	 * This handles the case where the program to be run is dead before the
 	 * pb scheduler starts executing
@@ -219,72 +281,6 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
 		}
 	}
 
-	if (current_mode == PB_EXEC_MODE && !pb->is_preempted) {
-		unsigned int c_entry_curr;
-		u64 perf_counter;
-		u64 counter_diff;
-		u64 read_error;
-
-		pb->is_in_critical = true;
-		c_entry_curr = pb->c_entry;
-
-		if(!pb->pevent) {
-			printk("WARNING: PERF EVENT IS NULL");
-		}
-
-		// printk(KERN_ALERT "DEBUG: Passed %s %d \n",__FUNCTION__,__LINE__);
-		read_error = get_perf_counter(pb->pevent, &perf_counter);
-		if (read_error) {
-			printk(KERN_WARNING "FETCHING PERFORMANCE COUNTER IN PB SCHEDULER FAILED WITH %llu\n", read_error);
-		}
-		counter_diff = perf_counter - pb->total_instr;
-		pb->plan[c_entry_curr].n_instr_counted = counter_diff;
-		pb->total_instr = perf_counter;
-		if (counter_diff < pb->plan[c_entry_curr].n_instr) {
-			u64 under_time = pb->plan[c_entry_curr].n_instr - counter_diff;
-
-			printk(KERN_WARNING "PB TASK %llu RAN %llu INSTRUCTIONS TOO SHORT\n", pb->plan[pb->c_entry].task_id, under_time);
-		} else if (counter_diff > pb->plan[c_entry_curr].n_instr) {
-			//TODO: Check if actually an overflow occurs and an another calculation is necessary
-			// (setting a flag in the perf overflow_handler could be a solution)
-			u64 over_time = counter_diff - pb->plan[c_entry_curr].n_instr;
-
-			printk(KERN_WARNING "PB TASK %llu RAN %llu INSTRUCTIONS TOO LONG\n", pb->plan[pb->c_entry].task_id, over_time);
-		}
-
-		pb->c_entry++;
-
-		/**
-		 * Don't schedule a task that is dead. (e.g. plan was incorrect and program finished quicker)
-		 * todo: if we have multiple tasks structs just try the next plan entry
-		 */
-		if (pb->c_entry < pb->size && pb->plan[pb->c_entry].task_struct->state == TASK_DEAD) {
-			premature_finish = true;
-		}
-
-		if (pb->c_entry >= pb->size || premature_finish) {
-			raw_spin_lock_irqsave(pb->pb_lock, flags);
-			terminate_perf_event(pb->pevent);
-			raw_spin_unlock_irqrestore(pb->pb_lock, flags);
-			pb->pevent = NULL;
-			// TODO: Check if we have to free the memory or if perf takes care of it
-			// see 'perf_event_release_kernel(struct perf_event *event)' in core.c
-			pb->mode = PB_DISABLED_MODE;
-			next_mode = PB_DISABLED_MODE;
-			picked = NULL;
-			pb->is_initialized = 0;
-
-			if (premature_finish) {
-				printk(KERN_WARNING "PLAN TERMINATED PREMATURELY \n");
-			}
-			else {
-				printk(KERN_WARNING "PLAN DONE \n");
-			}
-		}
-		pb->is_in_critical = false;
-		printk("DONE");
-	}
-
 	// EXEC Mode is next, so we return our next task to be executed
 	if (next_mode == PB_EXEC_MODE) {
 		// printk(KERN_ALERT "DEBUG: Passed %s %d \n",__FUNCTION__,__LINE__);
@@ -293,7 +289,6 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
 		} else if (current_mode == PB_DISABLED_MODE) {
 			printk("Switching from disabled to EXEC\n");
 		}
-		pb->is_preempted = false;
 
 		picked = pb->plan[pb->c_entry].task_struct;
 	}
@@ -325,12 +320,8 @@ static void task_tick_pb(struct rq *rq, struct task_struct *p, int queued)
 
 	// printk("TICK #%d\n",pb->count_pb_cycles);
 
-	if (determine_next_mode_pb(rq) != PB_EXEC_MODE && pb->mode == PB_EXEC_MODE && !pb->is_preempted && !pb->is_in_critical) {
+	if (determine_next_mode_pb(rq) != PB_EXEC_MODE && pb->mode == PB_EXEC_MODE) {
 		//printk("Reschudling in task_tick_pb");
-		if (pb->l_entry != pb->c_entry){
-			// If the currrent task is not the last finished one, that means its unfinished and thus we set the preemtped flag
-			pb->is_preempted = true;
-		}
 		resched_curr(rq);
 	}
 }
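The per-entry instruction accounting that moved into dequeue_task_pb() above can be exercised in isolation. Below is a minimal user-space sketch of the same arithmetic; `struct plan_entry` here is a reduced stand-in for the kernel structure (only the fields the calculation touches), not its actual definition.

```c
#include <stdio.h>
#include <stdint.h>

/* Reduced stand-in for the kernel's plan_entry, for illustration only. */
struct plan_entry {
	uint64_t task_id;
	uint64_t n_instr;         /* planned instructions for this entry */
	uint64_t n_instr_counted; /* measured instructions, filled in below */
};

/* Mirrors the accounting in dequeue_task_pb(): the perf counter is
 * cumulative, so the per-entry count is the difference between the new
 * reading and the running total observed so far. */
static void account_entry(struct plan_entry *e, uint64_t perf_counter,
			  uint64_t *total_instr)
{
	uint64_t counter_diff = perf_counter - *total_instr;

	e->n_instr_counted = counter_diff;
	*total_instr = perf_counter;

	if (counter_diff < e->n_instr)
		printf("PB TASK %llu RAN %llu INSTRUCTIONS TOO SHORT\n",
		       (unsigned long long)e->task_id,
		       (unsigned long long)(e->n_instr - counter_diff));
	else if (counter_diff > e->n_instr)
		printf("PB TASK %llu RAN %llu INSTRUCTIONS TOO LONG\n",
		       (unsigned long long)e->task_id,
		       (unsigned long long)(counter_diff - e->n_instr));
}

int main(void)
{
	struct plan_entry plan[2] = {
		{ .task_id = 1, .n_instr = 1000 },
		{ .task_id = 2, .n_instr = 2000 },
	};
	uint64_t total_instr = 0;

	account_entry(&plan[0], 900, &total_instr);  /* 900 counted: too short */
	account_entry(&plan[1], 3200, &total_instr); /* 2300 counted: too long */
	return 0;
}
```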
diff --git a/kernel/sched/perf_error_detection.c b/kernel/sched/perf_error_detection.c
index 8c308497312c911bd3d90f6be97828e1a2f60363..fa3aea7f259b20ae8c812715b31bdb20d4554a80 100644
--- a/kernel/sched/perf_error_detection.c
+++ b/kernel/sched/perf_error_detection.c
@@ -75,10 +75,10 @@ void overflow_handler(
 	cpu = smp_processor_id();
 	pb_rq = &cpu_rq(cpu)->pb;
 
-	if(pb_rq->is_initialized)
-		printk(KERN_WARNING "OH: PB TASK %llu RAN TOO LONG\n",pb_rq->plan[pb_rq->c_entry].task_id);
-	else
-		printk(KERN_WARNING "OH: PB TASK RAN TOO LONG\n");
+	// if(pb_rq->is_initialized)
+	// 	printk(KERN_WARNING "OH: PB TASK %llu RAN TOO LONG\n",pb_rq->plan[pb_rq->c_entry].task_id);
+	// else
+	// 	printk(KERN_WARNING "OH: PB TASK RAN TOO LONG\n");
 }
 
 struct perf_event* perf_event_create(struct perf_event_attr *hw_event_uptr, int cpu, struct task_struct *task_struct)
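The logging silenced in overflow_handler() ties into the open TODO in dequeue_task_pb(): rather than printing from the PMU interrupt, the handler could record that an overflow fired, and the scheduler could consult that record on its next counter read. A runnable user-space sketch of that idea, assuming a hypothetical `overflow_occurred` flag that is not part of this patch:

```c
#include <stdio.h>

/* Hypothetical flag (not in struct pb_rq in this patch): the perf
 * overflow_handler would set it when the PMU signals that the planned
 * instruction budget was exceeded. */
static volatile int overflow_occurred;

static void simulated_overflow_handler(void)
{
	/* take note instead of printk()ing from interrupt context */
	overflow_occurred = 1;
}

/* dequeue_task_pb() could then consult the flag when it sees
 * counter_diff > n_instr, distinguishing a PMU-confirmed overshoot
 * from a measurement artifact. */
int main(void)
{
	simulated_overflow_handler();

	if (overflow_occurred)
		printf("overshoot confirmed by overflow handler\n");
	else
		printf("no overflow recorded\n");
	return 0;
}
```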
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 06ad593f6dcf3b3dea061c134360455f80fde837..fbdf48949d145923e7967ec96fb09732072b3248 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -551,7 +551,6 @@ struct pb_rq
 	struct plan_entry *plan;	// plan (used to be proxy_task)
 	unsigned int size;		// size of the plan
 	unsigned int c_entry;		// index of currently executed entry
-	unsigned int l_entry;		// index of last finished task, neccessary or preemption management
 
 	u64 n_pb_cycles;		// amount of timer ticks before admin tasks are allowed to run
 	u64 count_pb_cycles;		// current timer tick count for PB tasks
@@ -570,8 +569,7 @@ struct pb_rq
 	 * this variable must be initialized last
 	 */
 	volatile int is_initialized;
-	volatile int is_preempted;	// flag determining whether the last task has been prematurely preempted during last mode switch
-	volatile int is_in_critical;	// flag determining whether the scheduler is in the critical section in pick_next_task_pb
+	volatile int waiting_on_io;	// flag: the task of the current plan entry is blocked on IO
 
 	raw_spinlock_t *pb_lock;	// spinlock used to deactivate interrupts especially when handling perf-events
 };
@@ -947,7 +945,7 @@ static inline int determine_next_mode_pb(struct rq *rq)
 	 * tasks were pushed forward by the default scheduler and the IO
 	 * starved. We have to wait until the process is runnable.
 	 */
-	if (pb->plan[pb->c_entry].task_struct->state >= 0)
+	if (pb->plan[pb->c_entry].task_struct->state == 0)
 	{
 		/*
 		 * 0 == Runnable (IO succeeded)
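The determine_next_mode_pb() change from `state >= 0` to `state == 0` is easy to misread without the state convention at hand: the kernel documents task_struct->state as "-1 unrunnable, 0 runnable, >0 stopped", so `>= 0` was also true for a task still sleeping on IO (e.g. TASK_INTERRUPTIBLE == 1), which is exactly the case the comment above the check wants to wait out; only `== 0` (TASK_RUNNING) means the IO finished. A small user-space illustration, with the stable state values copied from include/linux/sched.h:

```c
#include <stdio.h>

/* task_struct->state convention: -1 unrunnable, 0 runnable, >0 stopped.
 * Values copied from include/linux/sched.h for illustration. */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2

int main(void)
{
	long states[] = { TASK_RUNNING, TASK_INTERRUPTIBLE, TASK_UNINTERRUPTIBLE };
	int i;

	for (i = 0; i < 3; i++) {
		long state = states[i];

		/* the old check also admitted tasks still sleeping on IO */
		printf("state=%ld  old check (>= 0): %d  new check (== 0): %d\n",
		       state, state >= 0, state == 0);
	}
	return 0;
}
```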
diff --git a/pb_utils/pb_submitter/build.sh b/pb_utils/pb_submitter/build.sh
index 518b471e4177f4121a15f9197a58204e01fe7a7c..56d3f6d821576ea2f7cc14dbf269b83c0aa19d26 100755
--- a/pb_utils/pb_submitter/build.sh
+++ b/pb_utils/pb_submitter/build.sh
@@ -8,7 +8,9 @@ fi
 cd /mnt/pb_utils/pb_submitter
 gcc -static -o pb_submitter pb_submitter.c
 gcc -static -o test_prog test_prog.c
+gcc -static -o task_long task_long_test.c
+gcc -static -o sysc_long syscall_long_test.c
 
-cp pb_submitter test_prog example_run.sh example_plan /root
+cp pb_submitter test_prog task_long sysc_long example_run.sh example_plan /root
 
 echo "All done. Run '/root/example_run.sh' within ./run_qemu.sh now"
diff --git a/pb_utils/pb_submitter/sysc_long b/pb_utils/pb_submitter/sysc_long
new file mode 100755
index 0000000000000000000000000000000000000000..841ec848ca83a0816237c4ca39b0c7ca19818fd8
Binary files /dev/null and b/pb_utils/pb_submitter/sysc_long differ
diff --git a/pb_utils/pb_submitter/syscall_long_test.c b/pb_utils/pb_submitter/syscall_long_test.c
new file mode 100644
index 0000000000000000000000000000000000000000..afefbcba6c6bb21301255d8220c4efbfd879dd6c
--- /dev/null
+++ b/pb_utils/pb_submitter/syscall_long_test.c
@@ -0,0 +1,19 @@
+#include <stdio.h>
+#include <unistd.h>
+
+int main(void)
+{
+	// Make sure the program is not finished before the pb scheduler takes control
+	sleep(1);
+	int b = 0;
+	for (; b < 100; b++) {
+		int a = 0;
+		int c = 0;
+		for (; a < 100000; a++) { c = c + a; }
+		// show the program is running; printf/usleep issue syscalls that allow a task switch
+		printf("loop run: %d, c = %d \n", b, c);
+		usleep(10000);
+	}
+	return 0;
+}
+
diff --git a/pb_utils/pb_submitter/task_long b/pb_utils/pb_submitter/task_long
new file mode 100755
index 0000000000000000000000000000000000000000..6be357d1ed5369230eda8442a249b09a3e4e1f37
Binary files /dev/null and b/pb_utils/pb_submitter/task_long differ
diff --git a/pb_utils/pb_submitter/task_long_test.c b/pb_utils/pb_submitter/task_long_test.c
new file mode 100644
index 0000000000000000000000000000000000000000..b9f9c2d7610448545c08be0fc78d901ae118c827
--- /dev/null
+++ b/pb_utils/pb_submitter/task_long_test.c
@@ -0,0 +1,19 @@
+#include <stdio.h>
+#include <unistd.h>
+
+int main(void)
+{
+	// Make sure the program is not finished before the pb scheduler takes control
+	sleep(1);
+	int b = 0;
+	for (; b < 100; b++) {
+		int a = 0;
+		int c = 0;
+		for (; a < 10000000; a++) { c = c + a; }
+		// show the program is running; printf/usleep issue syscalls that allow a task switch
+		printf("loop run: %d, c = %d \n", b, c);
+		usleep(1);
+	}
+	return 0;
+}
+
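The two new test programs differ only in their compute/sleep ratio: task_long_test.c does 10,000,000 additions per iteration and sleeps 1 µs (the task overshoots its plan entry), while syscall_long_test.c does 100,000 additions and sleeps 10 ms (the task spends most of its time blocked in IO). If further load shapes are needed, only those two constants change; a hypothetical mixed variant, not part of this patch, might look like this:

```c
#include <stdio.h>
#include <unistd.h>

/* Hypothetical mixed-load variant of task_long_test.c / syscall_long_test.c:
 * alternates between a compute-heavy and a syscall-heavy iteration. */
int main(void)
{
	// Make sure the program is not finished before the pb scheduler takes control
	sleep(1);
	int b = 0;
	for (; b < 100; b++) {
		int a = 0;
		int c = 0;
		/* even iterations: long computation; odd iterations: short one */
		int limit = (b % 2 == 0) ? 10000000 : 100000;

		for (; a < limit; a++) { c = c + a; }
		printf("loop run: %d, c = %d \n", b, c);
		/* odd iterations: long sleep, forcing a task switch via syscall */
		usleep(b % 2 == 0 ? 1 : 10000);
	}
	return 0;
}
```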