diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 2d1cdb3917184c7ae713849af6b1343239595f1f..70e003198d28ddec653cb8ef525fa84e1bf67bfa 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -208,7 +208,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
 	static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 
-asmlinkage long sys_pb_set_plan(pid_t reference_proc_pid);
+asmlinkage long sys_pb_set_plan(pid_t reference_proc, pid_t root_proc);
 asmlinkage long sys_pbm_set_root_proc(void);
 
 asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special,
 			       qid_t id, void __user *addr);
diff --git a/kernel/behave.c b/kernel/behave.c
index 129d699b3b3c5b799f29a74c2e01eedd5cf8e049..eac8d5f69ebb9a6d05d04bc5bdbd65c044fee676 100644
--- a/kernel/behave.c
+++ b/kernel/behave.c
@@ -31,9 +31,6 @@ SYSCALL_DEFINE0(pbm_set_root_proc) {
 	return 0;
 }
 
-// FIXME: THIS IS JUST FOR TESTING PURPOSES so that i dont have to read and write the reference pid manually in the console
-pid_t last_plan_pid;
-
 /******************************************************************************
  * Based on "libpbm" (see header file for more info)
  */
@@ -658,8 +655,6 @@ void pbm_join_and_print_graph_self(pid_t pid) {
 	is_initialized = 0;
 
 	printk(KERN_WARNING "ROOT: %u\n", pid);
-	// FIXME: THIS IS JUST FOR TESTING PURPOSES
-	last_plan_pid = pid;
 }
 
 /* Crude recursive ADG printer, starts with given node */
diff --git a/kernel/behave.h b/kernel/behave.h
index 8348874054c5a49f8b6d791857afb2cd3032508b..45bd1e4dfb01edecf250de71af20af4f1fd99e02 100644
--- a/kernel/behave.h
+++ b/kernel/behave.h
@@ -4,9 +4,6 @@
 #include <linux/types.h>
 #include "sched/perf_error_detection.h"
 
-// FIXME: THIS IS JUST FOR TESTING PURPOSES so that i dont have to read and write the reference pid manually in the console
-extern pid_t last_plan_pid;
-
 /******************************************************************************
  * Based on "libpbm":
  * Program Behaviour Model (PBM) as a Task Precedence Graph (TPG),
diff --git a/kernel/sched/pb.c b/kernel/sched/pb.c
index fb196f10b390f14912815731f65af8d16947fa4c..c102a9729c342223706d1f92c42cb305f7c638c4 100644
--- a/kernel/sched/pb.c
+++ b/kernel/sched/pb.c
@@ -28,48 +28,21 @@ static void reset_triggering_syscall_info(void) {
 
 /* -------------------------------------------------------------------------- */
 
-SYSCALL_DEFINE1(pb_set_plan, pid_t, reference_proc_pid) {
-	pb_plan_t _plan;
+SYSCALL_DEFINE2(pb_set_plan, pid_t, reference_proc, pid_t, root_proc) {
 	struct task_struct* task;
 	struct rq* rq;
 	struct pb_rq* pb_rq;
-	size_t expected;
-	uint64_t* inst_cnt;
-	unsigned long copied;
-	unsigned int i;
 	int res;
 	PBM* pbm;
-
-	copied = copy_from_user(&_plan, plan, sizeof(pb_plan_t));
-
-	if (copied != 0) {
-		return -1;
-	}
-
-	expected = _plan.num_tasks * sizeof(*_plan.inst_cnt);
-
-	inst_cnt = (uint64_t *)kzalloc(expected, GFP_KERNEL);
-
-	if (inst_cnt == NULL) {
-		return -1;
-	}
-
-	copied = copy_from_user(inst_cnt, _plan.inst_cnt, expected);
-
-	if (copied != 0) {
-		printk("copy from user inst_cnt failed \n");
-		return -1;
-	}
-
-	task = find_task_by_vpid(_plan.pid);
+	task = find_task_by_vpid(root_proc);
 
 	if (!task) {
 		return -1;
 	}
 
-	pbm = get_pbm_by_pid(last_plan_pid/*_plan.ref_pid (FIXME: THIS IS JUST FOR TESTING PURPOSES since it would be better to be able to insert the wanted pid as an argument)*/);
+	pbm = get_pbm_by_pid(reference_proc);
 
 	printk(KERN_WARNING "Init Plan RunTime state\n");
 
 	// reset plan runtime state
@@ -97,20 +70,6 @@ SYSCALL_DEFINE1(pb_set_plan, pid_t, reference_proc_pid) {
 	pb_rq->root_proc = task;
 	pb_rq->num_exited_procs = pbm->child_count;
 
-	set_pb_plan_size(pb_rq, _plan.num_tasks);
-
-	for (i = 0; i < _plan.num_tasks; i++ ) {
-		set_pb_plan_entry(
-			pb_rq,
-			i,
-			inst_cnt[i],
-			i,
-			task
-		);
-	}
-
-	kfree(inst_cnt);
-
 	res = pb_submit_plan(rq);
 
 	if (res == -1) {
@@ -137,7 +96,7 @@ int pb_submit_plan(struct rq *rq)
 		return -1;
 	}
 
-	error = init_perf_event_into_map(pb->plan[i].task_struct, pb->plan[i].n_instr);
+	error = init_perf_event_into_map(plan_rt_state_peek_proc(), 0);
 	if(error) {
 		//initialization error detection/handling could happen here
 		printk(KERN_WARNING "PB INIT,%u: FAILED OPEN PERF EVENT\n", i);
@@ -145,7 +104,6 @@ int pb_submit_plan(struct rq *rq)
 		printk(KERN_DEBUG "PB INIT,%u\n", i);
 	}
 
-	pb->c_entry = 0;
 	pb->count_pb_cycles = 0;
 	pb->count_admin_cycles = 0;
 	pb->total_instr = 0;
@@ -157,28 +115,6 @@ int pb_submit_plan(struct rq *rq)
 	return 0;
 }
 EXPORT_SYMBOL(pb_submit_plan);
-/*
- * Kelvin's Testcodes
- */
-void set_pb_plan_size(struct pb_rq *pb_rq, unsigned int size)
-{
-	pb_rq->size = size;
-	pb_rq->plan = kmalloc(sizeof(struct plan_entry) * size , GFP_KERNEL);
-	memset(pb_rq->plan, 0x0, sizeof(struct plan_entry) * size);
-}
-EXPORT_SYMBOL(set_pb_plan_size);
-
-/*
- * Kelvin's Testcode
- */
-//insert into pb queue (analog to enqueue)
-void set_pb_plan_entry(struct pb_rq *pb_rq, unsigned int i, u64 n_instr, u64 task_id, struct task_struct *task_struct)
-{
-	pb_rq->plan[i].n_instr = n_instr;
-	pb_rq->plan[i].task_id = task_id;
-	pb_rq->plan[i].task_struct = task_struct;
-}
-EXPORT_SYMBOL(set_pb_plan_entry);
 
 // called by core.c sched_init
 void init_pb_rq(struct pb_rq *pb_rq)
@@ -188,8 +124,6 @@ void init_pb_rq(struct pb_rq *pb_rq)
 	pb_rq->n_admin_cycles = 20;
 	pb_rq->count_admin_cycles = 0;
 	pb_rq->mode = PB_DISABLED_MODE;
-	pb_rq->c_entry = 0;
-	pb_rq->size = 0;
 	pb_rq->is_initialized = 0;
 	pb_rq->waiting_on_io = 0;
@@ -275,7 +209,6 @@ static void check_preempt_curr_pb(struct rq *rq, struct task_struct *p, int flag
 static void dequeue_task_pb(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct pb_rq *pb = &(rq->pb);
-	unsigned int c_entry_curr;
 	u64 perf_counter;
 	u64 counter_diff;
 	u64 read_error;
@@ -294,7 +227,6 @@ static void dequeue_task_pb(struct rq *rq, struct task_struct *p, int flags)
 		return;
 	}
 	pb->waiting_on_io = 1;
-	c_entry_curr = pb->c_entry;
 
 	// safe current process for later use since the plan_rt_state might get modified
 	plan_rt_state_peek(&cur_node, &cur_proc);
@@ -331,7 +263,6 @@ static void dequeue_task_pb(struct rq *rq, struct task_struct *p, int flags)
 		printk(KERN_WARNING "FETCHING PERFORMANCE COUNTER IN PB SCHEDULER FAILED WITH %llu\n", read_error);
 	}
 	counter_diff = perf_counter; //- pb->total_instr;
-	pb->plan[c_entry_curr].n_instr_counted = counter_diff;
 	pb->total_instr = perf_counter;
 	diff_from_expected = abs(counter_diff - expected_instr_count);
 	//TODO: Set proper threshold for significance (relative values would probably be better than absolutes)
@@ -342,8 +273,6 @@ static void dequeue_task_pb(struct rq *rq, struct task_struct *p, int flags)
 			counter_diff < expected_instr_count ? "SHORT" : "LONG");
 	}
 
-	pb->c_entry++;
-
 	//TODO: Can this actually happen? Can a process die without calling exit?
 	// remove a dead process which has not called exit from the plan
 	if (!process_exited && cur_proc->state == TASK_DEAD) {
@@ -412,9 +341,6 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
 	struct task_struct *picked = NULL;
 	enum pb_mode current_mode, next_mode;
 	struct pb_rq *pb = &(rq->pb);
-
-	// FIXME: Testing purposes
-	struct task_struct* one;
 	int i;
 
 	current_mode = pb->mode;
 	next_mode = determine_next_mode_pb(rq);
@@ -458,7 +384,7 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
 	// printk(KERN_ALERT "DEBUG: Passed %s %d \n",__FUNCTION__,__LINE__);
 	if(current_mode == PB_ADMIN_MODE) {
-		printk(KERN_DEBUG "PB ADMIN,STOP,%u,%llu\n", pb->c_entry, sched_clock());
+		printk(KERN_DEBUG "PB ADMIN,STOP,%llu\n", sched_clock());
 	} else if (current_mode == PB_DISABLED_MODE) {
 		printk("Switching from disabled to EXEC\n");
 	}
@@ -548,140 +474,3 @@ const struct sched_class pb_sched_class = {
 	.update_curr		= update_curr_pb, // NOP
 };
 EXPORT_SYMBOL(pb_sched_class);
-
-
-/***********************************************************************
- * /proc filesystem entry
- * use 'cat /proc/pbsched' to read
- **********************************************************************/
-
-static int show_pbsched(struct seq_file *seq, void *v)
-{
-	int cpu;
-
-	if (v == (void *)1) {
-		seq_printf(seq, "cpuid mode curr_entry curr_pb_cycles curr_admin_cycles\n");
-	} else {
-		char mode;
-		struct rq *rq;
-		struct pb_rq *pb;
-
-		int i;
-		struct plan_entry *plan;
-
-		cpu = (unsigned long)(v - 2);
-		rq = cpu_rq(cpu);
-		pb = &(rq->pb);
-
-		switch(pb->mode) {
-		case PB_DISABLED_MODE: mode='D'; break;
-		case PB_EXEC_MODE: mode='E'; break;
-		case PB_ADMIN_MODE: mode='A'; break;
-		default: mode='U'; break;
-		}
-
-		/* runqueue-specific stats */
-		seq_printf(seq,
-			"cpu%d %c %u %llu %llu\n",
-			cpu,
-			mode,
-			pb->c_entry,
-			pb->count_pb_cycles,
-			pb->count_admin_cycles
-		);
-
-		/* plan stats */
-		if(pb->size){
-			seq_printf(seq, "\ntask_id n_instr n_instr_counted\n");
-
-			plan = pb->plan;
-			for (i=0; i < pb->size; i++){
-				// only print completed tasks, after completion is_initialized is 0 and we can print the last
-				if(i<pb->c_entry || !pb->is_initialized){
-					seq_printf(seq,
-						"%llu %llu %llu\n",
-						plan[i].task_id,
-						plan[i].n_instr,
-						plan[i].n_instr_counted
-					);
-				}else{
-					seq_printf(seq,
-						"%llu %llu queued\n",
-						plan[i].task_id,
-						plan[i].n_instr
-					);
-				}
-
-
-			}
-
-		}
-	}
-	return 0;
-}
-
-/*
- * This itererator needs some explanation.
- * It returns 1 for the header position.
- * This means 2 is cpu 0.
- * In a hotplugged system some cpus, including cpu 0, may be missing so we have
- * to use cpumask_* to iterate over the cpus.
- */
-static void *pbsched_start(struct seq_file *file, loff_t *offset)
-{
-	unsigned long n = *offset;
-
-	if (n == 0)
-		return (void *) 1;
-
-	n--;
-
-	if (n > 0)
-		n = cpumask_next(n - 1, cpu_online_mask);
-	else
-		n = cpumask_first(cpu_online_mask);
-
-	*offset = n + 1;
-
-	if (n < nr_cpu_ids)
-		return (void *)(unsigned long)(n + 2);
-	return NULL;
-}
-
-static void *pbsched_next(struct seq_file *file, void *data, loff_t *offset)
-{
-	(*offset)++;
-	return pbsched_start(file, offset);
-}
-
-static void pbsched_stop(struct seq_file *file, void *data)
-{
-	// NOP
-}
-
-static const struct seq_operations pbsched_sops = {
-	.start = pbsched_start,
-	.next  = pbsched_next,
-	.stop  = pbsched_stop,
-	.show  = show_pbsched,
-};
-
-static int pbsched_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &pbsched_sops);
-}
-
-static const struct file_operations proc_pbsched_operations = {
-	.open    = pbsched_open,
-	.read    = seq_read,
-	.llseek  = seq_lseek,
-	.release = seq_release,
-};
-
-static int __init proc_pbsched_init(void)
-{
-	proc_create("pbsched", 0, NULL, &proc_pbsched_operations);
-	return 0;
-}
-subsys_initcall(proc_pbsched_init);
-
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 063e47dde2510ad2e6caf4f1d72919549697ba90..e336142b167155c191d0d05d2a80737032f9efbc 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -520,35 +520,6 @@ enum pb_mode
 	PB_ADMIN_MODE		// Admin task is being executed
 };
 
-#define PB_MAX_PLAN_LENGTH 100
-
-/*
- * A PB-Task consists of one or more plan_entry
- */
-struct plan_entry
-{
-	u64 n_instr;				// number of instructions in the entry
-	u64 task_id;				// identifier of the plan entry
-	struct task_struct *task_struct;	// linux task struct
-	u64 n_instr_counted;			// number of instructions we counted for the entry
-};
-
-struct pb_init_struct
-{
-	struct plan_entry *plan;	// plan
-	unsigned int size;		// size of the plan
-
-	u64 n_pb_cycles;		// amount of timer ticks before admin tasks are allowed to run
-	u64 n_admin_cycles;		// amount of timer ticks before PB tasks are allowed to run
-};
-
-struct pb_plan {
-	pid_t pid;		// process_id of the prgramm tp execute with the plan
-	uint64_t *inst_cnt;	// array of estimated instructions for each task
-	size_t num_tasks;	// number of tasks in the plan
-	pid_t ref_pid;		// pid of the root parent task of the plan used as reference (as generated by libpbm)
-};
-
 enum sched_trigger_syscall { sched_trig_FORK, sched_trig_EXIT, sched_trig_OTHER };
 
 struct syscall_info {
@@ -559,10 +530,6 @@ struct syscall_info {
 
 struct pb_rq
 {
-	struct plan_entry *plan;	// plan (used to be proxy_task)
-	unsigned int size;		// size of the plan
-	unsigned int c_entry;		// index of currently executed entry
-
 	u64 n_pb_cycles;		// amount of timer ticks before admin tasks are allowed to run
 	u64 count_pb_cycles;		// current timer tick count for PB tasks
@@ -943,7 +910,7 @@ static inline int determine_next_mode_pb(struct rq *rq)
 	int mode = PB_DISABLED_MODE;
 	struct pb_rq *pb = &(rq->pb);
 
-	if (pb->c_entry < pb->size)
+	if (!is_plan_finished(pb))
 	{
 		// initial switch
 		if (pb->mode == PB_DISABLED_MODE && pb->is_initialized)
@@ -997,8 +964,8 @@ static inline int determine_next_mode_pb(struct rq *rq)
 			 * -1 == unrunnable
 			 */
 			mode = PB_ADMIN_MODE;
-			printk(KERN_WARNING "PB TASK %llu BLOCKED BY IO FOR %llu TOO MANY TICKS\n",
-				pb->plan[pb->c_entry].task_id,
+			printk(KERN_WARNING "PB TASK %u BLOCKED BY IO FOR %llu TOO MANY TICKS\n",
+				plan_rt_state_is_empty() ? -1 : plan_rt_state_peek_proc()->pid,
 				pb->count_admin_cycles - pb->n_admin_cycles);
 		}
 	}
diff --git a/pb_utils/pb_submitter/pb_submitter.c b/pb_utils/pb_submitter/pb_submitter.c
index 296c8d011ae287bb91973b2fca0644abc9270fa5..f82ae7d9d6a72ba2f92a650f00209b2b297e2c70 100644
--- a/pb_utils/pb_submitter/pb_submitter.c
+++ b/pb_utils/pb_submitter/pb_submitter.c
@@ -8,14 +8,6 @@
 
 #define PB_SET_PLAN 0x1337
 
-// FIXME DRY with the same struct in sched.h
-typedef struct {
-	pid_t pid;
-	uint64_t *inst_cnt;
-	size_t num_tasks;
-	pid_t ref_pid;
-} pb_plan_t;
-
 static void usage(void)
 {
 	puts("Usage: echo '<ref_proc_pid>' | ./pb_submitter <prog_name> <prog_arg_0> ...");
@@ -28,13 +20,6 @@ static void handler(int n)
 	stop = 1;
 }
 
-volatile sig_atomic_t stop;
-
-static void handler(int n)
-{
-	stop = 1;
-}
-
 int main(int argc, char** argv)
 {
 	int ret;
@@ -101,7 +86,7 @@ int main(int argc, char** argv)
 	pause();
 
 	printf("Selected plan (reference pid): %u, real pid: %u\n", ref_proc, pid);
-	ret = syscall(PB_SET_PLAN, ref_proc);
+	ret = syscall(PB_SET_PLAN, ref_proc, pid);
 
 	// continue running the child
 	kill(pid, SIGCONT);
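
Note (not part of the patch): a minimal userspace sketch of the new two-argument calling convention for pb_set_plan, assuming the raw syscall number PB_SET_PLAN (0x1337) defined in pb_submitter.c above. The wrapper function pb_set_plan() is a hypothetical name; reference_proc is the reference PID recorded by the PBM run (the "ROOT:" pid printed in pbm_join_and_print_graph_self), and root_proc is the PID of the launched process the plan is applied to, matching the (ref_proc, pid) order used in pb_submitter's main(). The kernel side returns -1 on failure.

	#include <stdio.h>
	#include <sys/types.h>
	#include <unistd.h>

	#define PB_SET_PLAN 0x1337	/* project-specific raw syscall number, see pb_submitter.c */

	/* Hypothetical wrapper around the reworked syscall:
	 *   reference_proc - reference PID recorded by the PBM run ("ROOT:" pid)
	 *   root_proc      - PID of the launched process the plan is applied to
	 */
	static long pb_set_plan(pid_t reference_proc, pid_t root_proc)
	{
		long ret = syscall(PB_SET_PLAN, reference_proc, root_proc);

		if (ret < 0)
			fprintf(stderr, "pb_set_plan(%d, %d) failed: %ld\n",
				(int)reference_proc, (int)root_proc, ret);
		return ret;
	}

pb_submitter issues this call right after pausing for the forked child and before sending SIGCONT, as shown in the pb_submitter.c hunk above.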