Commit aa7a8b3d authored by mandersch

Move perf event initialization to the pb_submit_plan function, unblocking the pb_scheduler

parent 9d886f7a
@@ -32,6 +32,18 @@ int pb_submit_plan(struct rq *rq)
 		return -1;
 	}
 
+	int i = 0;
+	for (i = 0; i < pb->size; i++){
+		int perf_init_res = init_perf_event(&pb->plan[i], &pb->plan[i].perf_event);
+		if(perf_init_res < 0) {
+			//initialization error detection/handling could happen here
+			printk(KERN_WARNING "PB INIT,%u: FAILED OPEN PERF EVENT\n", i);
+		} else {
+			printk(KERN_DEBUG "PB INIT,%u\n", i);
+		}
+	}
+
 	pb->c_entry = 0;
 	pb->count_pb_cycles = 0;
 	pb->count_admin_cycles = 0;
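
The loop above only logs when init_perf_event fails; as its in-code comment notes, real error handling could happen here. A minimal sketch of what that could look like, reusing the commit's own helpers (init_perf_event, terminate_perf_event) and assuming pb_submit_plan should roll back already-opened events and reject the plan on failure:

	/* Hypothetical error handling for the loop in pb_submit_plan():
	 * on failure, tear down the events opened so far and reject the plan. */
	for (i = 0; i < pb->size; i++) {
		if (init_perf_event(&pb->plan[i], &pb->plan[i].perf_event) < 0) {
			printk(KERN_WARNING "PB INIT,%u: FAILED OPEN PERF EVENT\n", i);
			while (i-- > 0) {			/* entries 0..i-1 were opened successfully */
				terminate_perf_event(pb->plan[i].perf_event);
				pb->plan[i].perf_event = NULL;
			}
			return -1;				/* report failure to the caller */
		}
		printk(KERN_DEBUG "PB INIT,%u\n", i);
	}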
@@ -61,8 +73,9 @@ EXPORT_SYMBOL(set_pb_plan_size);
 void set_pb_plan_entry(struct pb_rq *pb_rq, unsigned int i, u64 n_instr, u64 task_id, struct task_struct *task_struct)
 {
 	pb_rq->plan[i].n_instr = n_instr;
-	pb_rq->plan[i].task_id = task_id;
+	pb_rq->plan[i].task_id = i;
 	pb_rq->plan[i].task_struct = task_struct;
+	pb_rq->plan[i].perf_event = NULL;
 }
 EXPORT_SYMBOL(set_pb_plan_entry);
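
For context, this is roughly how a plan is populated with these accessors before submission. The set_pb_plan_entry and pb_submit_plan signatures come from the hunks above; set_pb_plan_size's signature, the pb_rq location inside struct rq, the instruction counts, and the two task pointers are assumptions for illustration only:

	/* Hypothetical caller: build a two-entry plan and hand it to the PB scheduler.
	 * &rq->pb, set_pb_plan_size(pb, size), task_a and task_b are assumed names. */
	struct pb_rq *pb = &rq->pb;

	set_pb_plan_size(pb, 2);
	set_pb_plan_entry(pb, 0, 1000000, 0, task_a);	/* i, n_instr, task_id, task_struct */
	set_pb_plan_entry(pb, 1, 2000000, 1, task_b);

	if (pb_submit_plan(rq) < 0)	/* now also opens one perf event per plan entry */
		printk(KERN_WARNING "PB: plan was rejected\n");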
@@ -135,21 +148,21 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
 	printk(KERN_ALERT "DEBUG: Passed %s %d \n",__FUNCTION__,__LINE__);
 	if (current_mode == PB_EXEC_MODE) {
 		printk(KERN_ALERT "DEBUG: Passed %s %d \n",__FUNCTION__,__LINE__);
-		if (pb->pevent == NULL) {
+		if (pb->plan[pb->c_entry].perf_event == NULL) {
 			printk("PEVENT IS NULL\n");
 			pb->mode = PB_ADMIN_MODE;
 			return rq->curr;
 		}
 		u64 perf_counter;
-		u64 read_error = get_perf_counter(pb->pevent, &perf_counter);
+		u64 read_error = get_perf_counter(pb->plan[pb->c_entry].perf_event, &perf_counter);
 		if (read_error) {
 			printk(KERN_WARNING "FETCHING PERFORMANCE COUNTER IN PB SCHEDULER FAILED WITH %llu\n", read_error);
 		}
-		terminate_perf_event(pb->pevent);
+		terminate_perf_event(pb->plan[pb->c_entry].perf_event);
 		// TODO: Check if we have to free the memory or if perf takes care of it
 		// see 'perf_event_release_kernel(struct perf_event *event)' in core.c
-		pb->pevent = NULL;
+		pb->plan[pb->c_entry].perf_event = NULL;
 		if (perf_counter < pb->plan[pb->c_entry].n_instr) {
 			u64 under_time = pb->plan[pb->c_entry].n_instr - perf_counter;
@@ -168,29 +181,29 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
 			printk(KERN_DEBUG "PLAN DONE\n");
 			pb->mode = PB_DISABLED_MODE;
 			pb->is_initialized = 0;
-		} else {
-			int perf_init_res = init_perf_event(&pb->plan[pb->c_entry], &pb->pevent);
-			if(perf_init_res < 0) {
-				//initialization error detection/handling could happen here
-				printk(KERN_WARNING "PB EXEC,START,%u,%llu: FAILED OPEN PERF EVENT\n", pb->c_entry, sched_clock());
-			} else {
-				printk(KERN_DEBUG "PB EXEC,START,%u,%llu\n", pb->c_entry, sched_clock());
-			}
+		// } else {
+		// 	int perf_init_res = init_perf_event(&pb->plan[pb->c_entry], &pb->pevent);
+		// 	if(perf_init_res < 0) {
+		// 		//initialization error detection/handling could happen here
+		// 		printk(KERN_WARNING "PB EXEC,START,%u,%llu: FAILED OPEN PERF EVENT\n", pb->c_entry, sched_clock());
+		// 	} else {
+		// 		printk(KERN_DEBUG "PB EXEC,START,%u,%llu\n", pb->c_entry, sched_clock());
+		// 	}
 		}
 	} else if(current_mode == PB_ADMIN_MODE) {
 		printk(KERN_DEBUG "PB ADMIN,STOP,%u,%llu\n", pb->c_entry, sched_clock());
 	} else {
 		printk("Switching from disabled to EXEC\n");
 		// PB_DISABLED_MODE
-		int perf_init_res = init_perf_event(&pb->plan[pb->c_entry], &pb->pevent);
-		printk("HERE");
-		if(perf_init_res < 0) {
-			//initialization error detection/handling could happen here
-			printk(KERN_WARNING "PB INIT,%u,%llu: FAILED OPEN PERF EVENT\n", pb->c_entry, sched_clock());
-		} else {
-			printk(KERN_DEBUG "PB INIT,%u,%llu\n", pb->c_entry, sched_clock());
-		}
+		// int perf_init_res = init_perf_event(&pb->plan[pb->c_entry], &pb->pevent);
+		// printk("HERE");
+		// if(perf_init_res < 0) {
+		// 	//initialization error detection/handling could happen here
+		// 	printk(KERN_WARNING "PB INIT,%u,%llu: FAILED OPEN PERF EVENT\n", pb->c_entry, sched_clock());
+		// } else {
+		// 	printk(KERN_DEBUG "PB INIT,%u,%llu\n", pb->c_entry, sched_clock());
+		// }
 	}
 	if (pb->mode != PB_DISABLED_MODE) {
...
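
pick_next_task_pb now reads and releases the per-entry event through the project's get_perf_counter and terminate_perf_event helpers, whose bodies are not part of this diff. A plausible sketch of them on top of the in-kernel perf API (perf_event_read_value, perf_event_release_kernel), with the return convention taken from the call sites above:

#include <linux/perf_event.h>

/* Sketch only: the real helpers live elsewhere in this repo and are not shown here. */
u64 get_perf_counter(struct perf_event *pevent, u64 *perf_counter)
{
	u64 enabled, running;

	/* perf_event_read_value() returns the accumulated counter value. */
	*perf_counter = perf_event_read_value(pevent, &enabled, &running);
	return 0;	/* the caller above treats any non-zero return as a read error */
}

void terminate_perf_event(struct perf_event *pevent)
{
	/* perf_event_release_kernel() also frees the event, which would answer
	 * the TODO in the hunk above. */
	perf_event_release_kernel(pevent);
}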
@@ -35,19 +35,14 @@ int init_perf_event(struct plan_entry *plan_entry, struct perf_event **pevent){
 	pe.disabled = 0; // start the counter as soon as we're in userland
 	pe.pinned = 1; // ?
 	pe.exclude_kernel = 1;
-	printk("HERE BEFORE");
 	pe.exclude_hv = 1;
-	printk("HERE AFTER");
 	pe.precise_ip = 2; // 2 SAMPLE_IP requested to have 0 skid.
-	printk("HERE AFTER2");
 	/* Not needed on 3.2? */
 	pe.wakeup_events = 1;
-	printk("HERE");
 	// cpu = -1 -> cpu independent (assumed to be regulated by plan)
-	*pevent = perf_event_create(&pe, 0);
-	printk("HERE2");
+	*pevent = perf_event_create(&pe, 0, plan_entry->task_struct);
 	if (IS_ERR(pevent)) {
 		printk(KERN_WARNING "PB ERROR INITIALISING PERF EVENT\n");
@@ -95,12 +90,12 @@ void overflow_handler(
 	printk(KERN_WARNING "PB TASK RAN TOO LONG\n");
 }
 
-struct perf_event* perf_event_create(struct perf_event_attr *hw_event_uptr, int cpu)
+struct perf_event* perf_event_create(struct perf_event_attr *hw_event_uptr, int cpu, struct task_struct *task_struct)
 {
 	return perf_event_create_kernel_counter(
 		&wd_hw_attr,
 		cpu,
-		NULL, /* per CPU */
+		task_struct, /* per CPU */
 		&overflow_handler,
 		NULL /* What's meant by context? 'oprofile' uses NULL */);
 }
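
The third argument of perf_event_create_kernel_counter is the task the counter is bound to (NULL would make it a per-CPU counter), so passing task_struct here is what ties each event to its plan entry. As a point of reference, a self-contained, task-bound instruction counter could be created as sketched below. The helper name pb_create_task_counter is hypothetical, and the PERF_COUNT_HW_INSTRUCTIONS config plus the sample_period of n_instr are assumptions that merely match the "PB TASK RAN TOO LONG" overflow handler, not something this diff shows:

#include <linux/perf_event.h>
#include <linux/err.h>

/* Sketch, assuming the event counts retired userspace instructions and the
 * overflow handler should fire once the entry's instruction budget is used up. */
static int pb_create_task_counter(struct plan_entry *plan_entry,
				  struct perf_event **pevent)
{
	struct perf_event_attr pe = {
		.type		= PERF_TYPE_HARDWARE,
		.size		= sizeof(struct perf_event_attr),
		.config		= PERF_COUNT_HW_INSTRUCTIONS,	/* assumed counter type */
		.sample_period	= plan_entry->n_instr,		/* assumed overflow threshold */
		.pinned		= 1,
		.exclude_kernel	= 1,
		.exclude_hv	= 1,
	};

	/* cpu = -1: count on whichever CPU the bound task happens to run on */
	*pevent = perf_event_create_kernel_counter(&pe, -1,
						   plan_entry->task_struct,
						   &overflow_handler, NULL);
	return IS_ERR(*pevent) ? PTR_ERR(*pevent) : 0;
}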
@@ -15,6 +15,6 @@ void overflow_handler(
 	struct perf_sample_data *,
 	struct pt_regs *regs);
 
-struct perf_event *perf_event_create(struct perf_event_attr *hw_event_uptr, int);
+struct perf_event *perf_event_create(struct perf_event_attr *hw_event_uptr, int cpu, struct task_struct *task_struct);
 
 #endif
@@ -528,6 +528,7 @@ struct plan_entry
 	u64 n_instr;				// number of instructions in the entry
 	u64 task_id;
 	struct task_struct *task_struct;	// linux task struct
+	struct perf_event *perf_event;
 };
 
 struct pb_init_struct
...