Commit 852e49df authored by Tobias Bouschen

Removed unnecessary time measure vars and added error handling

parent 953fd28c
-//#include "sched.h"
 #include <linux/kthread.h>
 #include "perf_error_detection.h"
-/*
- * Kelvin's Testcode
- */
-void set_pb_measure_on(struct pb_rq *pb)
-{
-    pb->measure_k = PB_MEASURE_K_ON;
-    pb->start = sched_clock();
-    pb->ktime = 0;
-    pb->kstart = 0;
-}
-EXPORT_SYMBOL(set_pb_measure_on);
-/*
- * Kelvin's Testcode
- */
-void set_pb_measure_off(struct pb_rq *pb_rq)
-{
-    u64 runtime;
-    u64 stop = sched_clock();
-    pb_rq->measure_k = PB_MEASURE_K_OFF;
-    if (stop < pb_rq->start)
-    {
-        printk(KERN_DEBUG "Start is greater than stop. This is a bug!\n");
-    }
-    runtime = stop - pb_rq->start;
-    printk("Measured for %lluus detected ktime of %lluus\n", runtime, pb_rq->ktime);
-    pb_rq->ktime = 0;
-    pb_rq->kstart = 0;
-    pb_rq->start = 0;
-}
-EXPORT_SYMBOL(set_pb_measure_off);
 /*
  * Kelvin's Testcode
  */
@@ -49,6 +10,9 @@ void set_pb_plan_size(struct pb_rq *pb_rq, unsigned int size)
 }
 EXPORT_SYMBOL(set_pb_plan_size);
+/*
+ * Kelvin's Testcode
+ */
 // insert into pb queue (analogous to enqueue)
 void set_pb_plan_entry(struct pb_rq *pb_rq, unsigned int i, u64 n_instr, u64 task_id, struct task_struct *task_struct)
 {
@@ -67,16 +31,16 @@ void init_pb_rq(struct pb_rq *pb_rq)
     pb_rq->count_admin_cycles = 0;
     pb_rq->mode = PB_DISABLED_MODE;
     pb_rq->c_entry = 0;
-    pb_rq->proxy_task = NULL;
     pb_rq->size = 0;
     pb_rq->pevent = NULL;
-    pb_rq->measure_k = PB_MEASURE_K_OFF;
-    pb_rq->kstart = 0;
-    pb_rq->ktime = 0;
-    pb_rq->start = 0;
 }
 EXPORT_SYMBOL(init_pb_rq);
+void finalize_pq_rq_init(struct pb_rq *pb_rq)
+{
+    pb_rq->is_initialized = 1;
+}
 // task enters the runnable state
 static void
@@ -88,12 +52,7 @@ enqueue_task_pb(struct rq *rq, struct task_struct *p, int flags)
 // task exits the runnable state
 static void dequeue_task_pb(struct rq *rq, struct task_struct *p, int flags)
 {
-    /* - check performance counter:
-     *   compare smaller than expected -> compute difference -> throw event
-     * - unregister papi event
-     * - check flags
-     */
+    // NOP
 }
 static void yield_task_pb(struct rq *rq)
@@ -125,7 +84,9 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
     u64 perf_counter;
     u64 read_error = get_perf_counter(pb->pevent, &perf_counter);
-    //TODO error handling
+    if (read_error) {
+        printk(KERN_WARNING "FETCHING PERFORMANCE COUNTER IN PB SCHEDULER FAILED WITH %llu\n", read_error);
+    }
     terminate_perf_event(pb->pevent);
@@ -145,6 +106,7 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
     if (pb->c_entry >= pb->size) {
         printk(KERN_DEBUG "PLAN DONE\n");
         pb->mode = PB_DISABLED_MODE;
+        pb->is_initialized = 0;
     } else {
         int perf_init_res = init_perf_event(pb->plan[pb->c_entry], &pb->pevent);
         if (perf_init_res < 0) {
...
@@ -536,8 +536,6 @@ struct pb_rq {
     unsigned int size;
     // currently executed entry of the plan
     unsigned int c_entry;
-    // pointer to the dummy task
-    struct task_struct *proxy_task; //TODO: We should comment this out and iterate the plan accordingly
     // number of consecutive timer interrupts for pb tasks
     u64 n_pb_cycles;
@@ -552,17 +550,14 @@ struct pb_rq {
     // one of PB_DISABLED_MODE, PB_EXEC_MODE, PB_ADMIN_MODE
     int mode;
-    //TODO: Do we still need those?
-    int measure_k;
-    u64 kstart;
-    u64 ktime;
-    u64 start;
-    /*
-     * Per core, not per task
-     */
-    struct perf_event *pevent;
+    // one event for each core, not for each task
+    struct perf_event *pevent;
+    /*
+     * flag determining whether the plan is completely initialized and should be run;
+     * this variable must be initialized last
+     */
+    volatile int is_initialized;
 };
 /* Real-Time classes' related field in a runqueue: */
@@ -882,13 +877,7 @@ static inline int determine_next_mode_pb(struct rq *rq)
     if (pb->c_entry < pb->size)
     {
         // initial switch
-        if (pb->mode == PB_DISABLED_MODE &&
-            //TODO:
-            /*
-             * We have to iterate the plan and think about a mechanism to signal that a plan is ready
-             * to be processed - maybe set pb->size to 0 when the plan is done, n when a plan with n entries is ready.
-             */
-            pb->proxy_task != NULL)
+        if (pb->mode == PB_DISABLED_MODE && pb->is_initialized)
         {
             return PB_EXEC_MODE;
         }
@@ -2084,9 +2073,6 @@ extern void init_rt_rq(struct rt_rq *rt_rq);
 extern void init_pb_rq(struct pb_rq *pb_rq);
 extern void init_dl_rq(struct dl_rq *dl_rq);
-extern void set_pb_measure_off(struct pb_rq *pb_rq);
-extern void set_pb_measure_on(struct pb_rq *pb_rq);
 extern void set_pb_plan_size(struct pb_rq *pb_rq, unsigned int size);
 extern void set_pb_plan_entry(struct pb_rq *pb_rq, unsigned int i, u64 n_instr, u64 task_id, struct task_struct *task_struct);
...
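The is_initialized flag introduced above replaces the old proxy_task != NULL check as the plan-ready signal: determine_next_mode_pb() only switches to PB_EXEC_MODE while the flag is set, and pick_next_task_pb() clears it once the plan is done. A minimal admin-side sketch of the intended call order follows, assuming a hypothetical submit_plan() helper with caller-provided task and instruction-count arrays; only init_pb_rq(), set_pb_plan_size(), set_pb_plan_entry() and finalize_pq_rq_init() come from this commit, and using the entry index as task_id is purely illustrative:

/* Hypothetical helper, not part of this commit. */
static void submit_plan(struct pb_rq *pb_rq, struct task_struct **tasks,
                        u64 *n_instrs, unsigned int n)
{
    unsigned int i;

    init_pb_rq(pb_rq);            /* resets mode, c_entry, size, pevent */
    set_pb_plan_size(pb_rq, n);
    for (i = 0; i < n; i++)
        set_pb_plan_entry(pb_rq, i, n_instrs[i], i, tasks[i]);

    /*
     * Publish the plan last: per the comment on is_initialized, the flag
     * must be set only after every other field is written, so the
     * scheduler never picks up a half-built plan.
     */
    finalize_pq_rq_init(pb_rq);
}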