diff --git a/kernel/sched/pb.c b/kernel/sched/pb.c
index c83ab44e7dcb25b3c148831e835943087289fddb..7a180f593fc150c681eaaba70e874bdfb3a44e7a 100644
--- a/kernel/sched/pb.c
+++ b/kernel/sched/pb.c
@@ -96,17 +96,16 @@ int pb_submit_plan(struct rq *rq)
 		return -1;
 	}
 
-	for (; i < pb->size; i++){
-		int perf_init_res = init_perf_event(&pb->plan[i], &pb->plan[i].perf_event);
-		if(perf_init_res < 0) {
-			//initialization error detection/handling could happen here
-			printk(KERN_WARNING "PB INIT,%u: FAILED OPEN PERF EVENT\n", i);
-		} else {
-			printk(KERN_DEBUG "PB INIT,%u\n", i);
-		}
 
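+	// a single perf event is now opened for the entire plan and kept in pb->pevent;
+	// it is terminated in pick_next_task_pb once the last plan entry completes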
+	int perf_init_res = init_perf_event(&pb->plan[i], &pb->pevent);
+	if(perf_init_res < 0) {
+		//initialization error detection/handling could happen here
+		printk(KERN_WARNING "PB INIT,%u: FAILED OPEN PERF EVENT\n", i);
+	} else {
+		printk(KERN_DEBUG "PB INIT,%u\n", i);
 	}
 
 	pb->c_entry = 0;
 	pb->count_pb_cycles = 0;
 	pb->count_admin_cycles = 0;
@@ -138,7 +137,6 @@ void set_pb_plan_entry(struct pb_rq *pb_rq, unsigned int i, u64 n_instr, u64 tas
 	pb_rq->plan[i].n_instr = n_instr;
 	pb_rq->plan[i].task_id = i;
 	pb_rq->plan[i].task_struct = task_struct;
-	pb_rq->plan[i].perf_event = NULL;
 }
 EXPORT_SYMBOL(set_pb_plan_entry);
 
@@ -186,14 +184,14 @@ static void check_preempt_curr_pb(struct rq *rq, struct task_struct *p, int flag
 static struct task_struct * pick_next_task_pb(struct rq *rq,
 		struct task_struct *prev, struct rq_flags *rf)
 {
-	
+
 	// contains task to be executed
 	struct task_struct *picked = NULL;
 	enum pb_mode current_mode, next_mode;
 	struct pb_rq *pb = &(rq->pb);
 
     unsigned long flags;
-	
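+	// remember which entry was current when this pick ran; task_tick_pb compares
+	// it against c_entry to decide whether the running entry is still unfinished
+	// and therefore has to be flagged as preempted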
+	pb->l_entry = pb->c_entry;
 	
 	current_mode = pb->mode;
 	next_mode = determine_next_mode_pb(rq);
@@ -203,7 +201,7 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
 		printk("SWITCHING MODES\n");
 		pb->count_admin_cycles = 0;
 		pb->count_pb_cycles = 0;
-		// Push last onn-plan task back in its corresponding runqueue
+		// Push last non-plan task back in its corresponding runqueue
 		if (next_mode == PB_EXEC_MODE) {
 			// Necessary to manage the preempted task
 			printk("PUT OLD TASK BACK IN RQ\n");
@@ -214,35 +212,32 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
 	if (current_mode == PB_EXEC_MODE && !pb->is_preempted) {
 		unsigned int c_entry_curr;
 		u64 perf_counter;
+		u64 counter_diff;
 		u64 read_error;
 
 		pb->is_in_critical = true;
 		c_entry_curr = pb->c_entry;
 		
-		if(!pb->plan[c_entry_curr].perf_event) {
+		if(!pb->pevent) {
 			printk("WARNING: PERF EVENT IS NULL");
 		}
 
 		// printk(KERN_ALERT "DEBUG: Passed %s %d \n",__FUNCTION__,__LINE__);
-		read_error = get_perf_counter(pb->plan[c_entry_curr].perf_event, &perf_counter);
+		read_error = get_perf_counter(pb->pevent, &perf_counter);
 		if (read_error) {
 			printk(KERN_WARNING "FETCHING PERFORMANCE COUNTER IN PB SCHEDULER FAILED WITH %llu\n", read_error);
 		}
-		raw_spin_lock_irqsave(pb->pb_lock, flags);
-		terminate_perf_event(pb->plan[c_entry_curr].perf_event);
-		raw_spin_unlock_irqrestore(pb->pb_lock, flags);
-		// TODO: Check if we have to free the memory or if perf takes care of it
-		// see 'perf_event_release_kernel(struct perf_event *event)' in core.c
-		pb->plan[c_entry_curr].perf_event = NULL;
-
-		if (perf_counter < pb->plan[c_entry_curr].n_instr) {
-			u64 under_time = pb->plan[c_entry_curr].n_instr - perf_counter;
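+		// the perf event now counts across the whole plan, so this entry's
+		// instruction count is the counter delta since the previous entry finished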
+		counter_diff = perf_counter - pb->total_instr;
+		pb->plan[c_entry_curr].n_instr_counted = counter_diff;
+		pb->total_instr = perf_counter;
+		if (counter_diff < pb->plan[c_entry_curr].n_instr) {
+			u64 under_time = pb->plan[c_entry_curr].n_instr - counter_diff;
 
 			printk(KERN_WARNING "PB TASK %llu RAN %llu INSTRUCTIONS TOO SHORT\n", pb->plan[pb->c_entry].task_id, under_time);
-		} else if (perf_counter > pb->plan[c_entry_curr].n_instr) {
+		} else if (counter_diff > pb->plan[c_entry_curr].n_instr) {
 			//TODO: Check if actually an overflow occurs and an another calculation is necessary
 			// (setting a flag in the perf overflow_handler could be a solution)
-			u64 over_time = perf_counter - pb->plan[c_entry_curr].n_instr;
+			u64 over_time = counter_diff - pb->plan[c_entry_curr].n_instr;
 
 			printk(KERN_WARNING "PB TASK %llu RAN %llu INSTRUCTIONS TOO LONG\n", pb->plan[pb->c_entry].task_id, over_time);
 		}
@@ -250,6 +245,12 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
 		pb->c_entry++;
 		
 		if (pb->c_entry >= pb->size) {
+			raw_spin_lock_irqsave(pb->pb_lock, flags);
+			terminate_perf_event(pb->pevent);
+			raw_spin_unlock_irqrestore(pb->pb_lock, flags);
+			pb->pevent = NULL;
+			// TODO: Check if we have to free the memory or if perf takes care of it
+			// see 'perf_event_release_kernel(struct perf_event *event)' in core.c
 			printk(KERN_DEBUG "PLAN DONE\n");
 			pb->mode = PB_DISABLED_MODE;
 			pb->is_initialized = 0;
@@ -257,8 +258,6 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
 		pb->is_in_critical = false;
 	printk("DONE");
 	}
-	pb->is_preempted = false;
-	
 
 	// EXEC Mode is next, so we return our next task to be executed
 	if (next_mode == PB_EXEC_MODE) {
@@ -268,6 +267,7 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
 		} else if (current_mode == PB_DISABLED_MODE) {
 			printk("Switching from disabled to EXEC\n");
 		}
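+		// clear the preemption flag only when a plan task is actually handed out again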
+		pb->is_preempted = false;
 		picked = pb->plan[pb->c_entry].task_struct;
 	}
 
@@ -301,7 +301,10 @@ static void task_tick_pb(struct rq *rq, struct task_struct *p, int queued)
 
 	if (determine_next_mode_pb(rq) != PB_EXEC_MODE && pb->mode == PB_EXEC_MODE && !pb->is_preempted && !pb->is_in_critical) {
 		printk("Reschudling in task_tick_pb");
-		pb->is_preempted = true;
+		if (pb->l_entry != pb->c_entry){
+			// If the current task is not the last finished one, it is still unfinished, so set the preempted flag
+			pb->is_preempted = true;
+		}
 		resched_curr(rq);
 	}
 }
@@ -367,13 +370,16 @@ static int show_pbsched(struct seq_file *seq, void *v)
         char mode;
 		struct rq *rq;
 		struct pb_rq *pb;
+
+		// perf event stuff here is for testing and will be deleted in the future
 		struct perf_event *event;
 		int perf_init_res;
-
-
 		u64 perf_counter;
 		u64 read_error;
 
+		int i;
+		struct plan_entry *plan;
+
 		cpu = (unsigned long)(v - 2);
 		rq = cpu_rq(cpu);
         pb = &(rq->pb);
@@ -406,6 +412,33 @@ static int show_pbsched(struct seq_file *seq, void *v)
             pb->count_pb_cycles,
             pb->count_admin_cycles
         );
+
+		/* plan stats */
+		if (pb->size) {
+			seq_printf(seq, "\ntask_id n_instr n_instr_counted\n");
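+			// resulting output, with illustrative values:
+			//   task_id n_instr n_instr_counted
+			//   0 95000 95012
+			//   1 95000 queued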
+
+			plan = pb->plan;
+			for (i = 0; i < pb->size; i++) {
+				// only print completed tasks; after completion is_initialized is 0 and we can print the last one
+				if (i < pb->c_entry || !pb->is_initialized) {
+					seq_printf(seq,
+						"%llu %llu %llu\n",
+						plan[i].task_id,
+						plan[i].n_instr,
+						plan[i].n_instr_counted
+					);
+				} else {
+					seq_printf(seq,
+						"%llu %llu queued\n",
+						plan[i].task_id,
+						plan[i].n_instr
+					);
+				}
+			}
+		}
 	}
 	return 0;
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 82c899a6f9e3296e047b2e0e06d8184d932157a3..3269c44f724516843abc1b367b42ea68b14a05b1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -528,7 +528,7 @@ struct plan_entry
 	u64 n_instr; // number of instructions in the entry
 	u64 task_id;
 	struct task_struct *task_struct; // linux task struct
-	struct perf_event *perf_event;
+	u64 n_instr_counted; // number of instructions we counted for the entry
 };
 
 struct pb_init_struct
@@ -551,6 +551,7 @@ struct pb_rq
 	struct plan_entry *plan; // plan (used to be proxy_task)
 	unsigned int size;		 // size of the plan
 	unsigned int c_entry;	 // index of currently executed entry
+	unsigned int l_entry;	 // index of the last finished task, necessary for preemption management
 
 	u64 n_pb_cycles;	 // amount of timer ticks before admin tasks are allowed to run
 	u64 count_pb_cycles; // current timer tick count for PB tasks
@@ -560,6 +561,8 @@ struct pb_rq
 
 	enum pb_mode mode;
 
+	u64 total_instr; // perf counter value when the previous plan entry finished, used to derive per-entry counts
+
 	struct perf_event *pevent; // linux perf handle
 
 	/*
diff --git a/pb_utils/mod_gen/tmpl/module.tt b/pb_utils/mod_gen/tmpl/module.tt
index c2f066d0bbed53a2dde383f028cb3f63e670a967..efa4adb9d6154cefaadec7342862b2155b0206cd 100644
--- a/pb_utils/mod_gen/tmpl/module.tt
+++ b/pb_utils/mod_gen/tmpl/module.tt
@@ -10,30 +10,28 @@ MODULE_DESCRIPTION("PB Scheduler - Module that sets a proxy task and scheduling
 
 static int loop_thread_func(void *data)
 {
-        unsigned int c = 0;
-        // printk(KERN_WARNING "Hello from Module.\n");
+    unsigned int c = 0;
+    while (!kthread_should_stop()) {
         int a = 0;
-        // printk(KERN_WARNING "A.\n");
+        int b = 0;
         set_current_state(TASK_INTERRUPTIBLE);
-        for (;a < 200000; a++){}
-        // printk(KERN_WARNING "B.\n");
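+        // busy-work loop below; accumulating into b and printing it presumably
+        // keeps the compiler from optimizing the loop away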
+        for (; a < 200000; a++) { b = b + a; }
+        printk("b = %d\n", b);
+        schedule();
         c++;
-        // printk(KERN_WARNING "Bye from module.\n");
-        return 0;
+    }
+    return 0;
 }
 
 static void init_rq(struct rq *rq)
 {
     struct pb_rq *pb_rq = &rq->pb;
-    struct task_struct **proxy_task;
-    proxy_task = (struct task_struct **) kmalloc(sizeof(struct task_struct*) * [% plan_size %] , GFP_KERNEL);
+    struct task_struct *proxy_task;
     int i;
-    for (i = 0; i < [% plan_size %]; i++) {
-        proxy_task[i] = kthread_create(loop_thread_func, NULL, "PB proxy thread");
-        proxy_task[i]->sched_class = &pb_sched_class;
-    }
+    proxy_task = kthread_create(loop_thread_func, NULL, "PB proxy thread");
+    proxy_task->sched_class = &pb_sched_class;
     set_pb_plan_size(pb_rq, [% plan_size %]);
-    [% FOREACH entry IN plan %]set_pb_plan_entry(pb_rq, [%entry.index%], [%entry.exec_t%], [%entry.idle_t%], proxy_task[[%entry.index%]]);
+    [% FOREACH entry IN plan %]set_pb_plan_entry(pb_rq, [%entry.index%], [%entry.exec_t%], [%entry.idle_t%], proxy_task);
     [% END %]
 
     pb_submit_plan(rq);