diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8983b6b54534bb1af28917ca90a11de820b1de03..72c06576b54f502843cdbcf56cc1b0eb26af5df7 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -37,6 +37,8 @@
 
 #include "sched.h"
 
+#include "pb_internal.h"
+
 /*
  * Targeted preemption latency for CPU-bound tasks:
  *
@@ -6186,7 +6188,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
 	struct cfs_rq *cfs_rq = &rq->cfs;
 	struct sched_entity *se;
 	struct task_struct *p;
-	int next_mode;
+	enum pb_mode next_mode;
 	int new_tasks;
 
 	next_mode = determine_next_mode_pb(rq);
@@ -8958,7 +8960,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &curr->se;
-	int next_mode;
+	enum pb_mode next_mode;
 	struct pb_rq *pb = &(rq->pb);
 
 	for_each_sched_entity(se) {
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 3dce92055e4f3d03fbc70849d30e1381603c6c26..1a52e16e47fd50945cae1c35541e4f3ed2eef197 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -1,4 +1,5 @@
 #include "sched.h"
+#include "pb_internal.h"
 
 /*
  * idle-task scheduling class.
@@ -26,7 +27,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
 static struct task_struct *
 pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
-	int next_mode;
+	enum pb_mode next_mode;
 
 	next_mode = determine_next_mode_pb(rq);
 
@@ -62,9 +63,9 @@ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
 {
 	struct pb_rq *pb = &(rq->pb);
-	int next_mode;
+	enum pb_mode next_mode;
 
-	if(pb->mode==PB_ADMIN_MODE){
+	if (pb->mode == PB_ADMIN_MODE) {
 		pb->count_admin_cycles++;
 	}
 
diff --git a/kernel/sched/pb.c b/kernel/sched/pb.c
index d6e87803d435248e1d8ef65ed4743fcf003f8d3a..764589e0321cdebb98d6fb63c57feaf771a3e71e 100644
--- a/kernel/sched/pb.c
+++ b/kernel/sched/pb.c
@@ -1,8 +1,48 @@
-#include <linux/kthread.h>
-#include "perf_error_detection.h"
+#include "pb_internal.h"
 
 /*
- * Kelvin's Testcode
+ * A runqueue can only be initialized while no plan is currently being executed
+ */
+int pb_init_rq(struct pb_init_struct *initstr, struct rq *rq)
+{
+	/*
+	 * Volatile so the compiler keeps the store order; is_initialized must be set last.
+	 */
+	volatile struct pb_rq *pb = (volatile struct pb_rq *)(&(rq->pb));
+
+	if (pb->mode != PB_DISABLED_MODE)
+		return -1;
+
+	pb->plan = initstr->plan;
+	pb->size = initstr->size;
+	pb->n_pb_cycles = initstr->n_pb_cycles;
+	pb->n_admin_cycles = initstr->n_admin_cycles;
+
+	pb->c_entry = 0;
+	pb->count_pb_cycles = 0;
+	pb->count_admin_cycles = 0;
+
+	pb->is_initialized = 1;	// must be initialized last
+
+	return 0;
+}
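+
+/*
+ * Usage sketch (illustrative only, not part of the patch): the values
+ * below are assumptions chosen for the example.
+ *
+ *	struct pb_init_struct init = {
+ *		.plan		= entries,	// array of struct plan_entry
+ *		.size		= 2,
+ *		.n_pb_cycles	= 100,	// timer ticks in exec mode
+ *		.n_admin_cycles	= 20,	// timer ticks in admin mode
+ *	};
+ *
+ *	if (pb_init_rq(&init, rq))
+ *		;	// a plan is already running on this rq
+ */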
+/*
+ * Kelvin's test code
  */
 void set_pb_plan_size(struct pb_rq *pb_rq, unsigned int size)
 {
@@ -70,7 +93,7 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
 {
 	// contains task to be executed
 	struct task_struct *picked = NULL;
-	int current_mode, next_mode;
+	enum pb_mode current_mode, next_mode;
 	struct pb_rq *pb = &(rq->pb);
 
 	current_mode = pb->mode;
@@ -89,6 +112,9 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
 			}
 
 			terminate_perf_event(pb->pevent);
+			// TODO: Check if we have to free the memory or if perf takes care of it
+			// see 'perf_event_release_kernel(struct perf_event *event)' in core.c
+			pb->pevent = NULL;
 
 			if (perf_counter < pb->plan[pb->c_entry].n_instr){
 				u64 under_time = pb->plan[pb->c_entry].n_instr - perf_counter;
@@ -161,7 +187,7 @@ static void set_curr_task_pb(struct rq *rq)
  */
 static void task_tick_pb(struct rq *rq, struct task_struct *p, int queued)
 {
-	int next_mode;
+	enum pb_mode next_mode;
 	struct pb_rq *pb = &(rq->pb);
 
 	pb->count_pb_cycles++;
@@ -216,3 +242,47 @@
 	.update_curr		= update_curr_pb, // NOP
 };
 EXPORT_SYMBOL(pb_sched_class);
+
+// Determines the next mode of the PB scheduler.
+// Declared in pb_internal.h since pb.c, fair.c and idle_task.c all use it.
+enum pb_mode determine_next_mode_pb(struct rq *rq)
+{
+	enum pb_mode mode = PB_DISABLED_MODE;
+	struct pb_rq *pb = &(rq->pb);
+
+	if (pb->c_entry < pb->size) {
+		// initial switch once a plan has been initialized
+		if (pb->mode == PB_DISABLED_MODE && pb->is_initialized)
+			return PB_EXEC_MODE;
+
+		if (pb->mode == PB_EXEC_MODE) {
+			// stay in exec mode for n_pb_cycles timer ticks
+			// Is the tick interrupt active at this moment?
+			if (pb->count_pb_cycles > pb->n_pb_cycles) {
+				mode = PB_ADMIN_MODE;
+				pb->count_pb_cycles = 0;
+			} else {
+				mode = PB_EXEC_MODE;
+			}
+		} else if (pb->mode == PB_ADMIN_MODE) {
+			// stay in admin mode for n_admin_cycles timer ticks
+			// Is the tick interrupt active at this moment?
+			if (pb->count_admin_cycles > pb->n_admin_cycles) {
+				mode = PB_EXEC_MODE;
+				pb->count_admin_cycles = 0;
+			} else {
+				mode = PB_ADMIN_MODE;
+			}
+		}
+	}
+
+	return mode;
+}
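+
+/*
+ * Mode transitions implemented above, in summary:
+ *	DISABLED -> EXEC	once a plan is initialized
+ *	EXEC	 -> ADMIN	after n_pb_cycles ticks in exec mode
+ *	ADMIN	 -> EXEC	after n_admin_cycles ticks in admin mode
+ *	any	 -> DISABLED	once the whole plan has run (c_entry == size)
+ */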
diff --git a/kernel/sched/pb.h b/kernel/sched/pb.h
new file mode 100644
index 0000000000000000000000000000000000000000..5813061361e50de1729edf850f123648a3ef420f
--- /dev/null
+++ b/kernel/sched/pb.h
@@ -0,0 +1,45 @@
+#ifndef KERNEL_SCHED_PB_H_
+#define KERNEL_SCHED_PB_H_
+
+#include <linux/types.h>	// u64
+
+struct task_struct;	// forward declaration; the full definition is not needed here
+
+/*
+ * Mode of the PB scheduler
+ */
+enum pb_mode {
+	PB_DISABLED_MODE,	// PB scheduler is disabled (no plan available)
+	PB_EXEC_MODE,		// PB task is being executed
+	PB_ADMIN_MODE		// Admin task is being executed
+};
+
+#define PB_MAX_PLAN_LENGTH 100
+
+/*
+ * Entry for PB-Task
+ */
+struct plan_entry {
+	u64 n_instr; 						// number of instructions for a task
+	u64 task_id;
+	struct task_struct *task_struct;	// linux task struct
+};
+
+struct pb_init_struct {
+	struct plan_entry *plan;	// PB plan
+	unsigned int size; 			// size of the plan
+
+	u64 n_pb_cycles;			// number of timer ticks before admin tasks are allowed to run
+	u64 n_admin_cycles;			// number of timer ticks before PB tasks are allowed to run
+};
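+
+/*
+ * Example (illustrative): n_pb_cycles = 100 and n_admin_cycles = 20 let
+ * PB tasks run for 100 consecutive timer ticks, then admin (CFS) tasks
+ * for 20 ticks, i.e. PB tasks get roughly 100/120 = 5/6 of all ticks
+ * while a plan is active.
+ */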
+
+int pb_init_rq(struct pb_init_struct *initstr, struct rq *rq);
+
+#endif /* KERNEL_SCHED_PB_H_ */
diff --git a/kernel/sched/pb_internal.h b/kernel/sched/pb_internal.h
new file mode 100644
index 0000000000000000000000000000000000000000..47810245fdf65ff08c230cd41aa523425731a341
--- /dev/null
+++ b/kernel/sched/pb_internal.h
@@ -0,0 +1,12 @@
+#ifndef KERNEL_SCHED_PB_INTERNAL_H_
+#define KERNEL_SCHED_PB_INTERNAL_H_
+
+#include <linux/kthread.h>
+#include "sched.h"
+#include "pb.h"
+#include "perf_error_detection.h"
+
+// defined in pb.c; used by pb.c, fair.c and idle_task.c
+enum pb_mode determine_next_mode_pb(struct rq *rq);
+
+#endif /* KERNEL_SCHED_PB_INTERNAL_H_ */
diff --git a/kernel/sched/perf_error_detection.c b/kernel/sched/perf_error_detection.c
index c38c89680387dd8da7828e02b4b2c114ba09cdaa..f2fd20aa7bb4b38a7242c3fe5ec077f3208f7c88 100644
--- a/kernel/sched/perf_error_detection.c
+++ b/kernel/sched/perf_error_detection.c
@@ -1,5 +1,4 @@
 #include "perf_error_detection.h"
-#include <linux/smp.h>
 
 /*
  * Our understanding of perf so far. Please correct as needed.
@@ -10,11 +9,13 @@
  *
  */
 
-//initialize perf event for new task
+
+/*
+ * Initialize perf event for a new task
+ */
 int init_perf_event(struct plan_entry plan_entry, struct perf_event **pevent){
 	struct perf_event_attr pe;
 
-	//set perf_event_attr for init perf event
 	memset(&pe, 0, sizeof(struct perf_event_attr));
 	pe.type = PERF_TYPE_HARDWARE;
 	pe.size = sizeof(struct perf_event_attr);
@@ -55,8 +56,9 @@ u64 terminate_perf_event(struct perf_event *pevent)
 }
 
 
-
-//handle the perf overflow event -> task needed more instructions than planed
+/*
+ * Handle the perf overflow event -> the task needed more instructions than planned
+ */
 void overflow_handler(
 		struct perf_event *event,
 		struct perf_sample_data *data,
@@ -79,5 +81,5 @@ struct perf_event* perf_event_create(struct perf_event_attr *hw_event_uptr, int
 			cpu,
 			NULL, /* per CPU */
 			&overflow_handler,
-			NULL /* Was ist eigentlich context? */);
+			NULL /* What's meant by context? oprofile uses NULL */);
 }
diff --git a/kernel/sched/perf_error_detection.h b/kernel/sched/perf_error_detection.h
index 12fea1f70e5d3bfe93a5d52cf8eb003af669f93f..ccd01998c842ae165b82de09204909ddfc313031 100644
--- a/kernel/sched/perf_error_detection.h
+++ b/kernel/sched/perf_error_detection.h
@@ -1,8 +1,10 @@
 #ifndef __PERF_ERROR_DETECTION_H
 #define __PERF_ERROR_DETECTION_H
+
 #include <linux/perf_event.h>
+#include <linux/smp.h>
 #include "sched.h"
-#endif
+#include "pb_internal.h"
 
 int init_perf_event(struct plan_entry, struct perf_event **pevent);
 
@@ -16,3 +18,5 @@ void overflow_handler(
 		struct pt_regs *regs);
 
 struct perf_event *perf_event_create(struct perf_event_attr *hw_event_uptr, int);
+
+#endif
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0425d47670d14ddfc0adbb5816d9d5cf8f0f7ca1..9f71fcaa6bf4c93140822d0065db347ab8d6e62f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -29,7 +29,6 @@
 #include <linux/irq_work.h>
 #include <linux/tick.h>
 #include <linux/slab.h>
-#include <linux/perf_event.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
@@ -39,21 +38,15 @@
 #include "cpudeadline.h"
 #include "cpuacct.h"
 
+#include <linux/perf_event.h>	// For performance counter
+#include "pb.h"					// For PB
+
 #ifdef CONFIG_SCHED_DEBUG
 # define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
 #else
 # define SCHED_WARN_ON(x)	({ (void)(x), 0; })
 #endif
 
-#define PB_DISABLED_MODE 0
-#define PB_EXEC_MODE 1
-#define PB_ADMIN_MODE 2
-
-#define PB_MEASURE_K_OFF 0
-#define PB_MEASURE_K_ON 1
-
-#define PB_MAX_PLAN_LENGTH 100
-
 struct rq;
 struct cpuidle_state;
 
@@ -508,48 +501,20 @@ struct cfs_rq {
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 };
 
-static inline int rt_bandwidth_enabled(void)
-{
-	return sysctl_sched_rt_runtime >= 0;
-}
+struct pb_rq {
+	struct plan_entry *plan;	// PB plan
+	unsigned int size; 			// size of the plan
+	unsigned int c_entry;		// index of currently executed entry
 
-/* RT IPI pull logic requires IRQ_WORK */
-#ifdef CONFIG_IRQ_WORK
-# define HAVE_RT_PUSH_IPI
-#endif
+	u64 n_pb_cycles;			// number of timer ticks before admin tasks are allowed to run
+	u64 count_pb_cycles;		// current timer tick count for PB tasks
 
-//task in plan entry
-struct plan_entry {
-	// number of instructions for tasks
-	u64 n_instr;
-	//task id
-	u64 task_id;
-	//task struct
-	struct task_struct *task_struct;
-};
+	u64 n_admin_cycles;			// number of timer ticks before PB tasks are allowed to run
+	u64 count_admin_cycles;		// current timer tick count for admin tasks
 
-struct pb_rq {
-	struct plan_entry plan[PB_MAX_PLAN_LENGTH];
-	// size of the plan
-	unsigned int size;
-	// currently executed entry of the plan
-	unsigned int c_entry;
-
-	// amount of timer consecutive timer interrupts for pb tasks
-	u64 n_pb_cycles;
-	// counter for timer interrupt cycles for pb tasks
-	u64 count_pb_cycles;
-	// amount of consecutive timer interrupt cycles for admin tasks (cfs)
-	u64 n_admin_cycles;
-	// counter for timer interrupt cycles for admin tasks (cfs)
-	u64 count_admin_cycles;
-
-	// mode of the PB-Scheduler (introduced to improve the readability)
-	// one of PB_DISABLED_MODE, PB_EXEC_MODE, PB_ADMIN_MODE
-	int mode;
-
-	// one event for each core, not for each task
-	struct perf_event *pevent;
+	enum pb_mode mode;
+
+	struct perf_event *pevent;	// linux perf handle (one event per core, not per task)
 
 	/*
 	 * flag determining whether the plan is completely initialized and should be run
@@ -558,6 +523,16 @@ struct pb_rq {
 	volatile int is_initialized;
 };
 
+static inline int rt_bandwidth_enabled(void)
+{
+	return sysctl_sched_rt_runtime >= 0;
+}
+
+/* RT IPI pull logic requires IRQ_WORK */
+#ifdef CONFIG_IRQ_WORK
+# define HAVE_RT_PUSH_IPI
+#endif
+
 /* Real-Time classes' related field in a runqueue: */
 struct rt_rq {
 	struct rt_prio_array active;
@@ -865,54 +840,6 @@ static inline int cpu_of(struct rq *rq)
 }
 
 
-// used to determine the next mode of the PB-Scheduler
-// This function is located in sched.h since pb.c and fair.c are using this function
-static inline int determine_next_mode_pb(struct rq *rq)
-{
-	int mode = PB_DISABLED_MODE;
-	struct pb_rq *pb = &(rq->pb);
-
-	if (pb->c_entry < pb->size)
-	{
-		// initial switch
-		if (pb->mode == PB_DISABLED_MODE &&	pb->is_initialized)
-		{
-			return PB_EXEC_MODE;
-		}
-		else
-		{
-			if (pb->mode == PB_EXEC_MODE)
-			{
-				//stay for n timer interrupts cycles in exec mode
-				/*
-				 * Is the tick interrupt active in this moment?
-				 */
-				if(pb->count_pb_cycles > pb->n_pb_cycles){
-					mode = PB_ADMIN_MODE;
-					pb->count_pb_cycles = 0;
-				}else{
-					mode = PB_EXEC_MODE;
-				}
-			}
-			else if (pb->mode == PB_ADMIN_MODE)
-			{
-				//stay for n timer interrupt cylces in uall mode for admin tasks
-				/*
-				 * Is the tick interrupt active in this moment?
-				 */
-				if(pb->count_admin_cycles > pb->n_admin_cycles){
-					mode = PB_EXEC_MODE;
-					pb->count_admin_cycles = 0;
-				}else{
-					mode = PB_ADMIN_MODE;
-				}
-			}
-		}
-	}
-	return mode;
-}
-
-
 #ifdef CONFIG_SCHED_SMT
 
 extern struct static_key_false sched_smt_present;