diff --git a/kernel/sched/pb.c b/kernel/sched/pb.c
index beb9cb92f13732b745cdeee81fb4e873ffea3509..fe3b3f6d833b94a85ba6f7eb75df968368f5e83e 100644
--- a/kernel/sched/pb.c
+++ b/kernel/sched/pb.c
@@ -86,13 +86,14 @@ int pb_submit_plan(struct rq *rq)
 	 * Must be volatile to ensure correct initialization order
 	 */
 	volatile struct pb_rq * pb = (volatile struct pb_rq*)(&(rq->pb));
+	int perf_init_res;
 	int i = 0;
 
 	if (pb->mode != PB_DISABLED_MODE) {
 		return -1;
 	}
 
-	int perf_init_res = init_perf_event(&pb->plan[i], &pb->pevent);
+	perf_init_res = init_perf_event(&pb->plan[i], &pb->pevent);
 	if(perf_init_res < 0) {
 		//initialization error detection/handling could happen here
 		printk(KERN_WARNING "PB INIT,%u: FAILED OPEN PERF EVENT\n", i);
@@ -129,7 +130,7 @@ EXPORT_SYMBOL(set_pb_plan_size);
 void set_pb_plan_entry(struct pb_rq *pb_rq, unsigned int i, u64 n_instr, u64 task_id, struct task_struct *task_struct)
 {
 	pb_rq->plan[i].n_instr = n_instr;
-	pb_rq->plan[i].task_id = i;
+	pb_rq->plan[i].task_id = task_id;
 	pb_rq->plan[i].task_struct = task_struct;
 }
 EXPORT_SYMBOL(set_pb_plan_entry);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3269c44f724516843abc1b367b42ea68b14a05b1..06ad593f6dcf3b3dea061c134360455f80fde837 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -526,7 +526,7 @@ enum pb_mode
 struct plan_entry
 {
 	u64 n_instr; // number of instructions in the entry
-	u64 task_id;
+	u64 task_id; // identifier of the plan entry
 	struct task_struct *task_struct; // linux task struct
 	u64 n_instr_counted; // number of instructions we counted for the entry
 };
@@ -541,9 +541,9 @@ struct pb_init_struct
 };
 
 struct pb_plan {
- 	    pid_t pid;
-        uint64_t *inst_cnt;
-        size_t num_tasks;
+ 	    pid_t pid;			 // process ID of the program to execute with the plan
+        uint64_t *inst_cnt;  // array of estimated instructions for each task
+        size_t num_tasks;	 // number of tasks in the plan
 };
 
 struct pb_rq
@@ -559,9 +559,9 @@ struct pb_rq
 	u64 n_admin_cycles;		// amount of timer ticks before PB tasks are allowed to run
 	u64 count_admin_cycles; // current timer tick count for admin tasks
 
-	enum pb_mode mode;
+	enum pb_mode mode;		// current scheduler mode
 
-	u64 total_instr;
+	u64 total_instr;		// total number of instructions counted for the current plan
 
 	struct perf_event *pevent; // linux perf handle
 
@@ -570,10 +570,9 @@ struct pb_rq
 	 * this variable must be initialized last
 	 */
 	volatile int is_initialized;
-	// volatile int need_mode_change;
-	volatile int is_preempted;
-	volatile int is_in_critical;
-	raw_spinlock_t *pb_lock;
+	volatile int is_preempted;	// flag indicating whether the last task was preempted prematurely during the last mode switch
+	volatile int is_in_critical; // flag indicating whether the scheduler is in the critical section of pick_next_task_pb
+	raw_spinlock_t *pb_lock;	 // spinlock used to disable interrupts, especially while handling perf events
 };
 
 int pb_submit_plan(struct rq *rq);
diff --git a/pb_utils/mod_gen/tmpl/module.tt b/pb_utils/mod_gen/tmpl/module.tt
index 4855ac59bed0357ff1161a0dbb687554d3b40f78..068b6ee3b966a91f8f0b6d8ba21cc4386b8c59da 100644
--- a/pb_utils/mod_gen/tmpl/module.tt
+++ b/pb_utils/mod_gen/tmpl/module.tt
@@ -31,7 +31,7 @@ static void init_rq(struct rq *rq)
     proxy_task = kthread_create(loop_thread_func, NULL, "PB proxy thread");
     proxy_task->sched_class = &pb_sched_class;
     set_pb_plan_size(pb_rq, [% plan_size %]);
-    [% FOREACH entry IN plan %]set_pb_plan_entry(pb_rq, [%entry.index%], [%entry.exec_t%], [%entry.idle_t%], proxy_task);
+    [% FOREACH entry IN plan %]set_pb_plan_entry(pb_rq, [%entry.index%], [%entry.exec_t%], [%entry.index%], proxy_task);
     [% END %]
 
     pb_submit_plan(rq);