diff --git a/kernel/sched/pb.c b/kernel/sched/pb.c
index 01c30585430bb377ae352ec5933da09b4925c472..c4a17aacdbdfe72ffc2833a2b38def6c8d329a3e 100644
--- a/kernel/sched/pb.c
+++ b/kernel/sched/pb.c
@@ -149,7 +149,6 @@ void init_pb_rq(struct pb_rq *pb_rq)
 	pb_rq->pevent = NULL;
 	pb_rq->is_initialized = 0;
 	pb_rq->waiting_on_io = 0;
-	raw_spin_lock_init(pb_rq->pb_lock);
 }
 EXPORT_SYMBOL(init_pb_rq);
 
@@ -245,7 +244,6 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
 	struct task_struct *picked = NULL;
 	enum pb_mode current_mode, next_mode;
 	struct pb_rq *pb = &(rq->pb);
-	unsigned long lock_flags;
 
 	current_mode = pb->mode;
 	next_mode = determine_next_mode_pb(rq);
@@ -253,9 +251,7 @@ static struct task_struct * pick_next_task_pb(struct rq *rq,
 
 	if (next_mode == PB_DISABLED_MODE && current_mode == PB_EXEC_MODE) {
 		// After the plan is done, do the cleanup
-		raw_spin_lock_irqsave(pb->pb_lock, lock_flags);
 		terminate_perf_event(pb->pevent);
-		raw_spin_unlock_irqrestore(pb->pb_lock, lock_flags);
 		pb->pevent = NULL;
 		// TODO: Check if we have to free the memory or if perf takes care of it
 		// see 'perf_event_release_kernel(struct perf_event *event)' in core.c
diff --git a/kernel/sched/perf_error_detection.c b/kernel/sched/perf_error_detection.c
index fa3aea7f259b20ae8c812715b31bdb20d4554a80..e9a37a0922d2e65e684e4a22903ed45c9fb26eff 100644
--- a/kernel/sched/perf_error_detection.c
+++ b/kernel/sched/perf_error_detection.c
@@ -23,16 +23,9 @@ int init_perf_event(struct plan_entry *plan_entry, struct perf_event **pevent){
 	pe.config = PERF_COUNT_HW_INSTRUCTIONS;
 	pe.sample_period = plan_entry->n_instr;
 	pe.disabled = 0; // start the counter as soon as we're in userland
-	pe.pinned = 1; // ?
-	pe.exclude_kernel = 0; //////////////// WE TURNED THIS OFF SO WE CAN WORK WITH KERNEL THREADS
+	pe.exclude_kernel = 1; // only count user space
 	pe.exclude_hv = 1; // exclude events that happen in the hypervisor
 
-	// this throws exceptions on my system
-	//pe.precise_ip = 2; // 2 SAMPLE_IP requested to have 0 skid.
-
-	/* Not needed on 3.2? */
-	// pe.wakeup_events = 1;
-
 	// disable irqs to make 'perf_event_ctx_activate' in 'kernel/events/core.c' happy
 	local_irq_save(irq_flags);
 	*pevent = perf_event_create(&pe, 0, plan_entry->task_struct);
@@ -57,7 +50,14 @@ u64 get_perf_counter(struct perf_event *pevent, u64 *perf_counter)
 
 u64 terminate_perf_event(struct perf_event *pevent)
 {
-	return perf_event_release_kernel(pevent);
+	u64 result;
+	unsigned long irq_flags;
+
+	local_irq_save(irq_flags);
+	result = perf_event_release_kernel(pevent);
+	local_irq_restore(irq_flags);
+
+	return result;
 }
 
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index fbdf48949d145923e7967ec96fb09732072b3248..f6d363a4c25a779be182dc08a3879cd29ed86073 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -570,7 +570,6 @@ struct pb_rq
 	 */
 	volatile int is_initialized;
 	volatile int waiting_on_io;
-	raw_spinlock_t *pb_lock; // spinlock used to deactivate interrupts, especially when handling perf events
 };
 
 int pb_submit_plan(struct rq *rq);
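
Net effect of the patch: the pb_lock raw spinlock, whose only role (per the removed sched.h comment) was to deactivate interrupts around perf-event handling, is retired, and the interrupt masking moves into the perf helper itself. A minimal sketch of the resulting pattern follows; the helper name is illustrative, not from the tree — only perf_event_release_kernel() and local_irq_save()/local_irq_restore() are stock kernel APIs, and the u64 return type follows this repo's terminate_perf_event() convention.

	#include <linux/irqflags.h>
	#include <linux/perf_event.h>

	/* Release a kernel-side perf event with local interrupts masked,
	 * mirroring the local_irq_save() that init_perf_event() already
	 * uses around perf_event_create(). Note that the kernel's
	 * perf_event_release_kernel() itself returns int; the u64 here
	 * matches this repo's terminate_perf_event(). */
	static u64 release_pevent_irqs_off(struct perf_event *pevent)
	{
		unsigned long irq_flags;
		u64 result;

		local_irq_save(irq_flags);	/* mask IRQs on this CPU only */
		result = perf_event_release_kernel(pevent);
		local_irq_restore(irq_flags);	/* restore saved IRQ state */

		return result;
	}

With the IRQ handling folded into the callee, pick_next_task_pb() no longer needs lock_flags or a lock around terminate_perf_event(), which is why all three files can drop pb_lock entirely.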