blob: d879bb3a72001743a08d191794f16faf1e017b2c [file] [log] [blame]
From 7a3b2ac7488b7b848dd027e010fdf158337a613b Mon Sep 17 00:00:00 2001
From: Vineeth Pillai <vineethrp@google.com>
Date: Thu, 11 Jan 2024 09:16:23 -0500
Subject: [PATCH] CHROMIUM: kvm: trace: debug tracepoints for dynamic vcpu
boost feature
These assist in tracing the behavior of the vcpu boosting and throttling
functionality. They are for debugging only.
UPSTREAM-TASK=b:303645537
BUG=b:262267726
TEST=boot
Co-developed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Signed-off-by: Vineeth Pillai <vineethrp@google.com>
(cherry picked from commit b59e9f49bdaa98db879bc93adebf97971b2a1677)
Change-Id: I14dbb2b0465129986ff7e3f234cd81647ec1bb01
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/kernel/+/5426732
Commit-Queue: Vineeth Pillai <vineethrp@google.com>
Reviewed-by: Joel Fernandes <joelaf@google.com>
Tested-by: Vineeth Pillai <vineethrp@google.com>
---
arch/x86/kvm/x86.c | 10 +++
include/trace/events/kvm.h | 123 +++++++++++++++++++++++++++++++++++
include/trace/events/sched.h | 26 ++++++++
kernel/sched/core.c | 1 +
virt/kvm/kvm_main.c | 3 +
5 files changed, 163 insertions(+)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4125925544346a05937d52800615dccdf78d47f8..195cc632756cc6d5ec1ec38a56b14f9b0a2c987f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -209,6 +209,9 @@ module_param(pvsched_max_taskprio_us, uint, 0644);
static enum hrtimer_restart boost_throttle_timer_fn(struct hrtimer *data)
{
+ struct vcpu_pv_sched *pv_sched = container_of(data, struct vcpu_pv_sched, boost_thr_timer);
+
+ trace_kvm_pvsched_hrtimer_expire(pv_sched);
return HRTIMER_NORESTART;
}
#endif
@@ -10963,6 +10966,7 @@ static inline void kvm_vcpu_pvsched_update_vmenter(struct kvm_vcpu_arch *arch)
WARN_ON(max_ns <= elapsed_ns);
expire = ktime_add_ns(arch->pv_sched.vmentry_ts, max_ns - elapsed_ns);
hrtimer_start(&arch->pv_sched.boost_thr_timer, expire, HRTIMER_MODE_ABS_HARD);
+ trace_kvm_pvsched_hrtimer_start(expire, &arch->pv_sched);
}
static inline void kvm_vcpu_pvsched_update_vmexit(struct kvm_vcpu_arch *arch)
@@ -10991,6 +10995,7 @@ static inline void kvm_vcpu_pvsched_update_vmexit(struct kvm_vcpu_arch *arch)
thr_type = KVM_PVSCHED_BOOST_TASKPRIO;
}
if (elapsed_ns >= max_ns) {
+ trace_kvm_pvsched_throttled(&arch->pv_sched, elapsed_ns, max_ns);
arch->pv_sched.throttled = thr_type;
arch->pv_sched.boosted = 0;
arch->pv_sched.kerncs_ns = arch->pv_sched.taskprio_ns = 0;
@@ -11005,6 +11010,7 @@ static inline void kvm_vcpu_pvsched_update_vmexit(struct kvm_vcpu_arch *arch)
}
if (elapsed_ns >= max_ns) {
+ trace_kvm_pvsched_unthrottled(&arch->pv_sched, elapsed_ns, max_ns);
arch->pv_sched.throttled = 0;
arch->pv_sched.kerncs_ns = arch->pv_sched.taskprio_ns = 0;
}
@@ -11017,9 +11023,12 @@ static inline void kvm_vcpu_pvsched_update_vmexit(struct kvm_vcpu_arch *arch)
*/
static void record_vcpu_pv_sched(struct kvm_vcpu *vcpu)
{
+ union vcpu_sched_attr *attr = &vcpu->arch.pv_sched.attr;
if (!kvm_arch_vcpu_pv_sched_enabled(&vcpu->arch))
return;
+ trace_kvm_pvsched_schedattr(1, attr);
+
pagefault_disable();
kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.pv_sched.data,
&vcpu->arch.pv_sched.attr, PV_SCHEDATTR_HOST_OFFSET, sizeof(union vcpu_sched_attr));
@@ -11041,6 +11050,7 @@ static inline void kvm_vcpu_do_pv_sched(struct kvm_vcpu *vcpu)
if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.pv_sched.data,
&attr, PV_SCHEDATTR_GUEST_OFFSET, sizeof(attr)))
return;
+ trace_kvm_pvsched_schedattr(2, &attr);
kvm_vcpu_set_sched(vcpu, attr);
}
}
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 74e40d5d4af424e7c3d207be68ff3076094df9d7..a8ec4e2eaeb94863986d8ef718c7afd815868363 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -489,6 +489,129 @@ TRACE_EVENT(kvm_test_age_hva,
TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
);
+#ifdef CONFIG_PARAVIRT_SCHED_KVM
+TRACE_EVENT(kvm_pvsched_hrtimer_start,
+ TP_PROTO(unsigned long long expire, struct vcpu_pv_sched *pv_sched),
+ TP_ARGS(expire, pv_sched),
+
+ TP_STRUCT__entry(
+ __field(unsigned long long, expire)
+ __field(int, boosted)
+ __field(int, throttled)
+ ),
+
+ TP_fast_assign(
+ __entry->expire = expire;
+ __entry->boosted = pv_sched->boosted;
+ __entry->throttled = pv_sched->throttled;
+ ),
+
+ TP_printk("expire: %llu, boosted: %x, throttled: %x",
+ __entry->expire, __entry->boosted, __entry->throttled)
+);
+
+TRACE_EVENT(kvm_pvsched_hrtimer_expire,
+ TP_PROTO(struct vcpu_pv_sched *pv_sched),
+ TP_ARGS(pv_sched),
+
+ TP_STRUCT__entry(
+ __field(int, boosted)
+ __field(int, throttled)
+ ),
+
+ TP_fast_assign(
+ __entry->boosted = pv_sched->boosted;
+ __entry->throttled = pv_sched->throttled;
+ ),
+
+ TP_printk("boosted: %x, throttled: %x",
+ __entry->boosted, __entry->throttled)
+);
+
+TRACE_EVENT(kvm_pvsched_throttled,
+ TP_PROTO(struct vcpu_pv_sched *pv_sched, unsigned long long elapsed_ns,
+ unsigned long long max_ns),
+ TP_ARGS(pv_sched, elapsed_ns, max_ns),
+
+ TP_STRUCT__entry(
+ __field(int, boosted)
+ __field(unsigned long long, elapsed_ns)
+ __field(unsigned long long, max_ns)
+ ),
+
+ TP_fast_assign(
+ __entry->boosted = pv_sched->boosted;
+ __entry->elapsed_ns = elapsed_ns;
+ __entry->max_ns = max_ns;
+ ),
+
+ TP_printk("boosted: %x, boosted_ns: %llu, max_boost_ns: %llu",
+ __entry->boosted, __entry->elapsed_ns, __entry->max_ns)
+);
+
+TRACE_EVENT(kvm_pvsched_unthrottled,
+ TP_PROTO(struct vcpu_pv_sched *pv_sched, unsigned long long elapsed_ns,
+ unsigned long long max_ns),
+ TP_ARGS(pv_sched, elapsed_ns, max_ns),
+
+ TP_STRUCT__entry(
+ __field(int, throttled)
+ __field(unsigned long long, elapsed_ns)
+ __field(unsigned long long, max_ns)
+ ),
+
+ TP_fast_assign(
+ __entry->throttled = pv_sched->throttled;
+ __entry->elapsed_ns = elapsed_ns;
+ __entry->max_ns = max_ns;
+ ),
+
+ TP_printk("throttled: %x, throttled_ns: %llu, max_boost_ns: %llu",
+ __entry->throttled, __entry->elapsed_ns, __entry->max_ns)
+);
+
+TRACE_EVENT(kvm_pvsched_vcpu_state,
+ TP_PROTO(int boosted, int throttled),
+ TP_ARGS(boosted, throttled),
+
+ TP_STRUCT__entry(
+ __field(int, boosted)
+ __field(int, throttled)
+ ),
+
+ TP_fast_assign(
+ __entry->boosted = boosted;
+ __entry->throttled = throttled;
+ ),
+
+ TP_printk("boosted: %x, throttled: %x", __entry->boosted, __entry->throttled)
+);
+
+TRACE_EVENT(kvm_pvsched_schedattr,
+ TP_PROTO(int call_loc, union vcpu_sched_attr *attr),
+ TP_ARGS(call_loc, attr),
+
+ TP_STRUCT__entry(
+ __field(int, call_loc)
+ __field(int, sched_policy)
+ __field(int, rt_priority)
+ __field(int, sched_nice)
+ __field(int, kern_cs)
+ ),
+
+ TP_fast_assign(
+ __entry->call_loc = call_loc;
+ __entry->sched_policy = attr->sched_policy;
+ __entry->rt_priority = attr->rt_priority;
+ __entry->sched_nice = attr->sched_nice;
+ __entry->kern_cs = attr->kern_cs;
+ ),
+
+ TP_printk("call_loc: %d, policy: %d, rt_prio: %d, nice: %d, kerncs=%x",
+ __entry->call_loc, __entry->sched_policy, __entry->rt_priority,
+ __entry->sched_nice, __entry->kern_cs)
+);
+#endif
#endif /* _TRACE_KVM_MAIN_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 9f07e8dac1acfc2ba9e003eb862c93a16907fee7..9c7390fa908c922d02cdb1b72051234e12f49f69 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -848,6 +848,32 @@ DECLARE_TRACE(sched_compute_energy_tp,
unsigned long max_util, unsigned long busy_time),
TP_ARGS(p, dst_cpu, energy, max_util, busy_time));
+#ifdef CONFIG_PARAVIRT_SCHED
+#include <linux/kvm_para.h>
+TRACE_EVENT(sched_pvsched_vcpu_update,
+ TP_PROTO(union vcpu_sched_attr *attr),
+ TP_ARGS(attr),
+
+ TP_STRUCT__entry(
+ __field(int, sched_policy)
+ __field(int, rt_priority)
+ __field(int, sched_nice)
+ __field(int, kern_cs)
+ ),
+
+ TP_fast_assign(
+ __entry->sched_policy = attr->sched_policy;
+ __entry->rt_priority = attr->rt_priority;
+ __entry->sched_nice = attr->sched_nice;
+ __entry->kern_cs = attr->kern_cs;
+ ),
+
+ TP_printk("policy: %d, rt_prio: %d, nice: %d, kerncs=%x",
+ __entry->sched_policy, __entry->rt_priority,
+ __entry->sched_nice, __entry->kern_cs)
+);
+#endif /* CONFIG_PARAVIRT_SCHED */
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2f19f5f043646b18d1da8527e5c524dcbff026ea..b08d799112cc3d497c89e465c0a1d6459ca40c72 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -186,6 +186,7 @@ static inline void __pv_sched_vcpu_attr_update(union vcpu_sched_attr attr,
return;
this_cpu_write(pv_sched.attr[PV_SCHEDATTR_GUEST].pad, attr.pad);
+ trace_sched_pvsched_vcpu_update(&attr);
if (!lazy)
kvm_pv_sched_notify_host();
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index aded8805c46a358f96b3fe065db3754c95b58a78..1fd5a96fe11c4e679c485f3a5b2750e09e02803e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4286,6 +4286,7 @@ int kvm_vcpu_set_sched(struct kvm_vcpu *vcpu, union vcpu_sched_attr attr)
put_task_struct(vcpu_task);
attr.enabled = kvm_vcpu_sched_enabled(vcpu);
+ trace_kvm_pvsched_schedattr(ret, &attr);
/*
* If the feature is disabled, we set it in the priority field to let the guest know.
*/
@@ -4301,6 +4302,8 @@ int kvm_vcpu_set_sched(struct kvm_vcpu *vcpu, union vcpu_sched_attr attr)
kvm_arch_vcpu_set_sched_attr(&vcpu->arch, attr);
kvm_make_request(KVM_REQ_VCPU_PV_SCHED, vcpu);
+ trace_kvm_pvsched_vcpu_state(kvm_arch_vcpu_is_boosted(&vcpu->arch),
+ kvm_arch_vcpu_is_throttled(&vcpu->arch));
return ret;
}
--
2.45.2.803.g4e1b14247a-goog