1
- From f6c3a183801123395e05959b7a6508bf420eb726 Mon Sep 17 00:00:00 2001
1
+ From e7b85181cec682a351825e7af734a8b1805d9ae3 Mon Sep 17 00:00:00 2001
2
2
From: Patrick Roy <
[email protected] >
3
- Date: Tue, 3 Jun 2025 13:57:15 +0100
3
+ Date: Tue, 3 Jun 2025 14:55:28 +0100
4
4
Subject: [PATCH] de-gpc-ify kvm-clock
5
5
6
6
Signed-off-by: Patrick Roy <
[email protected] >
7
7
---
8
8
arch/x86/include/asm/kvm_host.h | 2 +-
9
- arch/x86/kvm/x86.c | 47 ++++++++++-----------------------
10
- 2 files changed, 15 insertions(+), 34 deletions(-)
9
+ arch/x86/kvm/x86.c | 63 ++++++++++-----------------------
10
+ 2 files changed, 20 insertions(+), 45 deletions(-)
11
11
12
12
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
13
- index ce9ad4cd93c5..6b2e9c2ee599 100644
13
+ index b874e54a5ee4..7cdef9002af8 100644
14
14
--- a/arch/x86/include/asm/kvm_host.h
15
15
+++ b/arch/x86/include/asm/kvm_host.h
16
- @@ -915 ,7 +915 ,7 @@ struct kvm_vcpu_arch {
17
- s8 pvclock_tsc_shift ;
18
- u32 pvclock_tsc_mul ;
16
+ @@ -911 ,7 +911 ,7 @@ struct kvm_vcpu_arch {
17
+ gpa_t time ;
18
+ struct pvclock_vcpu_time_info hv_clock ;
19
19
unsigned int hw_tsc_khz;
20
20
- struct gfn_to_pfn_cache pv_time;
21
21
+ gpa_t system_time;
22
22
/* set guest stopped flag in pvclock flags field */
23
23
bool pvclock_set_guest_stopped_request;
24
24
25
25
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
26
- index 2a02f2457c42..5aad662ec0e1 100644
26
+ index 4b64ab350bcd..5198d3e0fc81 100644
27
27
--- a/arch/x86/kvm/x86.c
28
28
+++ b/arch/x86/kvm/x86.c
29
- @@ -2333 ,12 +2333 ,9 @@ static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
29
+ @@ -2326 ,12 +2326 ,9 @@ static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
30
30
31
31
/* we verify if the enable bit is set... */
32
32
if (system_time & 1)
@@ -41,90 +41,98 @@ index 2a02f2457c42..5aad662ec0e1 100644
41
41
}
42
42
43
43
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
44
- @@ -3132,26 +3129,14 @@ u64 get_kvmclock_ns(struct kvm *kvm)
44
+ @@ -3116,26 +3113,16 @@ u64 get_kvmclock_ns(struct kvm *kvm)
45
+ return data.clock;
46
+ }
45
47
46
- static void kvm_setup_guest_pvclock(struct pvclock_vcpu_time_info *ref_hv_clock,
47
- struct kvm_vcpu *vcpu,
48
+ - static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
48
49
- struct gfn_to_pfn_cache *gpc,
49
- - unsigned int offset)
50
+ - unsigned int offset,
51
+ - bool force_tsc_unstable)
52
+ + static void kvm_setup_guest_pvclock(struct pvclock_vcpu_time_info *ref_hv_clock,
53
+ + struct kvm_vcpu *vcpu,
50
54
+ gpa_t gpa)
51
55
{
56
+ - struct kvm_vcpu_arch *vcpu = &v->arch;
52
57
- struct pvclock_vcpu_time_info *guest_hv_clock;
53
- + struct pvclock_vcpu_time_info guest_hv_clock;
54
- struct pvclock_vcpu_time_info hv_clock;
55
58
- unsigned long flags;
56
-
57
- memcpy(&hv_clock, ref_hv_clock, sizeof(hv_clock));
58
-
59
+ -
59
60
- read_lock_irqsave(&gpc->lock, flags);
60
61
- while (!kvm_gpc_check(gpc, offset + sizeof(*guest_hv_clock))) {
61
62
- read_unlock_irqrestore(&gpc->lock, flags);
62
63
-
63
64
- if (kvm_gpc_refresh(gpc, offset + sizeof(*guest_hv_clock)))
64
65
- return;
65
- -
66
+ + struct pvclock_vcpu_time_info guest_hv_clock;
67
+ + struct pvclock_vcpu_time_info hv_clock;
68
+
66
69
- read_lock_irqsave(&gpc->lock, flags);
67
70
- }
68
- -
71
+ + memcpy(&hv_clock, ref_hv_clock, sizeof(hv_clock));
72
+
69
73
- guest_hv_clock = (void *)(gpc->khva + offset);
70
74
+ kvm_read_guest(vcpu->kvm, gpa, &guest_hv_clock, sizeof(struct pvclock_vcpu_time_info));
71
75
72
76
/*
73
77
* This VCPU is paused, but it's legal for a guest to read another
74
- @@ -3160,20 +3145,18 @@ static void kvm_setup_guest_pvclock(struct pvclock_vcpu_time_info *ref_hv_clock ,
78
+ @@ -3144,30 +3131,20 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v ,
75
79
* it is consistent.
76
80
*/
77
81
78
- - guest_hv_clock->version = hv_clock.version = (guest_hv_clock->version + 1) | 1;
82
+ - guest_hv_clock->version = vcpu-> hv_clock.version = (guest_hv_clock->version + 1) | 1;
79
83
+ guest_hv_clock.version = hv_clock.version = (guest_hv_clock.version + 1) | 1;
80
84
smp_wmb();
81
85
82
86
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
83
- - hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
87
+ - vcpu->hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
88
+ -
89
+ - if (vcpu->pvclock_set_guest_stopped_request) {
90
+ - vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
91
+ - vcpu->pvclock_set_guest_stopped_request = false;
92
+ - }
84
93
+ hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
85
94
86
- - memcpy(guest_hv_clock, &hv_clock, sizeof(*guest_hv_clock));
95
+ - memcpy(guest_hv_clock, &vcpu->hv_clock, sizeof(*guest_hv_clock));
96
+ -
97
+ - if (force_tsc_unstable)
98
+ - guest_hv_clock->flags &= ~PVCLOCK_TSC_STABLE_BIT;
87
99
+ kvm_write_guest(vcpu->kvm, gpa, &hv_clock, sizeof(struct pvclock_vcpu_time_info));
88
100
89
101
smp_wmb();
90
102
91
- - guest_hv_clock->version = ++hv_clock.version;
103
+ - guest_hv_clock->version = ++vcpu-> hv_clock.version;
92
104
-
93
105
- kvm_gpc_mark_dirty_in_slot(gpc);
94
106
- read_unlock_irqrestore(&gpc->lock, flags);
95
107
+ ++hv_clock.version;
96
108
+ kvm_write_guest(vcpu->kvm, gpa + offsetof(struct pvclock_vcpu_time_info, version), &hv_clock.version, sizeof(hv_clock.version));
97
109
98
- trace_kvm_pvclock_update(vcpu->vcpu_id, &hv_clock);
110
+ - trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
111
+ + trace_kvm_pvclock_update(vcpu->vcpu_id, &hv_clock);
99
112
}
100
- @@ -3264,7 +3247,7 @@ int kvm_guest_time_update(struct kvm_vcpu *v)
101
- if (use_master_clock)
102
- hv_clock.flags |= PVCLOCK_TSC_STABLE_BIT;
103
113
104
- - if (vcpu->pv_time.active) {
114
+ static int kvm_guest_time_update(struct kvm_vcpu *v)
115
+ @@ -3267,8 +3244,8 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
116
+
117
+ vcpu->hv_clock.flags = pvclock_flags;
118
+
119
+ - if (vcpu->pv_time.active)
120
+ - kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0, false);
105
121
+ if (vcpu->system_time != INVALID_GPA) {
106
- /*
107
- * GUEST_STOPPED is only supported by kvmclock, and KVM's
108
- * historic behavior is to only process the request if kvmclock
109
- @@ -3274,7 +3257,7 @@ int kvm_guest_time_update(struct kvm_vcpu *v)
110
- hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
111
- vcpu->pvclock_set_guest_stopped_request = false;
112
- }
113
- - kvm_setup_guest_pvclock(&hv_clock, v, &vcpu->pv_time, 0);
114
- + kvm_setup_guest_pvclock(&hv_clock, v, vcpu->system_time);
115
-
116
- hv_clock.flags &= ~PVCLOCK_GUEST_STOPPED;
117
- }
118
- @@ -3590,7 +3573,7 @@ static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
122
+ + kvm_setup_guest_pvclock(v, &vcpu->pv_time, vcpu->system_time);
123
+ #ifdef CONFIG_KVM_XEN
124
+ if (vcpu->xen.vcpu_info_cache.active)
125
+ kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache,
126
+ @@ -3570,7 +3547,7 @@ static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
119
127
120
128
static void kvmclock_reset(struct kvm_vcpu *vcpu)
121
129
{
122
130
- kvm_gpc_deactivate(&vcpu->arch.pv_time);
123
- + vcpu->arch.system_time = INVALID_GPA;
131
+ vcpu->arch.system_time = INVALID_GPA;
124
132
vcpu->arch.time = 0;
125
133
}
126
134
127
- @@ -5688 ,7 +5671 ,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
135
+ @@ -5656 ,7 +5633 ,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
128
136
*/
129
137
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
130
138
{
@@ -133,14 +141,14 @@ index 2a02f2457c42..5aad662ec0e1 100644
133
141
return -EINVAL;
134
142
vcpu->arch.pvclock_set_guest_stopped_request = true;
135
143
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
136
- @@ -12278 ,8 +12261 ,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
144
+ @@ -12251 ,8 +12228 ,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
137
145
vcpu->arch.regs_avail = ~0;
138
146
vcpu->arch.regs_dirty = ~0;
139
147
140
148
- kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm);
141
149
-
142
150
if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
143
- kvm_set_mp_state( vcpu, KVM_MP_STATE_RUNNABLE) ;
151
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
144
152
else
145
153
- -
146
154
2.49.0
0 commit comments