debuggers.hg

xen/arch/x86/hvm/vpt.c @ 16583:0f9b5ab59579

hvm: Split no_missed_tick_accounting into two modes:
* no_missed_ticks_pending ('SYNC')
* one_missed_tick_pending ('MIXED')

This is based on a patch by Dave Winchell <dwinchell@virtualiron.com>

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author:   Keir Fraser <keir.fraser@citrix.com>
date:     Thu Dec 06 11:56:51 2007 +0000
parents:  8ff5bb70136d
children: f2f7c92bf1c1
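
The tick-delivery policy named by mode_is() in the source below is the
per-domain HVM_PARAM_TIMER_MODE parameter, so the split introduced by this
changeset is selected from the toolstack. A minimal sketch of such a call,
assuming libxenctrl's xc_set_hvm_param() and the HVMPTM_* constants from
the public hvm/params.h header; the helper name is illustrative and error
handling is elided:

#include <xenctrl.h>
#include <xen/hvm/params.h>

/* Illustrative helper (not part of this changeset): ask Xen to drop
 * missed ticks entirely ('SYNC') for an HVM domain.  xc_handle is an
 * open libxenctrl handle. */
static int use_sync_tick_policy(int xc_handle, domid_t domid)
{
    /* HVM_PARAM_TIMER_MODE is what mode_is() consults on every
     * tick-accounting decision in vpt.c. */
    return xc_set_hvm_param(xc_handle, domid, HVM_PARAM_TIMER_MODE,
                            HVMPTM_no_missed_ticks_pending);
}
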
line source
/*
 * vpt.c: Virtual Platform Timer
 *
 * Copyright (c) 2006, Xiaowei Yang, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <xen/time.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vpt.h>
#include <asm/event.h>
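
/* True iff domain d's HVM_PARAM_TIMER_MODE parameter selects policy 'name'. */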
#define mode_is(d, name) \
    ((d)->arch.hvm_domain.params[HVM_PARAM_TIMER_MODE] == HVMPTM_##name)
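
/*
 * Take the tm_lock of the vcpu that currently owns pt.  pt->vcpu can
 * change while we wait for the lock, so re-check ownership after
 * acquisition and retry if the timer has moved.
 */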
static void pt_lock(struct periodic_time *pt)
{
    struct vcpu *v;

    for ( ; ; )
    {
        v = pt->vcpu;
        spin_lock(&v->arch.hvm_vcpu.tm_lock);
        if ( likely(pt->vcpu == v) )
            break;
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
    }
}

static void pt_unlock(struct periodic_time *pt)
{
    spin_unlock(&pt->vcpu->arch.hvm_vcpu.tm_lock);
}
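
/*
 * Account for ticks the guest was unable to receive on time.  In
 * no_missed_ticks_pending ('SYNC') mode the missed ticks are dropped
 * rather than queued in pending_intr_nr; the other modes accumulate
 * them for later delivery.  Either way pt->scheduled is advanced to
 * the next deadline after NOW().
 */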
static void pt_process_missed_ticks(struct periodic_time *pt)
{
    s_time_t missed_ticks, now = NOW();

    if ( pt->one_shot )
        return;

    missed_ticks = now - pt->scheduled;
    if ( missed_ticks <= 0 )
        return;

    missed_ticks = missed_ticks / (s_time_t) pt->period + 1;
    if ( mode_is(pt->vcpu->domain, no_missed_ticks_pending) )
        pt->do_not_freeze = !pt->pending_intr_nr;
    else
        pt->pending_intr_nr += missed_ticks;
    pt->scheduled += missed_ticks * pt->period;
}
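
/*
 * In delay_for_missed_ticks mode, guest time is frozen while the vcpu
 * is descheduled and restored when it runs again, so the guest never
 * observes time advancing while it could not service ticks.
 */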
static void pt_freeze_time(struct vcpu *v)
{
    if ( !mode_is(v->domain, delay_for_missed_ticks) )
        return;

    v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
}

static void pt_thaw_time(struct vcpu *v)
{
    if ( !mode_is(v->domain, delay_for_missed_ticks) )
        return;

    if ( v->arch.hvm_vcpu.guest_time == 0 )
        return;

    hvm_set_guest_time(v, v->arch.hvm_vcpu.guest_time);
    v->arch.hvm_vcpu.guest_time = 0;
}
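
/*
 * pt_save_timer() runs when v is descheduled: stop each timer (unless
 * it is marked do_not_freeze) and freeze guest time.  pt_restore_timer()
 * is the counterpart on resume: account missed ticks, re-arm the timers
 * and thaw guest time.
 */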
void pt_save_timer(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    if ( test_bit(_VPF_blocked, &v->pause_flags) )
        return;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
        if ( !pt->do_not_freeze )
            stop_timer(&pt->timer);

    pt_freeze_time(v);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}

void pt_restore_timer(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        pt_process_missed_ticks(pt);
        set_timer(&pt->timer, pt->scheduled);
    }

    pt_thaw_time(v);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
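
/* Xen timer callback: record a pending tick, re-arm if periodic, and
 * kick the target vcpu so the interrupt can be delivered. */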
static void pt_timer_fn(void *data)
{
    struct periodic_time *pt = data;

    pt_lock(pt);

    pt->pending_intr_nr++;

    if ( !pt->one_shot )
    {
        pt->scheduled += pt->period;
        pt_process_missed_ticks(pt);
        set_timer(&pt->timer, pt->scheduled);
    }

    vcpu_kick(pt->vcpu);

    pt_unlock(pt);
}
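
/*
 * Choose which timer interrupt to inject: among timers with pending
 * ticks and an unmasked irq, pick the one with the earliest deadline
 * (last_plt_gtime + period_cycles), i.e. the most overdue.  LVTT
 * interrupts go through the vlapic; ISA irqs are pulsed
 * (deassert then assert).
 */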
void pt_update_irq(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;
    uint64_t max_lag = -1ULL;
    int irq = -1;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        if ( !is_isa_irq_masked(v, pt->irq) && pt->pending_intr_nr &&
             ((pt->last_plt_gtime + pt->period_cycles) < max_lag) )
        {
            max_lag = pt->last_plt_gtime + pt->period_cycles;
            irq = pt->irq;
        }
    }

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    if ( is_lvtt(v, irq) )
    {
        vlapic_set_irq(vcpu_vlapic(v), irq, 0);
    }
    else if ( irq >= 0 )
    {
        hvm_isa_irq_deassert(v->domain, irq);
        hvm_isa_irq_assert(v->domain, irq);
    }
}
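
/* Map an interrupt being acknowledged back to the periodic timer that
 * raised it, or return NULL if no timer is responsible. */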
static struct periodic_time *is_pt_irq(
    struct vcpu *v, struct hvm_intack intack)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;
    struct RTCState *rtc = &v->domain->arch.hvm_domain.pl_time.vrtc;
    int vector;

    list_for_each_entry ( pt, head, list )
    {
        if ( !pt->pending_intr_nr )
            continue;

        if ( is_lvtt(v, pt->irq) )
        {
            if ( pt->irq != intack.vector )
                continue;
            return pt;
        }

        vector = get_isa_irq_vector(v, pt->irq, intack.source);

        /* The RTC irq needs special care. */
        if ( (intack.vector != vector) ||
             ((pt->irq == 8) && !is_rtc_periodic_irq(rtc)) )
            continue;

        return pt;
    }

    return NULL;
}
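
/*
 * Book-keeping after the guest acknowledges a timer interrupt.  A
 * one-shot timer is disabled and unlinked once delivered.  For periodic
 * timers, one_missed_tick_pending ('MIXED') mode collapses all
 * outstanding ticks into the one just taken, while the other modes
 * consume a single pending tick and advance last_plt_gtime by one
 * period.  In delay_for_missed_ticks mode guest time is pulled forward
 * if it lags the delivered tick.  The callback runs after the lock is
 * dropped.
 */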
void pt_intr_post(struct vcpu *v, struct hvm_intack intack)
{
    struct periodic_time *pt;
    time_cb *cb;
    void *cb_priv;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt = is_pt_irq(v, intack);
    if ( pt == NULL )
    {
        spin_unlock(&v->arch.hvm_vcpu.tm_lock);
        return;
    }

    pt->do_not_freeze = 0;

    if ( pt->one_shot )
    {
        pt->enabled = 0;
        list_del(&pt->list);
    }
    else
    {
        if ( mode_is(v->domain, one_missed_tick_pending) )
        {
            pt->last_plt_gtime = hvm_get_guest_time(v);
            pt->pending_intr_nr = 0; /* 'collapse' all missed ticks */
        }
        else
        {
            pt->last_plt_gtime += pt->period_cycles;
            pt->pending_intr_nr--;
        }
    }

    if ( mode_is(v->domain, delay_for_missed_ticks) &&
         (hvm_get_guest_time(v) < pt->last_plt_gtime) )
        hvm_set_guest_time(v, pt->last_plt_gtime);

    cb = pt->cb;
    cb_priv = pt->priv;

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);

    if ( cb != NULL )
        cb(v, cb_priv);
}
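
/* Discard all pending ticks and restart every timer one full period
 * from now. */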
void pt_reset(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
    {
        pt->pending_intr_nr = 0;
        pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
        pt->scheduled = NOW() + pt->period;
        set_timer(&pt->timer, pt->scheduled);
    }

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
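
/* Move the vcpu's timers to the physical CPU it now runs on. */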
void pt_migrate(struct vcpu *v)
{
    struct list_head *head = &v->arch.hvm_vcpu.tm_list;
    struct periodic_time *pt;

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    list_for_each_entry ( pt, head, list )
        migrate_timer(&pt->timer, v->processor);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
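
/*
 * (Re)initialise and start a timer.  Periodic timers requesting a
 * period below 0.9ms are clamped to that minimum.
 */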
void create_periodic_time(
    struct vcpu *v, struct periodic_time *pt, uint64_t period,
    uint8_t irq, char one_shot, time_cb *cb, void *data)
{
    destroy_periodic_time(pt);

    spin_lock(&v->arch.hvm_vcpu.tm_lock);

    pt->enabled = 1;
    pt->pending_intr_nr = 0;
    pt->do_not_freeze = 0;

    /* Periodic timer must be at least 0.9ms. */
    if ( (period < 900000) && !one_shot )
    {
        gdprintk(XENLOG_WARNING,
                 "HVM_PlatformTime: period %"PRIu64"ns too small, "
                 "clamping to 900000ns\n", period);
        period = 900000;
    }

    pt->period = period;
    pt->vcpu = v;
    pt->last_plt_gtime = hvm_get_guest_time(pt->vcpu);
    pt->irq = irq;
    pt->period_cycles = (u64)period * cpu_khz / 1000000L;
    pt->one_shot = one_shot;
    pt->scheduled = NOW() + period;
    /*
     * Offset LAPIC ticks from other timer ticks. Otherwise guests which use
     * LAPIC ticks for process accounting can see long sequences of process
     * ticks incorrectly accounted to interrupt processing.
     */
    if ( is_lvtt(v, irq) )
        pt->scheduled += period >> 1;
    pt->cb = cb;
    pt->priv = data;

    list_add(&pt->list, &v->arch.hvm_vcpu.tm_list);

    init_timer(&pt->timer, pt_timer_fn, pt, v->processor);
    set_timer(&pt->timer, pt->scheduled);

    spin_unlock(&v->arch.hvm_vcpu.tm_lock);
}
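
/* Disable pt and unlink it from its vcpu's timer list. */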
void destroy_periodic_time(struct periodic_time *pt)
{
    if ( !pt->enabled )
        return;

    pt_lock(pt);
    pt->enabled = 0;
    list_del(&pt->list);
    pt_unlock(pt);

    /*
     * pt_timer_fn() can run until this kill_timer() returns. We must do this
     * outside pt_lock() otherwise we can deadlock with pt_timer_fn().
     */
    kill_timer(&pt->timer);
}