debuggers.hg

view linux-2.6.8.1-xen-sparse/arch/xen/i386/kernel/time.c @ 2651:e1abd8945ded

bitkeeper revision 1.1159.1.213 (416517f3vAbY9ISDviAe0Gjenl6dKw)

Take better care of time consistency in XenLinux.
Avoid spinning on serial line with interrupts disabled in Xen.
author kaf24@freefall.cl.cam.ac.uk
date Thu Oct 07 10:18:27 2004 +0000 (2004-10-07)
parents ff4e7a241335
children 7565994e86cb
line source
1 /*
2 * linux/arch/i386/kernel/time.c
3 *
4 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
5 *
6 * This file contains the PC-specific time handling details:
7 * reading the RTC at bootup, etc..
8 * 1994-07-02 Alan Modra
9 * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
10 * 1995-03-26 Markus Kuhn
11 * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
12 * precision CMOS clock update
13 * 1996-05-03 Ingo Molnar
14 * fixed time warps in do_[slow|fast]_gettimeoffset()
15 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
16 * "A Kernel Model for Precision Timekeeping" by Dave Mills
17 * 1998-09-05 (Various)
18 * More robust do_fast_gettimeoffset() algorithm implemented
19 * (works with APM, Cyrix 6x86MX and Centaur C6),
20 * monotonic gettimeofday() with fast_get_timeoffset(),
21 * drift-proof precision TSC calibration on boot
22 * (C. Scott Ananian <cananian@alumni.princeton.edu>, Andrew D.
23 * Balsa <andrebalsa@altern.org>, Philip Gladstone <philip@raptor.com>;
24 * ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@tu-harburg.de>).
25 * 1998-12-16 Andrea Arcangeli
26 * Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
27 * because was not accounting lost_ticks.
28 * 1998-12-24 Copyright (C) 1998 Andrea Arcangeli
29 * Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
30 * serialize accesses to xtime/lost_ticks).
31 */
33 #include <linux/errno.h>
34 #include <linux/sched.h>
35 #include <linux/kernel.h>
36 #include <linux/param.h>
37 #include <linux/string.h>
38 #include <linux/mm.h>
39 #include <linux/interrupt.h>
40 #include <linux/time.h>
41 #include <linux/delay.h>
42 #include <linux/init.h>
43 #include <linux/smp.h>
44 #include <linux/module.h>
45 #include <linux/sysdev.h>
46 #include <linux/bcd.h>
47 #include <linux/efi.h>
48 #include <linux/sysctl.h>
50 #include <asm/io.h>
51 #include <asm/smp.h>
52 #include <asm/irq.h>
53 #include <asm/msr.h>
54 #include <asm/delay.h>
55 #include <asm/mpspec.h>
56 #include <asm/uaccess.h>
57 #include <asm/processor.h>
58 #include <asm/timer.h>
60 #include "mach_time.h"
62 #include <linux/timex.h>
63 #include <linux/config.h>
65 #include <asm/hpet.h>
67 #include <asm/arch_hooks.h>
69 #include "io_ports.h"
71 extern spinlock_t i8259A_lock;
72 int pit_latch_buggy; /* extern */
74 #include "do_timer.h"
76 u64 jiffies_64 = INITIAL_JIFFIES;
78 EXPORT_SYMBOL(jiffies_64);
80 unsigned long cpu_khz; /* Detected as we calibrate the TSC */
82 extern unsigned long wall_jiffies;
84 spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
86 spinlock_t i8253_lock = SPIN_LOCK_UNLOCKED;
87 EXPORT_SYMBOL(i8253_lock);
89 struct timer_opts *cur_timer = &timer_none;
91 /* These are periodically updated in shared_info, and then copied here. */
92 u32 shadow_tsc_stamp;
93 u64 shadow_system_time;
94 static u32 shadow_time_version;
95 static struct timeval shadow_tv;
96 extern u64 processed_system_time;
98 /*
99 * We use this to ensure that gettimeofday() is monotonically increasing. We
100 * only break this guarantee if the wall clock jumps backwards "a long way".
101 */
102 static struct timeval last_seen_tv = {0,0};
104 #ifdef CONFIG_XEN_PRIVILEGED_GUEST
105 /* Periodically propagate synchronised time base to the RTC and to Xen. */
106 static long last_rtc_update, last_update_to_xen;
107 #endif
109 /* Periodically take synchronised time base from Xen, if we need it. */
110 static long last_update_from_xen; /* UTC seconds when last read Xen clock. */
112 /* Keep track of last time we did processing/updating of jiffies and xtime. */
113 u64 processed_system_time; /* System time (ns) at last processing. */
/* Nanoseconds per jiffy. */
115 #define NS_PER_TICK (1000000000ULL/HZ)
/* Fold a negative tv_usec back into range by borrowing from tv_sec. */
117 #define HANDLE_USEC_UNDERFLOW(_tv) do { \
118 while ((_tv).tv_usec < 0) { \
119 (_tv).tv_usec += USEC_PER_SEC; \
120 (_tv).tv_sec--; \
121 } \
122 } while (0)
/* Fold an overflowing tv_usec back into range by carrying into tv_sec. */
123 #define HANDLE_USEC_OVERFLOW(_tv) do { \
124 while ((_tv).tv_usec >= USEC_PER_SEC) { \
125 (_tv).tv_usec -= USEC_PER_SEC; \
126 (_tv).tv_sec++; \
127 } \
128 } while (0)
129 static inline void __normalize_time(time_t *sec, s64 *nsec)
130 {
131 while (*nsec >= NSEC_PER_SEC) {
132 (*nsec) -= NSEC_PER_SEC;
133 (*sec)++;
134 }
135 while (*nsec < 0) {
136 (*nsec) += NSEC_PER_SEC;
137 (*sec)--;
138 }
139 }
141 /* Does this guest OS track Xen time, or set its wall clock independently? */
142 static int independent_wallclock = 0;
143 static int __init __independent_wallclock(char *str)
144 {
145 independent_wallclock = 1;
146 return 1;
147 }
148 __setup("independent_wallclock", __independent_wallclock);
/*
 * True when this guest sets its wall clock itself: either the boot
 * option above was given, or we are the initial (privileged) domain.
 */
149 #define INDEPENDENT_WALLCLOCK() \
150 (independent_wallclock || (xen_start_info.flags & SIF_INITDOMAIN))
152 /*
153 * Reads a consistent set of time-base values from Xen, into a shadow data
154 * area. Must be called with the xtime_lock held for writing.
155 */
156 static void __get_time_values_from_xen(void)
157 {
158 shared_info_t *s = HYPERVISOR_shared_info;
/*
 * Copy wall-clock, TSC stamp and system time out of the shared page,
 * retrying until the two version counters agree.  NOTE(review):
 * presumably Xen bumps one version field before and the other after
 * rewriting the payload, so equal values mean a consistent snapshot
 * -- confirm against the hypervisor side.
 */
160 do {
161 shadow_time_version = s->time_version2;
/* rmb(): order the version read before the payload reads below. */
162 rmb();
163 shadow_tv.tv_sec = s->wc_sec;
164 shadow_tv.tv_usec = s->wc_usec;
165 shadow_tsc_stamp = (u32)s->tsc_timestamp;
166 shadow_system_time = s->system_time;
/* rmb(): payload reads complete before the consistency re-check. */
167 rmb();
168 }
169 while (shadow_time_version != s->time_version1);
/* Let the active timesource re-baseline its offset to the new stamp. */
171 cur_timer->mark_offset();
172 }
/*
 * Non-zero iff our shadow copy still matches Xen's published time
 * version, i.e. no new time update has appeared since the last
 * __get_time_values_from_xen().  The rmb() orders this check after
 * the caller's preceding reads.
 */
174 #define TIME_VALUES_UP_TO_DATE \
175 ({ rmb(); (shadow_time_version == HYPERVISOR_shared_info->time_version2); })
177 /*
178 * This version of gettimeofday has microsecond resolution
179 * and better than microsecond precision on fast x86 machines with TSC.
180 */
181 void do_gettimeofday(struct timeval *tv)
182 {
183 unsigned long seq;
184 unsigned long usec, sec;
185 unsigned long max_ntp_tick;
186 unsigned long flags;
187 s64 nsec;
/* Seqlock read loop: retry whenever a writer updated xtime under us. */
189 do {
190 unsigned long lost;
192 seq = read_seqbegin(&xtime_lock);
/* Sub-jiffy offset from the timesource, plus any un-applied jiffies. */
194 usec = cur_timer->get_offset();
195 lost = jiffies - wall_jiffies;
197 /*
198 * If time_adjust is negative then NTP is slowing the clock
199 * so make sure not to go into next possible interval.
200 * Better to lose some accuracy than have time go backwards..
201 */
202 if (unlikely(time_adjust < 0)) {
203 max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
204 usec = min(usec, max_ntp_tick);
206 if (lost)
207 usec += lost * max_ntp_tick;
208 }
209 else if (unlikely(lost))
210 usec += lost * (USEC_PER_SEC / HZ);
212 sec = xtime.tv_sec;
213 usec += (xtime.tv_nsec / NSEC_PER_USEC);
/* Add Xen system time accrued since we last processed a tick. */
215 nsec = shadow_system_time - processed_system_time;
216 __normalize_time(&sec, &nsec);
217 usec += (long)nsec / NSEC_PER_USEC;
219 if (unlikely(!TIME_VALUES_UP_TO_DATE)) {
220 /*
221 * We may have blocked for a long time,
222 * rendering our calculations invalid
223 * (e.g. the time delta may have
224 * overflowed). Detect that and recalculate
225 * with fresh values.
226 */
227 write_seqlock_irqsave(&xtime_lock, flags);
228 __get_time_values_from_xen();
229 write_sequnlock_irqrestore(&xtime_lock, flags);
230 continue;
231 }
232 } while (read_seqretry(&xtime_lock, seq));
/* Carry accumulated microseconds into whole seconds. */
234 while (usec >= USEC_PER_SEC) {
235 usec -= USEC_PER_SEC;
236 sec++;
237 }
239 /* Ensure that time-of-day is monotonically increasing. */
240 if ((sec < last_seen_tv.tv_sec) ||
241 ((sec == last_seen_tv.tv_sec) && (usec < last_seen_tv.tv_usec))) {
242 sec = last_seen_tv.tv_sec;
243 usec = last_seen_tv.tv_usec;
244 } else {
245 last_seen_tv.tv_sec = sec;
246 last_seen_tv.tv_usec = usec;
247 }
249 tv->tv_sec = sec;
250 tv->tv_usec = usec;
251 }
253 EXPORT_SYMBOL(do_gettimeofday);
255 int do_settimeofday(struct timespec *tv)
256 {
257 time_t wtm_sec, sec = tv->tv_sec;
258 long wtm_nsec;
259 s64 nsec;
260 struct timespec xentime;
262 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
263 return -EINVAL;
/* When tracking Xen's wall clock, setting the time is silently ignored. */
265 if (!INDEPENDENT_WALLCLOCK())
266 return 0; /* Silent failure? */
268 write_seqlock_irq(&xtime_lock);
270 /*
271 * Ensure we don't get blocked for a long time so that our time delta
272 * overflows. If that were to happen then our shadow time values would
273 * be stale, so we can retry with fresh ones.
274 */
275 again:
276 nsec = tv->tv_nsec - cur_timer->get_offset() * NSEC_PER_USEC;
277 if (unlikely(!TIME_VALUES_UP_TO_DATE)) {
278 __get_time_values_from_xen();
279 goto again;
280 }
/* Snapshot the requested time, normalized, for the DOM0_SETTIME op below. */
282 __normalize_time(&sec, &nsec);
283 set_normalized_timespec(&xentime, sec, nsec);
285 /*
286 * This is revolting. We need to set "xtime" correctly. However, the
287 * value in this location is the value at the most recent update of
288 * wall time. Discover what correction gettimeofday() would have
289 * made, and then undo it!
290 */
291 nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
293 nsec -= (shadow_system_time - processed_system_time);
295 __normalize_time(&sec, &nsec);
296 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
297 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
299 set_normalized_timespec(&xtime, sec, nsec);
300 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
/* Clock was stepped: cancel NTP adjustment and mark it unsynchronized. */
302 time_adjust = 0; /* stop active adjtime() */
303 time_status |= STA_UNSYNC;
304 time_maxerror = NTP_PHASE_LIMIT;
305 time_esterror = NTP_PHASE_LIMIT;
307 /* Reset all our running time counts. They make no sense now. */
308 last_seen_tv.tv_sec = 0;
309 last_update_from_xen = 0;
/* The initial domain additionally pushes the new time down to Xen. */
311 #ifdef CONFIG_XEN_PRIVILEGED_GUEST
312 if (xen_start_info.flags & SIF_INITDOMAIN) {
313 dom0_op_t op;
314 last_rtc_update = last_update_to_xen = 0;
315 op.cmd = DOM0_SETTIME;
316 op.u.settime.secs = xentime.tv_sec;
317 op.u.settime.usecs = xentime.tv_nsec / NSEC_PER_USEC;
318 op.u.settime.system_time = shadow_system_time;
/* Drop the lock before the hypercall. */
319 write_sequnlock_irq(&xtime_lock);
320 HYPERVISOR_dom0_op(&op);
321 } else
322 #endif
323 write_sequnlock_irq(&xtime_lock);
325 clock_was_set();
326 return 0;
327 }
329 EXPORT_SYMBOL(do_settimeofday);
331 #ifdef CONFIG_XEN_PRIVILEGED_GUEST
332 static int set_rtc_mmss(unsigned long nowtime)
333 {
334 int retval;
336 /* gets recalled with irq locally disabled */
337 spin_lock(&rtc_lock);
338 if (efi_enabled)
339 retval = efi_set_rtc_mmss(nowtime);
340 else
341 retval = mach_set_rtc_mmss(nowtime);
342 spin_unlock(&rtc_lock);
344 return retval;
345 }
346 #endif
348 /* monotonic_clock(): returns # of nanoseconds passed since time_init()
349 * Note: This function is required to return accurate
350 * time even in the absence of multiple timer ticks.
351 */
352 unsigned long long monotonic_clock(void)
353 {
/* Delegate to the currently selected timesource implementation. */
354 return cur_timer->monotonic_clock();
355 }
356 EXPORT_SYMBOL(monotonic_clock);
359 /*
360 * timer_interrupt() needs to keep up the real-time clock,
361 * as well as call the "do_timer()" routine every clocktick
362 */
/* Core tick processing; caller (timer_interrupt) holds xtime_lock for write. */
363 static inline void do_timer_interrupt(int irq, void *dev_id,
364 struct pt_regs *regs)
365 {
366 time_t wtm_sec, sec;
367 s64 delta, nsec;
368 long sec_diff, wtm_nsec;
370 retry:
371 __get_time_values_from_xen();
/* Nanoseconds of system time elapsed since our last processing point. */
373 delta = (s64)(shadow_system_time +
374 (cur_timer->get_offset() * NSEC_PER_USEC) -
375 processed_system_time);
376 if (delta < 0) {
/* A stale snapshot can fake a backwards step; refetch and retry. */
377 if (!TIME_VALUES_UP_TO_DATE)
378 goto retry;
379 printk("Timer ISR: Time went backwards: %lld\n", delta);
380 return;
381 }
383 /* Process elapsed jiffies since last call. */
384 while (delta >= NS_PER_TICK) {
385 delta -= NS_PER_TICK;
386 processed_system_time += NS_PER_TICK;
387 do_timer_interrupt_hook(regs);
388 }
390 /*
391 * Take synchronised time from Xen once a minute if we're not
392 * synchronised ourselves, and we haven't chosen to keep an independent
393 * time base.
394 */
395 if (!INDEPENDENT_WALLCLOCK() &&
396 ((time_status & STA_UNSYNC) != 0) &&
397 (xtime.tv_sec > (last_update_from_xen + 60))) {
398 /* Adjust shadow for jiffies that haven't updated xtime yet. */
399 shadow_tv.tv_usec -=
400 (jiffies - wall_jiffies) * (USEC_PER_SEC / HZ);
401 HANDLE_USEC_UNDERFLOW(shadow_tv);
403 /*
404 * Reset our running time counts if they are invalidated by
405 * a warp backwards of more than 500ms.
406 */
407 sec_diff = xtime.tv_sec - shadow_tv.tv_sec;
408 if (unlikely(abs(sec_diff) > 1) ||
409 unlikely(((sec_diff * USEC_PER_SEC) +
410 (xtime.tv_nsec / NSEC_PER_USEC) -
411 shadow_tv.tv_usec) > 500000)) {
412 #ifdef CONFIG_XEN_PRIVILEGED_GUEST
413 last_rtc_update = last_update_to_xen = 0;
414 #endif
415 last_seen_tv.tv_sec = 0;
416 }
418 /* Update our unsynchronised xtime appropriately. */
419 sec = shadow_tv.tv_sec;
420 nsec = shadow_tv.tv_usec * NSEC_PER_USEC;
422 __normalize_time(&sec, &nsec);
423 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
424 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
426 set_normalized_timespec(&xtime, sec, nsec);
427 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
429 last_update_from_xen = sec;
430 }
/* Everything below only applies to the initial (privileged) domain. */
432 #ifdef CONFIG_XEN_PRIVILEGED_GUEST
433 if (!(xen_start_info.flags & SIF_INITDOMAIN))
434 return;
436 /* Send synchronised time to Xen approximately every minute. */
437 if (((time_status & STA_UNSYNC) == 0) &&
438 (xtime.tv_sec > (last_update_to_xen + 60))) {
439 dom0_op_t op;
440 struct timeval tv;
442 tv.tv_sec = xtime.tv_sec;
443 tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
/* Account for jiffies that haven't been folded into xtime yet. */
444 tv.tv_usec += (jiffies - wall_jiffies) * (USEC_PER_SEC/HZ);
445 HANDLE_USEC_OVERFLOW(tv);
447 op.cmd = DOM0_SETTIME;
448 op.u.settime.secs = tv.tv_sec;
449 op.u.settime.usecs = tv.tv_usec;
450 op.u.settime.system_time = shadow_system_time;
451 HYPERVISOR_dom0_op(&op);
453 last_update_to_xen = xtime.tv_sec;
454 }
456 /*
457 * If we have an externally synchronized Linux clock, then update
458 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
459 * called as close as possible to 500 ms before the new second starts.
460 */
461 if ((time_status & STA_UNSYNC) == 0 &&
462 xtime.tv_sec > last_rtc_update + 660 &&
463 (xtime.tv_nsec / 1000)
464 >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
465 (xtime.tv_nsec / 1000)
466 <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2) {
467 /* horrible...FIXME */
468 if (efi_enabled) {
469 if (efi_set_rtc_mmss(xtime.tv_sec) == 0)
470 last_rtc_update = xtime.tv_sec;
471 else
472 last_rtc_update = xtime.tv_sec - 600;
473 } else if (set_rtc_mmss(xtime.tv_sec) == 0)
474 last_rtc_update = xtime.tv_sec;
475 else
476 last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
477 }
478 #endif
479 }
481 /*
482 * This is the same as the above, except we _also_ save the current
483 * Time Stamp Counter value at the time of the timer interrupt, so that
484 * we later on can estimate the time of day more exactly.
485 */
/* Timer VIRQ handler: serializes tick processing under the xtime seqlock. */
486 irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
487 {
488 /*
 * Here we are in the timer irq handler. We just have irqs locally
 * disabled but we don't know if the timer_bh is running on the other
 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
 * the irq version of write_lock because as just said we have irq
 * locally disabled. -arca
494 */
495 write_seqlock(&xtime_lock);
/* dev_id is not used by do_timer_interrupt, so NULL is passed. */
496 do_timer_interrupt(irq, NULL, regs);
497 write_sequnlock(&xtime_lock);
498 return IRQ_HANDLED;
499 }
501 /* not static: needed by APM */
502 unsigned long get_cmos_time(void)
503 {
504 unsigned long retval;
506 spin_lock(&rtc_lock);
508 if (efi_enabled)
509 retval = efi_get_time();
510 else
511 retval = mach_get_cmos_time();
513 spin_unlock(&rtc_lock);
515 return retval;
516 }
518 static long clock_cmos_diff;
520 static int __time_suspend(struct sys_device *dev, u32 state)
521 {
522 /*
523 * Estimate time zone so that set_time can update the clock
524 */
525 clock_cmos_diff = -get_cmos_time();
526 clock_cmos_diff += get_seconds();
527 return 0;
528 }
530 static int __time_resume(struct sys_device *dev)
531 {
532 unsigned long sec = get_cmos_time() + clock_cmos_diff;
533 write_seqlock_irq(&xtime_lock);
534 xtime.tv_sec = sec;
535 xtime.tv_nsec = 0;
536 write_sequnlock_irq(&xtime_lock);
537 return 0;
538 }
/* Sysdev class wiring the suspend/resume hooks above into the driver core. */
540 static struct sysdev_class pit_sysclass = {
541 .resume = __time_resume,
542 .suspend = __time_suspend,
543 set_kset_name("pit"),
544 };
547 /* XXX this driverfs stuff should probably go elsewhere later -john */
548 static struct sys_device device_i8253 = {
549 .id = 0,
550 .cls = &pit_sysclass,
551 };
553 static int time_init_device(void)
554 {
555 int error = sysdev_class_register(&pit_sysclass);
556 if (!error)
557 error = sysdev_register(&device_i8253);
558 return error;
559 }
561 device_initcall(time_init_device);
563 #ifdef CONFIG_HPET_TIMER
564 extern void (*late_time_init)(void);
565 /* Duplicate of time_init() below, with hpet_enable part added */
566 void __init hpet_time_init(void)
567 {
/* Seed xtime from the CMOS clock; wall_to_monotonic is its negation. */
568 xtime.tv_sec = get_cmos_time();
569 wall_to_monotonic.tv_sec = -xtime.tv_sec;
/* Start the nsec field at the sub-second phase implied by INITIAL_JIFFIES. */
570 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
571 wall_to_monotonic.tv_nsec = -xtime.tv_nsec;
573 if (hpet_enable() >= 0) {
574 printk("Using HPET for base-timer\n");
575 }
577 cur_timer = select_timer();
578 printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
580 time_init_hook();
581 }
582 #endif
584 /* Dynamically-mapped IRQ. */
585 static int time_irq;
/*
 * irqaction for the timer VIRQ, positional initializer:
 * handler, flags, cpu mask, name, then (presumably) dev_id and next
 * -- verify field order against this kernel's struct irqaction.
 */
587 static struct irqaction irq_timer = {
588 timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer",
589 NULL, NULL
590 };
592 void __init time_init(void)
593 {
594 #ifdef CONFIG_HPET_TIMER
595 if (is_hpet_capable()) {
596 /*
597 * HPET initialization needs to do memory-mapped io. So, let
598 * us do a late initialization after mem_init().
599 */
600 late_time_init = hpet_time_init;
601 return;
602 }
603 #endif
/* Seed xtime from Xen's wall clock; wall_to_monotonic is its negation. */
604 __get_time_values_from_xen();
605 xtime.tv_sec = shadow_tv.tv_sec;
606 wall_to_monotonic.tv_sec = -xtime.tv_sec;
607 xtime.tv_nsec = shadow_tv.tv_usec * NSEC_PER_USEC;
608 wall_to_monotonic.tv_nsec = -xtime.tv_nsec;
/* Start our tick-processing baseline at the snapshotted system time. */
609 processed_system_time = shadow_system_time;
611 cur_timer = select_timer();
612 printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
/* Map the Xen timer VIRQ onto a dynamically-allocated IRQ and hook it. */
614 time_irq = bind_virq_to_irq(VIRQ_TIMER);
616 (void)setup_irq(time_irq, &irq_timer);
617 }
619 /* Convert jiffies to system time. Call with xtime_lock held for reading. */
620 static inline u64 __jiffies_to_st(unsigned long j)
621 {
622 return processed_system_time + ((j - jiffies) * NS_PER_TICK);
623 }
625 /*
626 * This function works out when the next timer function has to be
627 * executed (by looking at the timer list) and sets the Xen one-shot
628 * domain timer to the appropriate value. This is typically called in
629 * cpu_idle() before the domain blocks.
630 *
631 * The function returns a non-0 value on error conditions.
632 *
633 * It must be called with interrupts disabled.
634 */
635 int set_timeout_timer(void)
636 {
637 u64 alarm = 0;
638 int ret = 0;
640 /*
641 * This is safe against long blocking (since calculations are
642 * not based on TSC deltas). It is also safe against warped
643 * system time since suspend-resume is cooperative and we
644 * would first get locked out. It is safe against normal
645 * updates of jiffies since interrupts are off.
646 */
/* Program Xen's one-shot timer for the next pending kernel timer. */
647 alarm = __jiffies_to_st(next_timer_interrupt());
649 /* Failure is pretty bad, but we'd best soldier on. */
650 if ( HYPERVISOR_set_timer_op(alarm) != 0 )
651 ret = -1;
653 return ret;
654 }
/* Suspend hook: nothing to save -- all state is rebuilt in time_resume(). */
656 void time_suspend(void)
657 {
/* Intentionally empty. */
658 }
660 void time_resume(void)
661 {
662 unsigned long flags;
663 write_lock_irqsave(&xtime_lock, flags);
664 /* Get timebases for new environment. */
665 __get_time_values_from_xen();
666 /* Reset our own concept of passage of system time. */
667 processed_system_time = shadow_system_time;
668 /* Accept a warp in UTC (wall-clock) time. */
669 last_seen_tv.tv_sec = 0;
670 /* Make sure we resync UTC time with Xen on next timer interrupt. */
671 last_update_from_xen = 0;
672 write_unlock_irqrestore(&xtime_lock, flags);
673 }
675 /*
676 * /proc/sys/xen: This really belongs in another file. It can stay here for
677 * now however.
678 */
/* Exposes /proc/sys/xen/independent_wallclock as a writable (0644) int. */
679 static ctl_table xen_subtable[] = {
680 {1, "independent_wallclock", &independent_wallclock,
681 sizeof(independent_wallclock), 0644, NULL, proc_dointvec},
682 {0}
683 };
/* Parent "xen" directory (mode 0555) containing the table above. */
684 static ctl_table xen_table[] = {
685 {123, "xen", NULL, 0, 0555, xen_subtable},
686 {0}
687 };
688 static int __init xen_sysctl_init(void)
689 {
/* Registration handle is deliberately discarded: never unregistered. */
690 (void)register_sysctl_table(xen_table, 0);
691 return 0;
692 }
693 __initcall(xen_sysctl_init);