debuggers.hg

view xen/common/ac_timer.c @ 3705:4294cfa9fad3

bitkeeper revision 1.1159.212.95 (4204aa0ee0re5Xx1zWrJ9ejxzgRs3w)

Various cleanups. Remove PDB pending simpler GDB stub and/or NetBSD debugger.
Force emacs mode to appropriate tabbing in various files.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Sat Feb 05 11:12:14 2005 +0000 (2005-02-05)
parents 0ef6e8e6e85d
children 88957a238191
line source
1 /* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*-
2 ****************************************************************************
3 * (C) 2002-2003 - Rolf Neugebauer - Intel Research Cambridge
4 * (C) 2002-2003 University of Cambridge
5 ****************************************************************************
6 *
7 * File: ac_timer.c
8 * Author: Rolf Neugebauer (neugebar@dcs.gla.ac.uk)
9 * Keir Fraser (kaf24@cl.cam.ac.uk)
10 *
11 * Environment: Xen Hypervisor
12 * Description: Accurate timer for the Hypervisor
13 */
15 #include <xen/config.h>
16 #include <xen/init.h>
17 #include <xen/types.h>
18 #include <xen/errno.h>
19 #include <xen/sched.h>
20 #include <xen/lib.h>
21 #include <xen/smp.h>
22 #include <xen/perfc.h>
23 #include <xen/time.h>
24 #include <xen/softirq.h>
25 #include <xen/ac_timer.h>
26 #include <xen/keyhandler.h>
27 #include <asm/system.h>
28 #include <asm/desc.h>
/*
 * A handler whose deadline falls within TIMER_SLOP of "now" is executed
 * immediately, rather than reprogramming the timer hardware for it.
 */
#define TIMER_SLOP (50*1000) /* ns */

/* Initial capacity of each per-CPU timer heap (doubles on overflow). */
#define DEFAULT_HEAP_LIMIT 127
38 /* A timer list per CPU */
39 typedef struct ac_timers_st
40 {
41 spinlock_t lock;
42 struct ac_timer **heap;
43 } __cacheline_aligned ac_timers_t;
44 static ac_timers_t ac_timers[NR_CPUS];
/****************************************************************************
 * HEAP OPERATIONS.
 *
 * Slot 0 of a heap array stores metadata rather than a timer pointer:
 * the first u16 is the current element count, the second the capacity.
 */

#define GET_HEAP_SIZE(_h)     ((int)(((u16 *)(_h))[0]))
#define SET_HEAP_SIZE(_h,_v)  (((u16 *)(_h))[0] = (u16)(_v))

#define GET_HEAP_LIMIT(_h)    ((int)(((u16 *)(_h))[1]))
#define SET_HEAP_LIMIT(_h,_v) (((u16 *)(_h))[1] = (u16)(_v))
57 /* Sink down element @pos of @heap. */
58 static void down_heap(struct ac_timer **heap, int pos)
59 {
60 int sz = GET_HEAP_SIZE(heap), nxt;
61 struct ac_timer *t = heap[pos];
63 while ( (nxt = (pos << 1)) <= sz )
64 {
65 if ( ((nxt+1) <= sz) && (heap[nxt+1]->expires < heap[nxt]->expires) )
66 nxt++;
67 if ( heap[nxt]->expires > t->expires )
68 break;
69 heap[pos] = heap[nxt];
70 heap[pos]->heap_offset = pos;
71 pos = nxt;
72 }
74 heap[pos] = t;
75 t->heap_offset = pos;
76 }
78 /* Float element @pos up @heap. */
79 static void up_heap(struct ac_timer **heap, int pos)
80 {
81 struct ac_timer *t = heap[pos];
83 while ( (pos > 1) && (t->expires < heap[pos>>1]->expires) )
84 {
85 heap[pos] = heap[pos>>1];
86 heap[pos]->heap_offset = pos;
87 pos >>= 1;
88 }
90 heap[pos] = t;
91 t->heap_offset = pos;
92 }
95 /* Delete @t from @heap. Return TRUE if new top of heap. */
96 static int remove_entry(struct ac_timer **heap, struct ac_timer *t)
97 {
98 int sz = GET_HEAP_SIZE(heap);
99 int pos = t->heap_offset;
101 t->heap_offset = 0;
103 if ( unlikely(pos == sz) )
104 {
105 SET_HEAP_SIZE(heap, sz-1);
106 goto out;
107 }
109 heap[pos] = heap[sz];
110 heap[pos]->heap_offset = pos;
112 SET_HEAP_SIZE(heap, --sz);
114 if ( (pos > 1) && (heap[pos]->expires < heap[pos>>1]->expires) )
115 up_heap(heap, pos);
116 else
117 down_heap(heap, pos);
119 out:
120 return (pos == 1);
121 }
124 /* Add new entry @t to @heap. Return TRUE if new top of heap. */
125 static int add_entry(struct ac_timer **heap, struct ac_timer *t)
126 {
127 int sz = GET_HEAP_SIZE(heap);
129 /* Copy the heap if it is full. */
130 if ( unlikely(sz == GET_HEAP_LIMIT(heap)) )
131 {
132 int i, limit = (GET_HEAP_LIMIT(heap)+1) << 1;
133 struct ac_timer **new_heap = xmalloc_array(struct ac_timer *, limit);
134 if ( new_heap == NULL ) BUG();
135 memcpy(new_heap, heap, (limit>>1)*sizeof(struct ac_timer *));
136 for ( i = 0; i < smp_num_cpus; i++ )
137 if ( ac_timers[i].heap == heap )
138 ac_timers[i].heap = new_heap;
139 xfree(heap);
140 heap = new_heap;
141 SET_HEAP_LIMIT(heap, limit-1);
142 }
144 SET_HEAP_SIZE(heap, ++sz);
145 heap[sz] = t;
146 t->heap_offset = sz;
147 up_heap(heap, sz);
148 return (t->heap_offset == 1);
149 }
152 /****************************************************************************
153 * TIMER OPERATIONS.
154 */
156 static inline void __add_ac_timer(struct ac_timer *timer)
157 {
158 int cpu = timer->cpu;
159 if ( add_entry(ac_timers[cpu].heap, timer) )
160 cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
161 }
163 void add_ac_timer(struct ac_timer *timer)
164 {
165 int cpu = timer->cpu;
166 unsigned long flags;
168 spin_lock_irqsave(&ac_timers[cpu].lock, flags);
169 ASSERT(timer != NULL);
170 ASSERT(!active_ac_timer(timer));
171 __add_ac_timer(timer);
172 spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
173 }
176 static inline void __rem_ac_timer(struct ac_timer *timer)
177 {
178 int cpu = timer->cpu;
179 if ( remove_entry(ac_timers[cpu].heap, timer) )
180 cpu_raise_softirq(cpu, AC_TIMER_SOFTIRQ);
181 }
183 void rem_ac_timer(struct ac_timer *timer)
184 {
185 int cpu = timer->cpu;
186 unsigned long flags;
188 spin_lock_irqsave(&ac_timers[cpu].lock, flags);
189 ASSERT(timer != NULL);
190 if ( active_ac_timer(timer) )
191 __rem_ac_timer(timer);
192 spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
193 }
196 void mod_ac_timer(struct ac_timer *timer, s_time_t new_time)
197 {
198 int cpu = timer->cpu;
199 unsigned long flags;
201 spin_lock_irqsave(&ac_timers[cpu].lock, flags);
202 ASSERT(timer != NULL);
203 if ( active_ac_timer(timer) )
204 __rem_ac_timer(timer);
205 timer->expires = new_time;
206 __add_ac_timer(timer);
207 spin_unlock_irqrestore(&ac_timers[cpu].lock, flags);
208 }
211 static void ac_timer_softirq_action(void)
212 {
213 int cpu = smp_processor_id();
214 struct ac_timer *t, **heap;
215 s_time_t now;
216 void (*fn)(unsigned long);
218 spin_lock_irq(&ac_timers[cpu].lock);
220 do {
221 heap = ac_timers[cpu].heap;
222 now = NOW();
224 while ( (GET_HEAP_SIZE(heap) != 0) &&
225 ((t = heap[1])->expires < (now + TIMER_SLOP)) )
226 {
227 remove_entry(heap, t);
229 if ( (fn = t->function) != NULL )
230 {
231 unsigned long data = t->data;
232 spin_unlock_irq(&ac_timers[cpu].lock);
233 (*fn)(data);
234 spin_lock_irq(&ac_timers[cpu].lock);
235 }
237 /* Heap may have grown while the lock was released. */
238 heap = ac_timers[cpu].heap;
239 }
240 }
241 while ( !reprogram_ac_timer(GET_HEAP_SIZE(heap) ? heap[1]->expires : 0) );
243 spin_unlock_irq(&ac_timers[cpu].lock);
244 }
247 static void dump_timerq(unsigned char key)
248 {
249 struct ac_timer *t;
250 unsigned long flags;
251 s_time_t now = NOW();
252 int i, j;
254 printk("Dumping ac_timer queues: NOW=0x%08X%08X\n",
255 (u32)(now>>32), (u32)now);
257 for ( i = 0; i < smp_num_cpus; i++ )
258 {
259 printk("CPU[%02d] ", i);
260 spin_lock_irqsave(&ac_timers[i].lock, flags);
261 for ( j = 1; j <= GET_HEAP_SIZE(ac_timers[i].heap); j++ )
262 {
263 t = ac_timers[i].heap[j];
264 printk (" %d : %p ex=0x%08X%08X %lu\n",
265 j, t, (u32)(t->expires>>32), (u32)t->expires, t->data);
266 }
267 spin_unlock_irqrestore(&ac_timers[i].lock, flags);
268 printk("\n");
269 }
270 }
273 void __init ac_timer_init(void)
274 {
275 int i;
277 open_softirq(AC_TIMER_SOFTIRQ, ac_timer_softirq_action);
279 for ( i = 0; i < smp_num_cpus; i++ )
280 {
281 ac_timers[i].heap = xmalloc_array(struct ac_timer *, DEFAULT_HEAP_LIMIT+1);
282 if ( ac_timers[i].heap == NULL ) BUG();
283 SET_HEAP_SIZE(ac_timers[i].heap, 0);
284 SET_HEAP_LIMIT(ac_timers[i].heap, DEFAULT_HEAP_LIMIT);
285 spin_lock_init(&ac_timers[i].lock);
286 }
288 register_keyhandler('a', dump_timerq, "dump ac_timer queues");
289 }