debuggers.hg

view xen/common/perfc.c @ 22855:1d1eec7e1fb4

xl: Perform minimal validation of virtual disk file while parsing config file

This patch performs some very basic validation on the virtual disk
file passed through the config file. This validation ensures that we
don't go too far with the initialization — such as spawning qemu —
while there may be fundamental issues with the supplied configuration.

[ Patch fixed up to work with PHYSTYPE_EMPTY 22808:6ec61438713a -iwj ]

Signed-off-by: Kamala Narasimhan <kamala.narasimhan@citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>
author Kamala Narasimhan <kamala.narasimhan@gmail.com>
date Tue Jan 25 18:09:49 2011 +0000 (2011-01-25)
parents 6348b2ab5c39
children
line source
2 #include <xen/lib.h>
3 #include <xen/smp.h>
4 #include <xen/time.h>
5 #include <xen/perfc.h>
6 #include <xen/keyhandler.h>
7 #include <xen/spinlock.h>
8 #include <xen/mm.h>
9 #include <xen/guest_access.h>
10 #include <public/sysctl.h>
11 #include <asm/perfc.h>
/*
 * Static descriptor table for all performance counters.
 * <xen/perfc_defn.h> is included below with the PERFCOUNTER* /
 * PERFSTATUS* macros redefined so each counter definition expands to
 * one { name, type, nr_elements } initializer.  The "S" (status)
 * variants are counters that perfc_reset() leaves untouched.
 */
13 #define PERFCOUNTER( var, name ) { name, TYPE_SINGLE, 0 },
14 #define PERFCOUNTER_ARRAY( var, name, size ) { name, TYPE_ARRAY, size },
15 #define PERFSTATUS( var, name ) { name, TYPE_S_SINGLE, 0 },
16 #define PERFSTATUS_ARRAY( var, name, size ) { name, TYPE_S_ARRAY, size },
17 static const struct {
18 const char *name;
19 enum { TYPE_SINGLE, TYPE_ARRAY,
20 TYPE_S_SINGLE, TYPE_S_ARRAY
21 } type;
22 unsigned int nr_elements;
23 } perfc_info[] = {
24 #include <xen/perfc_defn.h>
25 };
/* Number of descriptors, i.e. distinct counters (arrays count once). */
27 #define NR_PERFCTRS (sizeof(perfc_info) / sizeof(perfc_info[0]))
/*
 * The actual counter storage: one flat perfc_t array per CPU; array
 * counters occupy nr_elements consecutive slots.
 */
29 DEFINE_PER_CPU(perfc_t[NUM_PERFCOUNTERS], perfcounters);
/*
 * Keyhandler: dump every performance counter to the console.
 * @key is the triggering key and is otherwise unused.
 * perfc_info[] and the flat per-cpu counter arrays are walked in
 * parallel: i indexes the descriptor table, j the counter slots.
 */
31 void perfc_printall(unsigned char key)
32 {
33 unsigned int i, j;
34 s_time_t now = NOW();
36 printk("Xen performance counters SHOW (now = 0x%08X:%08X)\n",
37 (u32)(now>>32), (u32)now);
39 for ( i = j = 0; i < NR_PERFCTRS; i++ )
40 {
41 unsigned int k, cpu;
42 unsigned long long sum = 0;
44 printk("%-32s ", perfc_info[i].name);
45 switch ( perfc_info[i].type )
46 {
47 case TYPE_SINGLE:
48 case TYPE_S_SINGLE:
/* Single counter: total over all online CPUs, then per-CPU detail. */
49 for_each_online_cpu ( cpu )
50 sum += per_cpu(perfcounters, cpu)[j];
/* Status totals are truncated to the native counter width. */
51 if ( perfc_info[i].type == TYPE_S_SINGLE )
52 sum = (perfc_t) sum;
53 printk("TOTAL[%12Lu]", sum);
54 if ( sum )
55 {
56 k = 0;
57 for_each_online_cpu ( cpu )
58 {
/* Four CPU columns per output line. */
59 if ( k > 0 && (k % 4) == 0 )
60 printk("\n%46s", "");
61 printk(" CPU%02u[%10"PRIperfc"u]", cpu, per_cpu(perfcounters, cpu)[j]);
62 ++k;
63 }
64 }
65 ++j;
66 break;
67 case TYPE_ARRAY:
68 case TYPE_S_ARRAY:
/* Array counter: grand total is the sum of every element on every CPU. */
69 for_each_online_cpu ( cpu )
70 {
71 perfc_t *counters = per_cpu(perfcounters, cpu) + j;
73 for ( k = 0; k < perfc_info[i].nr_elements; k++ )
74 sum += counters[k];
75 }
76 if ( perfc_info[i].type == TYPE_S_ARRAY )
77 sum = (perfc_t) sum;
78 printk("TOTAL[%12Lu]", sum);
79 if (sum)
80 {
81 #ifdef PERF_ARRAYS
/* Detailed build: break down per array element, summed over CPUs. */
82 for ( k = 0; k < perfc_info[i].nr_elements; k++ )
83 {
84 sum = 0;
85 for_each_online_cpu ( cpu )
86 sum += per_cpu(perfcounters, cpu)[j + k];
87 if ( perfc_info[i].type == TYPE_S_ARRAY )
88 sum = (perfc_t) sum;
89 if ( (k % 4) == 0 )
90 printk("\n%16s", "");
91 printk(" ARR%02u[%10Lu]", k, sum);
92 }
93 #else
/* Compact build: break down per CPU, summed over array elements. */
94 k = 0;
95 for_each_online_cpu ( cpu )
96 {
97 perfc_t *counters = per_cpu(perfcounters, cpu) + j;
98 unsigned int n;
100 sum = 0;
101 for ( n = 0; n < perfc_info[i].nr_elements; n++ )
102 sum += counters[n];
103 if ( perfc_info[i].type == TYPE_S_ARRAY )
104 sum = (perfc_t) sum;
105 if ( k > 0 && (k % 4) == 0 )
106 printk("\n%46s", "");
107 printk(" CPU%02u[%10Lu]", cpu, sum);
108 ++k;
109 }
110 #endif
111 }
/* Arrays occupy nr_elements consecutive counter slots. */
112 j += perfc_info[i].nr_elements;
113 break;
114 }
115 printk("\n");
116 }
117 }
/*
 * Zero all non-status performance counters.
 * Status (TYPE_S_*) counters are deliberately left alone: the switch
 * cases below fall through so the slot index j still advances past
 * them.  @key is '\0' when invoked from the sysctl path, which
 * suppresses the console banner.
 */
119 void perfc_reset(unsigned char key)
120 {
121 unsigned int i, j;
122 s_time_t now = NOW();
124 if ( key != '\0' )
125 printk("Xen performance counters RESET (now = 0x%08X:%08X)\n",
126 (u32)(now>>32), (u32)now);
128 /* leave STATUS counters alone -- don't reset */
130 for ( i = j = 0; i < NR_PERFCTRS; i++ )
131 {
132 unsigned int cpu;
134 switch ( perfc_info[i].type )
135 {
136 case TYPE_SINGLE:
137 for_each_online_cpu ( cpu )
138 per_cpu(perfcounters, cpu)[j] = 0;
/* fall through: TYPE_S_SINGLE only advances the slot index. */
139 case TYPE_S_SINGLE:
140 ++j;
141 break;
142 case TYPE_ARRAY:
143 for_each_online_cpu ( cpu )
144 memset(per_cpu(perfcounters, cpu) + j, 0,
145 perfc_info[i].nr_elements * sizeof(perfc_t));
/* fall through: TYPE_S_ARRAY only advances the slot index. */
146 case TYPE_S_ARRAY:
147 j += perfc_info[i].nr_elements;
148 break;
149 }
150 }
/* Let the architecture reset any hardware-backed counters too. */
152 arch_perfc_reset();
153 }
/*
 * Cached state for the sysctl query path.  perfc_d[] (descriptors) and
 * perfc_vals (flattened per-counter values) are rebuilt by
 * perfc_copy_info() only when the set of online CPUs, remembered in
 * perfc_cpumap, has changed since the previous call.
 */
155 static xen_sysctl_perfc_desc_t perfc_d[NR_PERFCTRS];
156 static xen_sysctl_perfc_val_t *perfc_vals;
157 static unsigned int perfc_nbr_vals;
158 static cpumask_t perfc_cpumap;
/*
 * Copy counter descriptors and current values out to guest buffers.
 * A null @desc handle acts as a size query: the descriptor cache is
 * (re)built if necessary and 0 is returned without copying anything.
 * Returns 0 on success, -ENOMEM if the value array could not be
 * allocated, -EFAULT if a guest copy fails.  Called under the lock
 * taken in perfc_control().
 */
160 static int perfc_copy_info(XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc,
161 XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val)
162 {
163 unsigned int i, j, v;
165 /* We only copy the name and array-size information once. */
166 if ( !cpus_equal(cpu_online_map, perfc_cpumap) )
167 {
168 unsigned int nr_cpus;
169 perfc_cpumap = cpu_online_map;
170 nr_cpus = cpus_weight(perfc_cpumap);
172 perfc_nbr_vals = 0;
174 for ( i = 0; i < NR_PERFCTRS; i++ )
175 {
176 safe_strcpy(perfc_d[i].name, perfc_info[i].name);
178 switch ( perfc_info[i].type )
179 {
180 case TYPE_SINGLE:
181 case TYPE_S_SINGLE:
/* Single counters export one value per online CPU. */
182 perfc_d[i].nr_vals = nr_cpus;
183 break;
184 case TYPE_ARRAY:
185 case TYPE_S_ARRAY:
/* Array counters export one (cross-CPU summed) value per element. */
186 perfc_d[i].nr_vals = perfc_info[i].nr_elements;
187 break;
188 }
189 perfc_nbr_vals += perfc_d[i].nr_vals;
190 }
/*
 * Replace the value buffer to match the new layout.
 * NOTE(review): if this allocation fails, perfc_cpumap has already
 * been updated, so later calls keep returning -ENOMEM until the
 * online CPU set changes again — confirm this is intended.
 */
192 xfree(perfc_vals);
193 perfc_vals = xmalloc_array(xen_sysctl_perfc_val_t, perfc_nbr_vals);
194 }
196 if ( guest_handle_is_null(desc) )
197 return 0;
199 if ( perfc_vals == NULL )
200 return -ENOMEM;
202 /* Architecture may fill counters from hardware. */
203 arch_perfc_gather();
205 /* We gather the counts together every time. */
206 for ( i = j = v = 0; i < NR_PERFCTRS; i++ )
207 {
208 unsigned int cpu;
210 switch ( perfc_info[i].type )
211 {
212 case TYPE_SINGLE:
213 case TYPE_S_SINGLE:
214 for_each_cpu_mask ( cpu, perfc_cpumap )
215 perfc_vals[v++] = per_cpu(perfcounters, cpu)[j];
216 ++j;
217 break;
218 case TYPE_ARRAY:
219 case TYPE_S_ARRAY:
/* Element-wise sum across CPUs into the output slots. */
220 memset(perfc_vals + v, 0, perfc_d[i].nr_vals * sizeof(*perfc_vals));
221 for_each_cpu_mask ( cpu, perfc_cpumap )
222 {
223 perfc_t *counters = per_cpu(perfcounters, cpu) + j;
224 unsigned int k;
226 for ( k = 0; k < perfc_d[i].nr_vals; k++ )
227 perfc_vals[v + k] += counters[k];
228 }
229 v += perfc_d[i].nr_vals;
230 j += perfc_info[i].nr_elements;
231 break;
232 }
233 }
/* v must account for exactly the layout computed above. */
234 BUG_ON(v != perfc_nbr_vals);
236 if ( copy_to_guest(desc, perfc_d, NR_PERFCTRS) )
237 return -EFAULT;
238 if ( copy_to_guest(val, perfc_vals, perfc_nbr_vals) )
239 return -EFAULT;
240 return 0;
241 }
243 /* Dom0 control of perf counters */
244 int perfc_control(xen_sysctl_perfc_op_t *pc)
245 {
246 static DEFINE_SPINLOCK(lock);
247 int rc;
249 spin_lock(&lock);
251 switch ( pc->cmd )
252 {
253 case XEN_SYSCTL_PERFCOP_reset:
254 rc = perfc_copy_info(pc->desc, pc->val);
255 perfc_reset(0);
256 break;
258 case XEN_SYSCTL_PERFCOP_query:
259 rc = perfc_copy_info(pc->desc, pc->val);
260 break;
262 default:
263 rc = -EINVAL;
264 break;
265 }
267 spin_unlock(&lock);
269 pc->nr_counters = NR_PERFCTRS;
270 pc->nr_vals = perfc_nbr_vals;
272 return rc;
273 }
275 /*
276 * Local variables:
277 * mode: C
278 * c-set-style: "BSD"
279 * c-basic-offset: 4
280 * tab-width: 4
281 * indent-tabs-mode: nil
282 * End:
283 */