debuggers.hg

view xen/common/cpu.c @ 22855:1d1eec7e1fb4

xl: Perform minimal validation of virtual disk file while parsing config file

This patch performs some basic validation of the virtual disk
file passed through the config file. The validation ensures that we
do not go too far with initialization — such as spawning qemu —
while the configuration may have fundamental problems.

[ Patch fixed up to work with PHYSTYPE_EMPTY 22808:6ec61438713a -iwj ]

Signed-off-by: Kamala Narasimhan <kamala.narasimhan@citrix.com>
Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Committed-by: Ian Jackson <ian.jackson@eu.citrix.com>
author Kamala Narasimhan <kamala.narasimhan@gmail.com>
date Tue Jan 25 18:09:49 2011 +0000 (2011-01-25)
parents ff97273750b8
children
line source
1 #include <xen/config.h>
2 #include <xen/cpumask.h>
3 #include <xen/cpu.h>
4 #include <xen/event.h>
5 #include <xen/sched.h>
6 #include <xen/stop_machine.h>
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
/*
 * Doubling chain: MASK_DECLARE_8(x) expands to eight consecutive
 * single-bit rows starting at bit x. Row [x+1] has only word 0 set;
 * cpumask_of() presumably compensates with a byte offset for
 * NR_CPUS > BITS_PER_LONG — confirm against its definition.
 */
#define MASK_DECLARE_1(x) [x+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

    MASK_DECLARE_8(0), MASK_DECLARE_8(8),
    MASK_DECLARE_8(16), MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
    /* Rows for bits 32..63 exist only on 64-bit longs. */
    MASK_DECLARE_8(32), MASK_DECLARE_8(40),
    MASK_DECLARE_8(48), MASK_DECLARE_8(56),
#endif
};
/* Serialises CPU add/remove operations and guards the notifier chain. */
static DEFINE_SPINLOCK(cpu_add_remove_lock);

/*
 * Try to take the CPU-maps lock without blocking. Returns non-zero on
 * success; recursive acquisition by the same CPU also succeeds.
 */
bool_t get_cpu_maps(void)
{
    return spin_trylock_recursive(&cpu_add_remove_lock);
}

/* Release the lock taken by get_cpu_maps(). */
void put_cpu_maps(void)
{
    spin_unlock_recursive(&cpu_add_remove_lock);
}

/* Begin a hotplug operation; currently just takes the CPU-maps lock. */
bool_t cpu_hotplug_begin(void)
{
    return get_cpu_maps();
}

/* End a hotplug operation begun with cpu_hotplug_begin(). */
void cpu_hotplug_done(void)
{
    put_cpu_maps();
}
/* Chain of callbacks notified on CPU state transitions (CPU_* events). */
static NOTIFIER_HEAD(cpu_chain);

/*
 * Add @nb to the CPU notifier chain. Boot-time only: the lock is
 * expected to be uncontended, hence BUG() rather than spinning.
 */
void register_cpu_notifier(struct notifier_block *nb)
{
    if ( !spin_trylock(&cpu_add_remove_lock) )
        BUG(); /* Should never fail as we are called only during boot. */
    notifier_chain_register(&cpu_chain, nb);
    spin_unlock(&cpu_add_remove_lock);
}
64 static int take_cpu_down(void *unused)
65 {
66 void *hcpu = (void *)(long)smp_processor_id();
67 int notifier_rc = notifier_call_chain(&cpu_chain, CPU_DYING, hcpu, NULL);
68 BUG_ON(notifier_rc != NOTIFY_DONE);
69 __cpu_disable();
70 return 0;
71 }
/*
 * Bring the given CPU offline.
 *
 * Returns 0 on success; -EBUSY if the hotplug lock is unavailable;
 * -EINVAL for CPU0, an out-of-range CPU, or a CPU already offline;
 * otherwise an error from the DOWN_PREPARE notifiers or
 * stop_machine_run(). On failure after CPU_DOWN_PREPARE, the
 * CPU_DOWN_FAILED notification unwinds partially-prepared state (@nb
 * records how far along the chain the prepare phase got).
 */
int cpu_down(unsigned int cpu)
{
    int err, notifier_rc;
    void *hcpu = (void *)(long)cpu;
    struct notifier_block *nb = NULL;

    if ( !cpu_hotplug_begin() )
        return -EBUSY;

    /* CPU0 can never be taken offline. */
    if ( (cpu >= NR_CPUS) || (cpu == 0) || !cpu_online(cpu) )
    {
        cpu_hotplug_done();
        return -EINVAL;
    }

    notifier_rc = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, hcpu, &nb);
    if ( notifier_rc != NOTIFY_DONE )
    {
        err = notifier_to_errno(notifier_rc);
        goto fail;
    }

    /* Run take_cpu_down() on @cpu with all other CPUs quiesced. */
    if ( (err = stop_machine_run(take_cpu_down, NULL, cpu)) < 0 )
        goto fail;

    __cpu_die(cpu);
    BUG_ON(cpu_online(cpu));

    notifier_rc = notifier_call_chain(&cpu_chain, CPU_DEAD, hcpu, NULL);
    BUG_ON(notifier_rc != NOTIFY_DONE);

    send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
    cpu_hotplug_done();
    return 0;

 fail:
    notifier_rc = notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, hcpu, &nb);
    BUG_ON(notifier_rc != NOTIFY_DONE);
    cpu_hotplug_done();
    return err;
}
/*
 * Bring the given CPU online.
 *
 * Returns 0 on success; -EBUSY if the hotplug lock is unavailable;
 * -EINVAL if the CPU is out of range, already online, or not present;
 * otherwise an error from the UP_PREPARE notifiers or __cpu_up(). On
 * failure after CPU_UP_PREPARE, the CPU_UP_CANCELED notification
 * unwinds partially-prepared state (@nb records how far along the
 * chain the prepare phase got).
 */
int cpu_up(unsigned int cpu)
{
    int notifier_rc, err = 0;
    void *hcpu = (void *)(long)cpu;
    struct notifier_block *nb = NULL;

    if ( !cpu_hotplug_begin() )
        return -EBUSY;

    if ( (cpu >= NR_CPUS) || cpu_online(cpu) || !cpu_present(cpu) )
    {
        cpu_hotplug_done();
        return -EINVAL;
    }

    notifier_rc = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu, &nb);
    if ( notifier_rc != NOTIFY_DONE )
    {
        err = notifier_to_errno(notifier_rc);
        goto fail;
    }

    err = __cpu_up(cpu);
    if ( err < 0 )
        goto fail;

    notifier_rc = notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu, NULL);
    BUG_ON(notifier_rc != NOTIFY_DONE);

    send_guest_global_virq(dom0, VIRQ_PCPU_STATE);

    cpu_hotplug_done();
    return 0;

 fail:
    notifier_rc = notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu, &nb);
    BUG_ON(notifier_rc != NOTIFY_DONE);
    cpu_hotplug_done();
    return err;
}
156 void notify_cpu_starting(unsigned int cpu)
157 {
158 void *hcpu = (void *)(long)cpu;
159 int notifier_rc = notifier_call_chain(
160 &cpu_chain, CPU_STARTING, hcpu, NULL);
161 BUG_ON(notifier_rc != NOTIFY_DONE);
162 }
164 static cpumask_t frozen_cpus;
166 int disable_nonboot_cpus(void)
167 {
168 int cpu, error = 0;
170 BUG_ON(smp_processor_id() != 0);
172 cpus_clear(frozen_cpus);
174 printk("Disabling non-boot CPUs ...\n");
176 for_each_online_cpu ( cpu )
177 {
178 if ( cpu == 0 )
179 continue;
181 if ( (error = cpu_down(cpu)) )
182 {
183 BUG_ON(error == -EBUSY);
184 printk("Error taking CPU%d down: %d\n", cpu, error);
185 break;
186 }
188 cpu_set(cpu, frozen_cpus);
189 }
191 BUG_ON(!error && (num_online_cpus() != 1));
192 return error;
193 }
195 void enable_nonboot_cpus(void)
196 {
197 int cpu, error;
199 printk("Enabling non-boot CPUs ...\n");
201 for_each_cpu_mask ( cpu, frozen_cpus )
202 {
203 if ( (error = cpu_up(cpu)) )
204 {
205 BUG_ON(error == -EBUSY);
206 printk("Error taking CPU%d up: %d\n", cpu, error);
207 }
208 }
210 cpus_clear(frozen_cpus);
211 }