debuggers.hg

annotate xen/common/cpu.c @ 22848:6341fe0f4e5a

Added tag 4.1.0-rc2 for changeset 9dca60d88c63
author Keir Fraser <keir@xen.org>
date Tue Jan 25 14:06:55 2011 +0000 (2011-01-25)
parents ff97273750b8
children
rev   line source
keir@19689 1 #include <xen/config.h>
keir@19689 2 #include <xen/cpumask.h>
keir@21429 3 #include <xen/cpu.h>
keir@21430 4 #include <xen/event.h>
keir@21430 5 #include <xen/sched.h>
keir@21430 6 #include <xen/stop_machine.h>
keir@19689 7
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
/* Row [x+1] of the table gets exactly bit x set in its first word. */
#define MASK_DECLARE_1(x) [x+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

/*
 * One row per possible bit position within a long (plus the empty row 0);
 * only the first word of each row is ever non-zero.
 */
const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

    MASK_DECLARE_8(0), MASK_DECLARE_8(8),
    MASK_DECLARE_8(16), MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
    MASK_DECLARE_8(32), MASK_DECLARE_8(40),
    MASK_DECLARE_8(48), MASK_DECLARE_8(56),
#endif
};
keir@21429 31
/*
 * Recursive lock serialising CPU add/remove operations; taken (trylock
 * only, so it never blocks) via get_cpu_maps()/cpu_hotplug_begin().
 */
static DEFINE_SPINLOCK(cpu_add_remove_lock);
keir@21430 34 bool_t get_cpu_maps(void)
keir@21430 35 {
keir@21430 36 return spin_trylock_recursive(&cpu_add_remove_lock);
keir@21430 37 }
keir@21430 38
keir@21430 39 void put_cpu_maps(void)
keir@21430 40 {
keir@21430 41 spin_unlock_recursive(&cpu_add_remove_lock);
keir@21430 42 }
keir@21430 43
keir@21430 44 bool_t cpu_hotplug_begin(void)
keir@21430 45 {
keir@21430 46 return get_cpu_maps();
keir@21430 47 }
keir@21430 48
/* Leave a CPU hotplug critical section opened by cpu_hotplug_begin(). */
void cpu_hotplug_done(void)
{
    put_cpu_maps();
}
keir@21429 53
/* Notifier chain invoked at each phase of CPU online/offline. */
static NOTIFIER_HEAD(cpu_chain);
keir@21457 56 void register_cpu_notifier(struct notifier_block *nb)
keir@21429 57 {
keir@21430 58 if ( !spin_trylock(&cpu_add_remove_lock) )
keir@21430 59 BUG(); /* Should never fail as we are called only during boot. */
keir@21457 60 notifier_chain_register(&cpu_chain, nb);
keir@21429 61 spin_unlock(&cpu_add_remove_lock);
keir@21429 62 }
keir@21429 63
keir@21430 64 static int take_cpu_down(void *unused)
keir@21430 65 {
keir@21430 66 void *hcpu = (void *)(long)smp_processor_id();
keir@21457 67 int notifier_rc = notifier_call_chain(&cpu_chain, CPU_DYING, hcpu, NULL);
keir@21457 68 BUG_ON(notifier_rc != NOTIFY_DONE);
keir@21434 69 __cpu_disable();
keir@21434 70 return 0;
keir@21430 71 }
keir@21430 72
keir@21430 73 int cpu_down(unsigned int cpu)
keir@21429 74 {
keir@21457 75 int err, notifier_rc;
keir@21430 76 void *hcpu = (void *)(long)cpu;
keir@21457 77 struct notifier_block *nb = NULL;
keir@21430 78
keir@21430 79 if ( !cpu_hotplug_begin() )
keir@21430 80 return -EBUSY;
keir@21430 81
keir@21457 82 if ( (cpu >= NR_CPUS) || (cpu == 0) || !cpu_online(cpu) )
keir@21430 83 {
keir@21430 84 cpu_hotplug_done();
keir@21430 85 return -EINVAL;
keir@21430 86 }
keir@21430 87
keir@21457 88 notifier_rc = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, hcpu, &nb);
keir@21430 89 if ( notifier_rc != NOTIFY_DONE )
keir@21430 90 {
keir@21430 91 err = notifier_to_errno(notifier_rc);
keir@21457 92 goto fail;
keir@21430 93 }
keir@21430 94
keir@21430 95 if ( (err = stop_machine_run(take_cpu_down, NULL, cpu)) < 0 )
keir@21457 96 goto fail;
keir@21430 97
keir@21430 98 __cpu_die(cpu);
keir@21430 99 BUG_ON(cpu_online(cpu));
keir@21430 100
keir@21457 101 notifier_rc = notifier_call_chain(&cpu_chain, CPU_DEAD, hcpu, NULL);
keir@21430 102 BUG_ON(notifier_rc != NOTIFY_DONE);
keir@21430 103
keir@21457 104 send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
keir@21457 105 cpu_hotplug_done();
keir@21457 106 return 0;
keir@21457 107
keir@21457 108 fail:
keir@21457 109 notifier_rc = notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, hcpu, &nb);
keir@21457 110 BUG_ON(notifier_rc != NOTIFY_DONE);
keir@21430 111 cpu_hotplug_done();
keir@21430 112 return err;
keir@21429 113 }
keir@21429 114
keir@21430 115 int cpu_up(unsigned int cpu)
keir@21429 116 {
keir@21457 117 int notifier_rc, err = 0;
keir@21430 118 void *hcpu = (void *)(long)cpu;
keir@21457 119 struct notifier_block *nb = NULL;
keir@21430 120
keir@21430 121 if ( !cpu_hotplug_begin() )
keir@21430 122 return -EBUSY;
keir@21430 123
keir@21457 124 if ( (cpu >= NR_CPUS) || cpu_online(cpu) || !cpu_present(cpu) )
keir@21430 125 {
keir@21430 126 cpu_hotplug_done();
keir@21430 127 return -EINVAL;
keir@21430 128 }
keir@21430 129
keir@21457 130 notifier_rc = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu, &nb);
keir@21430 131 if ( notifier_rc != NOTIFY_DONE )
keir@21430 132 {
keir@21430 133 err = notifier_to_errno(notifier_rc);
keir@21430 134 goto fail;
keir@21430 135 }
keir@21430 136
keir@21430 137 err = __cpu_up(cpu);
keir@21430 138 if ( err < 0 )
keir@21430 139 goto fail;
keir@21430 140
keir@21457 141 notifier_rc = notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu, NULL);
keir@21430 142 BUG_ON(notifier_rc != NOTIFY_DONE);
keir@21430 143
keir@21430 144 send_guest_global_virq(dom0, VIRQ_PCPU_STATE);
keir@21430 145
keir@21430 146 cpu_hotplug_done();
keir@21430 147 return 0;
keir@21430 148
keir@21430 149 fail:
keir@21457 150 notifier_rc = notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu, &nb);
keir@21430 151 BUG_ON(notifier_rc != NOTIFY_DONE);
keir@21430 152 cpu_hotplug_done();
keir@21430 153 return err;
keir@21429 154 }
keir@21429 155
keir@22520 156 void notify_cpu_starting(unsigned int cpu)
keir@22520 157 {
keir@22520 158 void *hcpu = (void *)(long)cpu;
keir@22520 159 int notifier_rc = notifier_call_chain(
keir@22520 160 &cpu_chain, CPU_STARTING, hcpu, NULL);
keir@22520 161 BUG_ON(notifier_rc != NOTIFY_DONE);
keir@22520 162 }
keir@22520 163
/*
 * CPUs taken down by disable_nonboot_cpus(), to be brought back by
 * enable_nonboot_cpus().
 */
static cpumask_t frozen_cpus;
keir@21430 166 int disable_nonboot_cpus(void)
keir@21429 167 {
keir@21430 168 int cpu, error = 0;
keir@21430 169
keir@21461 170 BUG_ON(smp_processor_id() != 0);
keir@21430 171
keir@21430 172 cpus_clear(frozen_cpus);
keir@21430 173
keir@21430 174 printk("Disabling non-boot CPUs ...\n");
keir@21430 175
keir@21430 176 for_each_online_cpu ( cpu )
keir@21430 177 {
keir@21430 178 if ( cpu == 0 )
keir@21430 179 continue;
keir@21430 180
keir@21430 181 if ( (error = cpu_down(cpu)) )
keir@21430 182 {
keir@21430 183 BUG_ON(error == -EBUSY);
keir@21430 184 printk("Error taking CPU%d down: %d\n", cpu, error);
keir@21430 185 break;
keir@21430 186 }
keir@21430 187
keir@21430 188 cpu_set(cpu, frozen_cpus);
keir@21430 189 }
keir@21430 190
keir@21430 191 BUG_ON(!error && (num_online_cpus() != 1));
keir@21430 192 return error;
keir@21429 193 }
keir@21430 194
keir@21430 195 void enable_nonboot_cpus(void)
keir@21430 196 {
keir@21430 197 int cpu, error;
keir@21430 198
keir@21430 199 printk("Enabling non-boot CPUs ...\n");
keir@21430 200
keir@21430 201 for_each_cpu_mask ( cpu, frozen_cpus )
keir@21430 202 {
keir@21430 203 if ( (error = cpu_up(cpu)) )
keir@21430 204 {
keir@21430 205 BUG_ON(error == -EBUSY);
keir@21430 206 printk("Error taking CPU%d up: %d\n", cpu, error);
keir@21430 207 }
keir@21430 208 }
keir@21430 209
keir@21430 210 cpus_clear(frozen_cpus);
keir@21430 211 }