/root/src/xen/xen/common/stop_machine.c
Line | Count | Source (jump to first uncovered line) |
1 | | /****************************************************************************** |
2 | | * common/stop_machine.c |
3 | | * |
4 | | * Facilities to put whole machine in a safe 'stop' state |
5 | | * |
6 | | * Copyright 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation |
7 | | * Copyright 2008 Kevin Tian <kevin.tian@intel.com>, Intel Corporation. |
8 | | * |
9 | | * This program is free software; you can redistribute it and/or modify it |
10 | | * under the terms and conditions of the GNU General Public License, |
11 | | * version 2, as published by the Free Software Foundation. |
12 | | * |
13 | | * This program is distributed in the hope it will be useful, but WITHOUT |
14 | | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
15 | | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
16 | | * more details. |
17 | | * |
18 | | * You should have received a copy of the GNU General Public License along with |
19 | | * this program; If not, see <http://www.gnu.org/licenses/>. |
20 | | */ |
21 | | |
22 | | #include <xen/init.h> |
23 | | #include <xen/sched.h> |
24 | | #include <xen/spinlock.h> |
25 | | #include <xen/tasklet.h> |
26 | | #include <xen/stop_machine.h> |
27 | | #include <xen/errno.h> |
28 | | #include <xen/smp.h> |
29 | | #include <xen/cpu.h> |
30 | | #include <asm/current.h> |
31 | | #include <asm/processor.h> |
32 | | |
/*
 * Phases of the stop-machine rendezvous.  The BSP (caller of
 * stop_machine_run()) advances the global state; every other online CPU
 * spins in stopmachine_action() and acknowledges each transition.
 */
enum stopmachine_state {
    STOPMACHINE_START,        /* Initial state; slaves not yet engaged. */
    STOPMACHINE_PREPARE,      /* Slaves have entered their tasklet. */
    STOPMACHINE_DISABLE_IRQ,  /* Slaves disable local interrupts. */
    STOPMACHINE_INVOKE,       /* Selected CPU(s) invoke the callback. */
    STOPMACHINE_EXIT          /* Rendezvous over; slaves re-enable IRQs. */
};
40 | | |
/* Shared rendezvous state, protected by stopmachine_lock on the caller side. */
struct stopmachine_data {
    unsigned int nr_cpus;     /* Number of slave CPUs expected to ack. */

    enum stopmachine_state state; /* Current phase; written by the caller. */
    atomic_t done;            /* Count of slaves that acked current phase. */

    unsigned int fn_cpu;      /* CPU to run fn on; NR_CPUS means all CPUs. */
    int fn_result;            /* First non-zero return value of fn, if any. */
    int (*fn)(void *);        /* Callback to invoke while machine stopped. */
    void *fn_data;            /* Opaque argument passed to fn. */
};
52 | | |
/* Per-CPU tasklet each slave runs to take part in the rendezvous. */
static DEFINE_PER_CPU(struct tasklet, stopmachine_tasklet);
/* Single shared rendezvous record; serialized by stopmachine_lock. */
static struct stopmachine_data stopmachine_data;
static DEFINE_SPINLOCK(stopmachine_lock);
56 | | |
/*
 * Advance the rendezvous to @state.  The ack counter must be reset
 * *before* the state store becomes visible (hence the write barrier),
 * otherwise a fast slave could ack the new state into the old counter.
 */
static void stopmachine_set_state(enum stopmachine_state state)
{
    atomic_set(&stopmachine_data.done, 0);
    smp_wmb();
    stopmachine_data.state = state;
}
63 | | |
/* Busy-wait until every slave CPU has acknowledged the current state. */
static void stopmachine_wait_state(void)
{
    while ( atomic_read(&stopmachine_data.done) != stopmachine_data.nr_cpus )
        cpu_relax();
}
69 | | |
/*
 * Bring every online CPU to a quiescent, IRQs-off rendezvous and invoke
 * @fn(@data) on @cpu (or on all CPUs if @cpu == NR_CPUS), then release
 * everyone.  Must be called with interrupts enabled, from a context that
 * may be descheduled (we only try-lock).
 *
 * Returns the first non-zero value returned by any invocation of @fn,
 * 0 on success, or -EBUSY if the CPU maps or the rendezvous lock could
 * not be taken without blocking.
 */
int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
{
    cpumask_t allbutself;
    unsigned int i, nr_cpus;
    int ret;

    BUG_ON(!local_irq_is_enabled());

    /* cpu_online_map must not change. */
    if ( !get_cpu_maps() )
        return -EBUSY;

    /* Slaves are every online CPU except ourselves. */
    cpumask_andnot(&allbutself, &cpu_online_map,
                   cpumask_of(smp_processor_id()));
    nr_cpus = cpumask_weight(&allbutself);

    /* Must not spin here as the holder will expect us to be descheduled. */
    if ( !spin_trylock(&stopmachine_lock) )
    {
        put_cpu_maps();
        return -EBUSY;
    }

    /* Publish the rendezvous parameters before scheduling any slave. */
    stopmachine_data.fn = fn;
    stopmachine_data.fn_data = data;
    stopmachine_data.nr_cpus = nr_cpus;
    stopmachine_data.fn_cpu = cpu;
    stopmachine_data.fn_result = 0;
    atomic_set(&stopmachine_data.done, 0);
    stopmachine_data.state = STOPMACHINE_START;

    /* Ensure the stores above are visible before slaves start spinning. */
    smp_wmb();

    for_each_cpu ( i, &allbutself )
        tasklet_schedule_on_cpu(&per_cpu(stopmachine_tasklet, i), i);

    /* Wait for all slaves to arrive in their tasklet. */
    stopmachine_set_state(STOPMACHINE_PREPARE);
    stopmachine_wait_state();

    /* Everyone (including us) turns interrupts off. */
    local_irq_disable();
    stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);
    stopmachine_wait_state();
    /* Lock debugging would false-positive while the machine is stopped. */
    spin_debug_disable();

    stopmachine_set_state(STOPMACHINE_INVOKE);
    /* Run fn locally if we are the target CPU, or if all CPUs are. */
    if ( (cpu == smp_processor_id()) || (cpu == NR_CPUS) )
    {
        ret = (*fn)(data);
        if ( ret )
            write_atomic(&stopmachine_data.fn_result, ret);
    }
    stopmachine_wait_state();
    /* First error reported by any invocation, 0 if all succeeded. */
    ret = stopmachine_data.fn_result;

    spin_debug_enable();
    /* Release the slaves; they re-enable their own IRQs on exit. */
    stopmachine_set_state(STOPMACHINE_EXIT);
    stopmachine_wait_state();
    local_irq_enable();

    spin_unlock(&stopmachine_lock);

    put_cpu_maps();

    return ret;
}
135 | | |
/*
 * Slave side of the rendezvous, run as a tasklet on each non-initiating
 * CPU.  Tracks the global state machine: waits for each transition,
 * performs the per-phase action, then acks by bumping the done counter.
 * Leaves with interrupts enabled after STOPMACHINE_EXIT.
 */
static void stopmachine_action(unsigned long cpu)
{
    enum stopmachine_state state = STOPMACHINE_START;

    BUG_ON(cpu != smp_processor_id());

    /* Pair with the initiator's smp_wmb(): see fn/fn_data/fn_cpu stores. */
    smp_mb();

    while ( state != STOPMACHINE_EXIT )
    {
        /* Spin until the initiator advances the state machine. */
        while ( stopmachine_data.state == state )
            cpu_relax();

        state = stopmachine_data.state;
        switch ( state )
        {
        case STOPMACHINE_DISABLE_IRQ:
            local_irq_disable();
            break;
        case STOPMACHINE_INVOKE:
            /* Invoke fn if this CPU is the target, or if all CPUs are. */
            if ( (stopmachine_data.fn_cpu == smp_processor_id()) ||
                 (stopmachine_data.fn_cpu == NR_CPUS) )
            {
                int ret = stopmachine_data.fn(stopmachine_data.fn_data);

                if ( ret )
                    write_atomic(&stopmachine_data.fn_result, ret);
            }
            break;
        default:
            break;
        }

        /* Order the phase's work before acking it to the initiator. */
        smp_mb();
        atomic_inc(&stopmachine_data.done);
    }

    /* IRQs were disabled in STOPMACHINE_DISABLE_IRQ; restore them. */
    local_irq_enable();
}
175 | | |
176 | | static int cpu_callback( |
177 | | struct notifier_block *nfb, unsigned long action, void *hcpu) |
178 | 12 | { |
179 | 12 | unsigned int cpu = (unsigned long)hcpu; |
180 | 12 | |
181 | 12 | if ( action == CPU_UP_PREPARE ) |
182 | 12 | tasklet_init(&per_cpu(stopmachine_tasklet, cpu), |
183 | 12 | stopmachine_action, cpu); |
184 | 12 | |
185 | 12 | return NOTIFY_DONE; |
186 | 12 | } |
187 | | |
/* Hotplug notifier registration record for cpu_callback(). */
static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback
};
191 | | |
192 | | static int __init cpu_stopmachine_init(void) |
193 | 1 | { |
194 | 1 | unsigned int cpu; |
195 | 1 | for_each_online_cpu ( cpu ) |
196 | 12 | { |
197 | 12 | void *hcpu = (void *)(long)cpu; |
198 | 12 | cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu); |
199 | 12 | } |
200 | 1 | register_cpu_notifier(&cpu_nfb); |
201 | 1 | return 0; |
202 | 1 | } |
203 | | __initcall(cpu_stopmachine_init); |