debuggers.hg

view xen/arch/x86/sysctl.c @ 22804:d276f4528b32

x86: On CPU online/offline from dom0, try flushing RCU work on EBUSY.

Although the caller should react appropriately to EBUSY, if the error
is due to pending RCU work then we can help things along by executing
rcu_barrier() and then retrying. To this end, this changeset is an
optimisation only.

Signed-off-by: Keir Fraser <keir@xen.org>
author Keir Fraser <keir@xen.org>
date Fri Jan 14 14:19:55 2011 +0000 (2011-01-14)
parents 6c9bcfb0fb84
children
line source
1 /******************************************************************************
2 * Arch-specific sysctl.c
3 *
4 * System management operations. For use by node control stack.
5 *
6 * Copyright (c) 2002-2006, K Fraser
7 */
9 #include <xen/config.h>
10 #include <xen/types.h>
11 #include <xen/lib.h>
12 #include <xen/mm.h>
13 #include <xen/guest_access.h>
14 #include <public/sysctl.h>
15 #include <xen/sched.h>
16 #include <xen/event.h>
17 #include <xen/domain_page.h>
18 #include <asm/msr.h>
19 #include <xen/trace.h>
20 #include <xen/console.h>
21 #include <xen/iocap.h>
22 #include <asm/irq.h>
23 #include <asm/hvm/hvm.h>
24 #include <asm/hvm/support.h>
25 #include <asm/processor.h>
26 #include <asm/numa.h>
27 #include <xen/nodemask.h>
28 #include <xen/cpu.h>
29 #include <xsm/xsm.h>
/* Extract the raw pointer member from a XEN_GUEST_HANDLE for local use. */
31 #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
33 long cpu_up_helper(void *data)
34 {
35 int cpu = (unsigned long)data;
36 int ret = cpu_up(cpu);
37 if ( ret == -EBUSY )
38 {
39 /* On EBUSY, flush RCU work and have one more go. */
40 rcu_barrier();
41 ret = cpu_up(cpu);
42 }
43 return ret;
44 }
46 long cpu_down_helper(void *data)
47 {
48 int cpu = (unsigned long)data;
49 int ret = cpu_down(cpu);
50 if ( ret == -EBUSY )
51 {
52 /* On EBUSY, flush RCU work and have one more go. */
53 rcu_barrier();
54 ret = cpu_down(cpu);
55 }
56 return ret;
57 }
59 extern int __node_distance(int a, int b);
/*
 * arch_do_sysctl() - x86-specific dispatcher for sysctl hypercall ops.
 *
 * @sysctl:   hypervisor-side copy of the guest's sysctl request; sub-op
 *            payloads live in sysctl->u.* and are updated in place.
 * @u_sysctl: guest handle to the original structure; handled sub-ops
 *            copy the (possibly updated) struct back through it.
 *
 * Returns 0 on success, -EFAULT on a failed guest copy, -EINVAL for an
 * unknown hotplug op, -ENOSYS for an unrecognised command, or whatever
 * error the XSM check / hotplug continuation reports.
 */
61 long arch_do_sysctl(
62 struct xen_sysctl *sysctl, XEN_GUEST_HANDLE(xen_sysctl_t) u_sysctl)
63 {
64 long ret = 0;
66 switch ( sysctl->cmd )
67 {
/* Report static platform information: topology counts, memory, caps. */
69 case XEN_SYSCTL_physinfo:
70 {
71 xen_sysctl_physinfo_t *pi = &sysctl->u.physinfo;
/* Security-module permission check gates this query. */
73 ret = xsm_physinfo();
74 if ( ret )
75 break;
/* Derive per-socket topology from CPU0's sibling/core maps. */
78 memset(pi, 0, sizeof(*pi));
79 pi->threads_per_core =
80 cpus_weight(per_cpu(cpu_sibling_map, 0));
81 pi->cores_per_socket =
82 cpus_weight(per_cpu(cpu_core_map, 0)) / pi->threads_per_core;
83 pi->nr_cpus = num_online_cpus();
84 pi->nr_nodes = num_online_nodes();
85 pi->max_node_id = MAX_NUMNODES-1;
86 pi->max_cpu_id = NR_CPUS-1;
87 pi->total_pages = total_pages;
88 pi->free_pages = avail_domheap_pages();
89 pi->scrub_pages = 0;
90 pi->cpu_khz = cpu_khz;
/* Raw CPUID capability words, NCAPINTS 32-bit values. */
91 memcpy(pi->hw_cap, boot_cpu_data.x86_capability, NCAPINTS*4);
92 if ( hvm_enabled )
93 pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm;
94 if ( iommu_enabled )
95 pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
/* Copy the filled-in structure back to the caller. */
97 if ( copy_to_guest(u_sysctl, sysctl, 1) )
98 ret = -EFAULT;
99 }
100 break;
/* Fill caller-provided per-CPU core/socket/node map arrays. */
102 case XEN_SYSCTL_topologyinfo:
103 {
104 uint32_t i, max_cpu_index, last_online_cpu;
105 xen_sysctl_topologyinfo_t *ti = &sysctl->u.topologyinfo;
/*
 * Honour the caller's array bound but never walk past the last
 * online CPU; report the true last index back in max_cpu_index.
 */
107 last_online_cpu = last_cpu(cpu_online_map);
108 max_cpu_index = min_t(uint32_t, ti->max_cpu_index, last_online_cpu);
109 ti->max_cpu_index = last_online_cpu;
/* Null handles mean "caller doesn't want this map" and are skipped. */
111 for ( i = 0; i <= max_cpu_index; i++ )
112 {
113 if ( !guest_handle_is_null(ti->cpu_to_core) )
114 {
/* Offline CPUs are reported as ~0u sentinels in every map. */
115 uint32_t core = cpu_online(i) ? cpu_to_core(i) : ~0u;
116 if ( copy_to_guest_offset(ti->cpu_to_core, i, &core, 1) )
117 break;
118 }
119 if ( !guest_handle_is_null(ti->cpu_to_socket) )
120 {
121 uint32_t socket = cpu_online(i) ? cpu_to_socket(i) : ~0u;
122 if ( copy_to_guest_offset(ti->cpu_to_socket, i, &socket, 1) )
123 break;
124 }
125 if ( !guest_handle_is_null(ti->cpu_to_node) )
126 {
127 uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
128 if ( copy_to_guest_offset(ti->cpu_to_node, i, &node, 1) )
129 break;
130 }
131 }
/* A premature break leaves i <= max_cpu_index => a copy faulted. */
133 ret = ((i <= max_cpu_index) || copy_to_guest(u_sysctl, sysctl, 1))
134 ? -EFAULT : 0;
135 }
136 break;
/* Per-node memory totals plus the node-to-node distance matrix. */
138 case XEN_SYSCTL_numainfo:
139 {
140 uint32_t i, j, max_node_index, last_online_node;
141 xen_sysctl_numainfo_t *ni = &sysctl->u.numainfo;
/* Same bound/report dance as topologyinfo, but over NUMA nodes. */
143 last_online_node = last_node(node_online_map);
144 max_node_index = min_t(uint32_t, ni->max_node_index, last_online_node);
145 ni->max_node_index = last_online_node;
147 for ( i = 0; i <= max_node_index; i++ )
148 {
149 if ( !guest_handle_is_null(ni->node_to_memsize) )
150 {
/* Offline nodes report zero bytes; sizes are pages << PAGE_SHIFT. */
151 uint64_t memsize = node_online(i) ?
152 node_spanned_pages(i) << PAGE_SHIFT : 0ul;
153 if ( copy_to_guest_offset(ni->node_to_memsize, i, &memsize, 1) )
154 break;
155 }
156 if ( !guest_handle_is_null(ni->node_to_memfree) )
157 {
158 uint64_t memfree = node_online(i) ?
159 avail_node_heap_pages(i) << PAGE_SHIFT : 0ul;
160 if ( copy_to_guest_offset(ni->node_to_memfree, i, &memfree, 1) )
161 break;
162 }
/*
 * Distance matrix is row-major with stride (max_node_index+1);
 * pairs involving an offline node get the ~0u sentinel.
 */
164 if ( !guest_handle_is_null(ni->node_to_node_distance) )
165 {
166 for ( j = 0; j <= max_node_index; j++)
167 {
168 uint32_t distance = ~0u;
169 if ( node_online(i) && node_online(j) )
170 distance = __node_distance(i, j);
171 if ( copy_to_guest_offset(
172 ni->node_to_node_distance,
173 i*(max_node_index+1) + j, &distance, 1) )
174 break;
175 }
/* Propagate an inner-loop copy failure out of the outer loop too. */
176 if ( j <= max_node_index )
177 break;
178 }
179 }
180 ret = ((i <= max_node_index) || copy_to_guest(u_sysctl, sysctl, 1))
181 ? -EFAULT : 0;
182 }
183 break;
/*
 * CPU online/offline requests.  The actual work is deferred to the
 * cpu_{up,down}_helper continuations, run on CPU0 so the hypercall
 * context is off the CPU being brought down.
 */
186 case XEN_SYSCTL_cpu_hotplug:
187 {
188 unsigned int cpu = sysctl->u.cpu_hotplug.cpu;
190 switch ( sysctl->u.cpu_hotplug.op )
191 {
192 case XEN_SYSCTL_CPU_HOTPLUG_ONLINE:
193 ret = continue_hypercall_on_cpu(
194 0, cpu_up_helper, (void *)(unsigned long)cpu);
195 break;
196 case XEN_SYSCTL_CPU_HOTPLUG_OFFLINE:
197 ret = continue_hypercall_on_cpu(
198 0, cpu_down_helper, (void *)(unsigned long)cpu);
199 break;
200 default:
201 ret = -EINVAL;
202 break;
203 }
204 }
205 break;
207 default:
208 ret = -ENOSYS;
209 break;
210 }
212 return ret;
213 }
215 /*
216 * Local variables:
217 * mode: C
218 * c-set-style: "BSD"
219 * c-basic-offset: 4
220 * tab-width: 4
221 * indent-tabs-mode: nil
222 * End:
223 */