debuggers.hg: view of xen/arch/ia64/xen/xen.lds.S @ 19964:3952eaeb70b0

Introduce and use a per-CPU read-mostly sub-section

Mixing data that only gets set up once and is then (perhaps
frequently) read by remote CPUs with data that the local CPU may
modify (again, perhaps frequently) still causes undesirable
cache-protocol-related bus traffic, so separate the former class of
objects from the latter.

The objects converted here were picked purely for their write-once
(or write-very-rarely) properties; further adjustments may be
desirable later. The primary users of the new sub-section arrive with
the next patch.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
author:   Keir Fraser <keir.fraser@citrix.com>
date:     Mon Jul 13 11:32:41 2009 +0100
parents:  5599cc1e0a84
children: 809b20f066fb
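
What the description boils down to: write-once per-CPU objects get their own
input section, .data.percpu.read_mostly, which the linker script below gathers
at the tail of the per-CPU output section behind a cache-line-aligned boundary.
As a minimal, hedged sketch of the C side (the macro definitions and the
example variable are illustrative assumptions, not taken from the patch), a
per-CPU definition could be routed into that sub-section like this:

/* Illustrative only: emit a per-CPU object into ".data.percpu" or into its
 * read-mostly sub-section, so the script's *(.data.percpu.read_mostly) rule
 * picks it up. Adjacent string literals concatenate into the section name. */
#define __DEFINE_PER_CPU(type, name, suffix) \
    __attribute__((__section__(".data.percpu" suffix))) \
    __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU(type, name) \
    __DEFINE_PER_CPU(type, name, "")
#define DEFINE_PER_CPU_READ_MOSTLY(type, name) \
    __DEFINE_PER_CPU(type, name, ".read_mostly")

/* Set up once during boot, afterwards only read (possibly by remote CPUs);
 * the variable name is hypothetical. */
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, frequency_mhz);

Objects defined this way still live in each CPU's per-CPU area, but no longer
share cache lines with per-CPU data the owning CPU writes frequently, so
remote readers stop generating coherence traffic for them. The full xen.lds.S
at this revision follows.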
#include <linux/config.h>

#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/pgtable.h>

#define LOAD_OFFSET (KERNEL_START - KERNEL_TR_PAGE_SIZE)
#include <asm-generic/vmlinux.lds.h>

OUTPUT_FORMAT("elf64-ia64-little")
OUTPUT_ARCH(ia64)
ENTRY(phys_start)
jiffies = jiffies_64;
PHDRS {
  code PT_LOAD;
  percpu PT_LOAD;
  data PT_LOAD;
}
SECTIONS
{
  /* Sections to be discarded */
  /DISCARD/ : {
    *(.exit.text)
    *(.exit.data)
    *(.exitcall.exit)
    *(.IA_64.unwind.exit.text)
    *(.IA_64.unwind_info.exit.text)
  }

  v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
  phys_start = _start - LOAD_OFFSET;

  code : { } :code
  . = KERNEL_START;

  _text = .;
  _stext = .;

  .text : AT(ADDR(.text) - LOAD_OFFSET)
  {
    *(.text.ivt)
    *(.text)
    SCHED_TEXT
    LOCK_TEXT
    *(.gnu.linkonce.t*)
  }
  .text2 : AT(ADDR(.text2) - LOAD_OFFSET)
  { *(.text2) }
#ifdef CONFIG_SMP
  .text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET)
  { *(.text.lock) }
#endif
  _etext = .;

  /* Read-only data */

  /* Exception table */
  . = ALIGN(16);
  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET)
  {
    __start___ex_table = .;
    *(__ex_table)
    __stop___ex_table = .;
  }

  .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET)
  {
    __start___vtop_patchlist = .;
    *(.data.patch.vtop)
    __end___vtop_patchlist = .;
  }

  .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET)
  {
    __start___mckinley_e9_bundles = .;
    *(.data.patch.mckinley_e9)
    __end___mckinley_e9_bundles = .;
  }

  /* Global data */
  _data = .;

#if defined(CONFIG_IA64_GENERIC)
  /* Machine Vector */
  . = ALIGN(16);
  .machvec : AT(ADDR(.machvec) - LOAD_OFFSET)
  {
    machvec_start = .;
    *(.machvec)
    machvec_end = .;
  }
#endif

  /* Unwind info & table: */
  . = ALIGN(8);
  .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET)
  { *(.IA_64.unwind_info*) }
  .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET)
  {
    __start_unwind = .;
    *(.IA_64.unwind*)
    __end_unwind = .;
  }

  RODATA

  .opd : AT(ADDR(.opd) - LOAD_OFFSET)
  { *(.opd) }

  /* Initialization code and data: */

  . = ALIGN(PAGE_SIZE);
  __init_begin = .;
  .init.text : AT(ADDR(.init.text) - LOAD_OFFSET)
  {
    _sinittext = .;
    *(.init.text)
    _einittext = .;
  }

  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET)
  { *(.init.data) }

  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET)
  {
    __initramfs_start = .;
    *(.init.ramfs)
    __initramfs_end = .;
  }

  . = ALIGN(16);
  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET)
  {
    __setup_start = .;
    *(.init.setup)
    __setup_end = .;
  }
  .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET)
  {
    __initcall_start = .;
    *(.initcall1.init)
    *(.initcall2.init)
    *(.initcall3.init)
    *(.initcall4.init)
    *(.initcall5.init)
    *(.initcall6.init)
    *(.initcall7.init)
    __initcall_end = .;
  }
  __con_initcall_start = .;
  .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET)
  { *(.con_initcall.init) }
  __con_initcall_end = .;
  __security_initcall_start = .;
  .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET)
  { *(.security_initcall.init) }
  __security_initcall_end = .;
  . = ALIGN(PAGE_SIZE);
  __init_end = .;

  /* The initial task and kernel stack */
  .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET)
  { *(.data.init_task) }

  .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET)
  { *(__special_page_section)
    __start_gate_section = .;
    *(.data.gate)
    __stop_gate_section = .;
  }
  . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose kernel data */

  .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET)
  { *(.data.read_mostly) }

  .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET)
  { *(.data.cacheline_aligned) }

  /* Per-cpu data: */
  percpu : { } :percpu
  . = ALIGN(PERCPU_PAGE_SIZE);
  __phys_per_cpu_start = .;
  .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
  {
    __per_cpu_start = .;
    *(.data.percpu)
    . = ALIGN(SMP_CACHE_BYTES);
    *(.data.percpu.read_mostly)
    __per_cpu_end = .;
  }
  . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits into percpu page size */

  data : { } :data
  .data : AT(ADDR(.data) - LOAD_OFFSET)
  {
#ifdef CONFIG_SMP
    . = ALIGN(PERCPU_PAGE_SIZE);
    __cpu0_per_cpu = .;
    . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
#endif
    *(.data)
    *(.data1)
    *(.gnu.linkonce.d*)
    CONSTRUCTORS
  }

  . = ALIGN(16); /* gp must be 16-byte aligned for exc. table */
  .got : AT(ADDR(.got) - LOAD_OFFSET)
  { *(.got.plt) *(.got) }
  __gp = ADDR(.got) + 0x200000;
  /* We want the small data sections together, so single-instruction offsets
     can access them all, and initialized data all before uninitialized, so
     we can shorten the on-disk segment size. */
  .sdata : AT(ADDR(.sdata) - LOAD_OFFSET)
  { *(.sdata) *(.sdata1) *(.srdata) }
  _edata = .;
  _bss = .;
  .sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
  { *(.sbss) *(.scommon) }
  .bss : AT(ADDR(.bss) - LOAD_OFFSET)
  { *(.bss) *(COMMON) }

  _end = .;

  code : { } :code
  /* Stabs debugging sections. */
  .stab 0 : { *(.stab) }
  .stabstr 0 : { *(.stabstr) }
  .stab.excl 0 : { *(.stab.excl) }
  .stab.exclstr 0 : { *(.stab.exclstr) }
  .stab.index 0 : { *(.stab.index) }
  .stab.indexstr 0 : { *(.stab.indexstr) }
  /* DWARF debug sections.
     Symbols in the DWARF debugging sections are relative to the beginning
     of the section so we begin them at 0. */
  /* DWARF 1 */
  .debug 0 : { *(.debug) }
  .line 0 : { *(.line) }
  /* GNU DWARF 1 extensions */
  .debug_srcinfo 0 : { *(.debug_srcinfo) }
  .debug_sfnames 0 : { *(.debug_sfnames) }
  /* DWARF 1.1 and DWARF 2 */
  .debug_aranges 0 : { *(.debug_aranges) }
  .debug_pubnames 0 : { *(.debug_pubnames) }
  /* DWARF 2 */
  .debug_info 0 : { *(.debug_info) }
  .debug_abbrev 0 : { *(.debug_abbrev) }
  .debug_line 0 : { *(.debug_line) }
  .debug_frame 0 : { *(.debug_frame) }
  .debug_str 0 : { *(.debug_str) }
  .debug_loc 0 : { *(.debug_loc) }
  .debug_macinfo 0 : { *(.debug_macinfo) }
  /* SGI/MIPS DWARF 2 extensions */
  .debug_weaknames 0 : { *(.debug_weaknames) }
  .debug_funcnames 0 : { *(.debug_funcnames) }
  .debug_typenames 0 : { *(.debug_typenames) }
  .debug_varnames 0 : { *(.debug_varnames) }
  /* These must appear regardless of . */
  /* Discard them for now since Intel SoftSDV cannot handle them.
  .comment 0 : { *(.comment) }
  .note 0 : { *(.note) }
  */
  /DISCARD/ : { *(.comment) }
  /DISCARD/ : { *(.note) }
}