debuggers.hg: view of xen/arch/x86/x86_32/mm.c @ 3674:fb875591fd72

bitkeeper revision 1.1159.223.63 (42028527-fv-d9BM0_LRp8UKGP19gQ)

summary      Fix NMI deferral.
             Signed-off-by: keir.fraser@cl.cam.ac.uk
author       kaf24@scramble.cl.cam.ac.uk
date         Thu Feb 03 20:10:15 2005 +0000 (2005-02-03)
parents      cfb5f80fb23e
children     b96857892a2c

/******************************************************************************
 * arch/x86/x86_32/mm.c
 *
 * Modifications to Linux original are copyright (c) 2004, K A Fraser
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <asm/page.h>
#include <asm/flushtlb.h>
#include <asm/fixmap.h>
#include <asm/domain_page.h>

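/*
 * First machine frame of the machine-to-physical translation table, as
 * allocated and mapped at RDWR_MPT_VIRT_START by paging_init() below.
 */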
unsigned long m2p_start_mfn;

static inline void set_pte_phys(unsigned long vaddr,
                                l1_pgentry_t entry)
{
    l2_pgentry_t *l2ent;
    l1_pgentry_t *l1ent;

    l2ent = &idle_pg_table[l2_table_offset(vaddr)];
    l1ent = l2_pgentry_to_l1(*l2ent) + l1_table_offset(vaddr);
    *l1ent = entry;

    /* It's enough to flush this one mapping. */
    __flush_tlb_one(vaddr);
}

void __set_fixmap(enum fixed_addresses idx,
                  l1_pgentry_t entry)
{
    unsigned long address = fix_to_virt(idx);

    if ( likely(idx < __end_of_fixed_addresses) )
        set_pte_phys(address, entry);
    else
        printk("Invalid __set_fixmap\n");
}

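/*
 * Illustrative sketch only: a caller that wants machine address 'maddr'
 * visible at a fixmap slot (FIX_EXAMPLE is a hypothetical slot, not one
 * defined in this tree) would install a hypervisor PTE for it, and can later
 * revoke the mapping by installing a zero (not-present) entry:
 *
 *     __set_fixmap(FIX_EXAMPLE, mk_l1_pgentry(maddr | __PAGE_HYPERVISOR));
 *     ...
 *     __set_fixmap(FIX_EXAMPLE, mk_l1_pgentry(0));
 */
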
void __init paging_init(void)
{
    void *ioremap_pt;
    unsigned long v, l2e;
    struct pfn_info *pg;

    /* Allocate and map the machine-to-phys table. */
    if ( (pg = alloc_domheap_pages(NULL, 10)) == NULL )
        panic("Not enough memory to bootstrap Xen.\n");
    m2p_start_mfn = page_to_pfn(pg);
    idle_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT] =
        mk_l2_pgentry(page_to_phys(pg) | __PAGE_HYPERVISOR | _PAGE_PSE);

    /* Xen 4MB mappings can all be GLOBAL. */
    if ( cpu_has_pge )
    {
        for ( v = HYPERVISOR_VIRT_START; v; v += (1 << L2_PAGETABLE_SHIFT) )
        {
            l2e = l2_pgentry_val(idle_pg_table[v >> L2_PAGETABLE_SHIFT]);
            if ( l2e & _PAGE_PSE )
                l2e |= _PAGE_GLOBAL;
            idle_pg_table[v >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(l2e);
        }
    }

    /* Create page table for ioremap(). */
    ioremap_pt = (void *)alloc_xenheap_page();
    clear_page(ioremap_pt);
    idle_pg_table[IOREMAP_VIRT_START >> L2_PAGETABLE_SHIFT] =
        mk_l2_pgentry(__pa(ioremap_pt) | __PAGE_HYPERVISOR);

    /* Create read-only mapping of MPT for guest-OS use. */
    idle_pg_table[RO_MPT_VIRT_START >> L2_PAGETABLE_SHIFT] =
        mk_l2_pgentry(l2_pgentry_val(
            idle_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT]) &
                      ~_PAGE_RW);

    /* Set up mapping cache for domain pages. */
    mapcache = (unsigned long *)alloc_xenheap_page();
    clear_page(mapcache);
    idle_pg_table[MAPCACHE_VIRT_START >> L2_PAGETABLE_SHIFT] =
        mk_l2_pgentry(__pa(mapcache) | __PAGE_HYPERVISOR);

    /* Set up linear page table mapping. */
    idle_pg_table[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
        mk_l2_pgentry(__pa(idle_pg_table) | __PAGE_HYPERVISOR);
}

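/*
 * Clear the guest-range (low) entries of the idle page table. These low
 * mappings of physical memory are used only during early boot, and are torn
 * down here once they are no longer required.
 */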
void __init zap_low_mappings(void)
{
    int i;
    for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
        idle_pg_table[i] = mk_l2_pgentry(0);
    flush_tlb_all_pge();
}

/*
 * Allows shooting down of borrowed page-table use on specific CPUs.
 * Specifically, we borrow page tables when running the idle domain.
 */
static void __synchronise_pagetables(void *mask)
{
    struct domain *d = current;
    if ( ((unsigned long)mask & (1 << d->processor)) && is_idle_task(d) )
        write_ptbase(&d->mm);
}
void synchronise_pagetables(unsigned long cpu_mask)
{
    __synchronise_pagetables((void *)cpu_mask);
    smp_call_function(__synchronise_pagetables, (void *)cpu_mask, 1, 1);
}

long do_stack_switch(unsigned long ss, unsigned long esp)
{
    int nr = smp_processor_id();
    struct tss_struct *t = &init_tss[nr];

    /* We need to do this check as we load and use SS on guest's behalf. */
    if ( (ss & 3) == 0 )
        return -EPERM;

    current->thread.guestos_ss = ss;
    current->thread.guestos_sp = esp;
    t->ss1  = ss;
    t->esp1 = esp;

    return 0;
}

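/*
 * Worked example of the descriptor decoding performed below (standard x86
 * segment-descriptor layout; the values are illustrative, not taken from this
 * file). For a flat DPL-3 code descriptor a = 0x0000FFFF, b = 0x00CFFA00:
 *
 *     base  = (b & 0xff000000) | ((b & 0xff) << 16) | (a >> 16) = 0
 *     limit = ((b & 0xf0000) | (a & 0xffff)) + 1 = 0x100000
 *
 * The granularity bit is set, so the limit is scaled by 4kB (limit <<= 12),
 * which wraps to 0 on a 32-bit build. The grows-up check below then sees
 * (base + limit) <= base and truncates the limit so that the segment cannot
 * reach the Xen-private area, rather than accepting the descriptor as-is.
 */
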
/* Returns TRUE if given descriptor is valid for GDT or LDT. */
int check_descriptor(unsigned long *d)
{
    unsigned long base, limit, a = d[0], b = d[1];

    /* A not-present descriptor will always fault, so is safe. */
    if ( !(b & _SEGMENT_P) )
        goto good;

    /*
     * We don't allow a DPL of zero. There is no legitimate reason for
     * specifying DPL==0, and it gets rather dangerous if we also accept call
     * gates (consider a call gate pointing at another guestos descriptor with
     * DPL 0 -- this would get the OS ring-0 privileges).
     */
    if ( (b & _SEGMENT_DPL) == 0 )
        goto bad;

    if ( !(b & _SEGMENT_S) )
    {
        /*
         * System segment:
         *  1. Don't allow interrupt or trap gates as they belong in the IDT.
         *  2. Don't allow TSS descriptors or task gates as we don't
         *     virtualise x86 tasks.
         *  3. Don't allow LDT descriptors because they're unnecessary and
         *     I'm uneasy about allowing an LDT page to contain LDT
         *     descriptors. In any case, Xen automatically creates the
         *     required descriptor when reloading the LDT register.
         *  4. We allow call gates but they must not jump to a private segment.
         */

        /* Disallow everything but call gates. */
        if ( (b & _SEGMENT_TYPE) != 0xc00 )
            goto bad;

        /* Can't allow far jump to a Xen-private segment. */
        if ( !VALID_CODESEL(a>>16) )
            goto bad;

        /* Reserved bits must be zero. */
        if ( (b & 0xe0) != 0 )
            goto bad;

        /* No base/limit check is needed for a call gate. */
        goto good;
    }

    /* Check that base is at least a page away from Xen-private area. */
    base  = (b&(0xff<<24)) | ((b&0xff)<<16) | (a>>16);
    if ( base >= (PAGE_OFFSET - PAGE_SIZE) )
        goto bad;

    /* Check and truncate the limit if necessary. */
    limit = (b&0xf0000) | (a&0xffff);
    limit++; /* We add one because limit is inclusive. */
    if ( (b & _SEGMENT_G) )
        limit <<= 12;

    if ( (b & (_SEGMENT_CODE | _SEGMENT_EC)) == _SEGMENT_EC )
    {
        /*
         * Grows-down limit check.
         * NB. limit == 0xFFFFF provides no access      (if G=1).
         *     limit == 0x00000 provides 4GB-4kB access (if G=1).
         */
        if ( (base + limit) > base )
        {
            limit = -(base & PAGE_MASK);
            goto truncate;
        }
    }
    else
    {
        /*
         * Grows-up limit check.
         * NB. limit == 0xFFFFF provides 4GB access (if G=1).
         *     limit == 0x00000 provides 4kB access (if G=1).
         */
        if ( ((base + limit) <= base) ||
             ((base + limit) > PAGE_OFFSET) )
        {
            limit = PAGE_OFFSET - base;
        truncate:
            if ( !(b & _SEGMENT_G) )
                goto bad; /* too dangerous; too hard to work out... */
            limit = (limit >> 12) - 1;
            d[0] &= ~0x0ffff; d[0] |= limit & 0x0ffff;
            d[1] &= ~0xf0000; d[1] |= limit & 0xf0000;
        }
    }

 good:
    return 1;
 bad:
    return 0;
}

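/*
 * Note: a GDT may span at most 16 frames (8192 entries at 512 eight-byte
 * descriptors per page), which is why the loop below and the frames[] array
 * in do_set_gdt() are sized to 16.
 */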
void destroy_gdt(struct domain *d)
{
    int i;
    unsigned long pfn;

    for ( i = 0; i < 16; i++ )
    {
        if ( (pfn = l1_pgentry_to_pagenr(d->mm.perdomain_pt[i])) != 0 )
            put_page_and_type(&frame_table[pfn]);
        d->mm.perdomain_pt[i] = mk_l1_pgentry(0);
    }
}

long set_gdt(struct domain *d,
             unsigned long *frames,
             unsigned int entries)
{
    /* NB. There are 512 8-byte entries per GDT page. */
    int i = 0, nr_pages = (entries + 511) / 512;
    struct desc_struct *vgdt;
    unsigned long pfn;

    /* Check the first page in the new GDT. */
    if ( (pfn = frames[0]) >= max_page )
        goto fail;

    /* The first page is special because Xen owns a range of entries in it. */
    if ( !get_page_and_type(&frame_table[pfn], d, PGT_gdt_page) )
    {
        /* GDT checks failed: try zapping the Xen reserved entries. */
        if ( !get_page_and_type(&frame_table[pfn], d, PGT_writable_page) )
            goto fail;
        vgdt = map_domain_mem(pfn << PAGE_SHIFT);
        memset(vgdt + FIRST_RESERVED_GDT_ENTRY, 0,
               NR_RESERVED_GDT_ENTRIES*8);
        unmap_domain_mem(vgdt);
        put_page_and_type(&frame_table[pfn]);

        /* Okay, we zapped the entries. Now try the GDT checks again. */
        if ( !get_page_and_type(&frame_table[pfn], d, PGT_gdt_page) )
            goto fail;
    }

    /* Check the remaining pages in the new GDT. */
    for ( i = 1; i < nr_pages; i++ )
        if ( ((pfn = frames[i]) >= max_page) ||
             !get_page_and_type(&frame_table[pfn], d, PGT_gdt_page) )
            goto fail;

    /* Copy reserved GDT entries to the new GDT. */
    vgdt = map_domain_mem(frames[0] << PAGE_SHIFT);
    memcpy(vgdt + FIRST_RESERVED_GDT_ENTRY,
           gdt_table + FIRST_RESERVED_GDT_ENTRY,
           NR_RESERVED_GDT_ENTRIES*8);
    unmap_domain_mem(vgdt);

    /* Tear down the old GDT. */
    destroy_gdt(d);

    /* Install the new GDT. */
    for ( i = 0; i < nr_pages; i++ )
        d->mm.perdomain_pt[i] =
            mk_l1_pgentry((frames[i] << PAGE_SHIFT) | __PAGE_HYPERVISOR);

    SET_GDT_ADDRESS(d, GDT_VIRT_START);
    SET_GDT_ENTRIES(d, entries);

    return 0;

 fail:
    while ( i-- > 0 )
        put_page_and_type(&frame_table[frames[i]]);
    return -EINVAL;
}

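/*
 * Hypercall handler: the guest passes a list of machine frame numbers to use
 * as its GDT, plus the total number of descriptor entries. A guest-side
 * wrapper (for example HYPERVISOR_set_gdt() in the paravirtualised Linux
 * tree; the call shown here is illustrative only, with a hypothetical MFN)
 * would invoke it roughly as:
 *
 *     unsigned long frames[1] = { gdt_mfn };
 *     HYPERVISOR_set_gdt(frames, nr_entries);
 */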
long do_set_gdt(unsigned long *frame_list, unsigned int entries)
{
    int nr_pages = (entries + 511) / 512;
    unsigned long frames[16];
    long ret;

    if ( (entries <= LAST_RESERVED_GDT_ENTRY) || (entries > 8192) )
        return -EINVAL;

    if ( copy_from_user(frames, frame_list, nr_pages * sizeof(unsigned long)) )
        return -EFAULT;

    if ( (ret = set_gdt(current, frames, entries)) == 0 )
    {
        local_flush_tlb();
        __asm__ __volatile__ ("lgdt %0" : "=m" (*current->mm.gdt));
    }

    return ret;
}

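/*
 * Hypercall handler: 'pa' is the machine physical address of the descriptor
 * to update (it must be 8-byte aligned) and word1/word2 are the new low and
 * high descriptor words. The new contents must pass check_descriptor(), and
 * the target frame must be one the caller may legitimately write descriptors
 * into (a GDT page, an LDT page, or an ordinary writable page).
 */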
long do_update_descriptor(
    unsigned long pa, unsigned long word1, unsigned long word2)
{
    unsigned long *gdt_pent, pfn = pa >> PAGE_SHIFT, d[2];
    struct pfn_info *page;
    long ret = -EINVAL;

    d[0] = word1;
    d[1] = word2;

    if ( (pa & 7) || (pfn >= max_page) || !check_descriptor(d) )
        return -EINVAL;

    page = &frame_table[pfn];
    if ( unlikely(!get_page(page, current)) )
        return -EINVAL;

    /* Check if the given frame is in use in an unsafe context. */
    switch ( page->u.inuse.type_info & PGT_type_mask )
    {
    case PGT_gdt_page:
        /* Disallow updates of Xen-reserved descriptors in the current GDT. */
        if ( (l1_pgentry_to_pagenr(current->mm.perdomain_pt[0]) == pfn) &&
             (((pa&(PAGE_SIZE-1))>>3) >= FIRST_RESERVED_GDT_ENTRY) &&
             (((pa&(PAGE_SIZE-1))>>3) <= LAST_RESERVED_GDT_ENTRY) )
            goto out;
        if ( unlikely(!get_page_type(page, PGT_gdt_page)) )
            goto out;
        break;
    case PGT_ldt_page:
        if ( unlikely(!get_page_type(page, PGT_ldt_page)) )
            goto out;
        break;
    default:
        if ( unlikely(!get_page_type(page, PGT_writable_page)) )
            goto out;
        break;
    }

    /* All is good so make the update. */
    gdt_pent = map_domain_mem(pa);
    memcpy(gdt_pent, d, 8);
    unmap_domain_mem(gdt_pent);

    put_page_type(page);

    ret = 0; /* success */

 out:
    put_page(page);
    return ret;
}

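/*
 * MEMORY_GUARD support: when enabled, page-aligned ranges of the Xen heap can
 * be "guarded" by clearing the present bit in their PTEs, so that stray
 * accesses fault immediately instead of silently corrupting memory.
 * memguard_init() first replaces the xenheap's 4MB superpage mappings with
 * ordinary L1 tables so that individual pages can be guarded.
 */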
#ifdef MEMORY_GUARD

void *memguard_init(void *heap_start)
{
    l1_pgentry_t *l1;
    int i, j;

    /* Round the allocation pointer up to a page boundary. */
    heap_start = (void *)(((unsigned long)heap_start + (PAGE_SIZE-1)) &
                          PAGE_MASK);

    /* Memory guarding is incompatible with super pages. */
    for ( i = 0; i < (xenheap_phys_end >> L2_PAGETABLE_SHIFT); i++ )
    {
        l1 = (l1_pgentry_t *)heap_start;
        heap_start = (void *)((unsigned long)heap_start + PAGE_SIZE);
        for ( j = 0; j < ENTRIES_PER_L1_PAGETABLE; j++ )
            l1[j] = mk_l1_pgentry((i << L2_PAGETABLE_SHIFT) |
                                  (j << L1_PAGETABLE_SHIFT) |
                                  __PAGE_HYPERVISOR);
        idle_pg_table[i + l2_table_offset(PAGE_OFFSET)] =
            mk_l2_pgentry(virt_to_phys(l1) | __PAGE_HYPERVISOR);
    }

    return heap_start;
}

static void __memguard_change_range(void *p, unsigned long l, int guard)
{
    l1_pgentry_t *l1;
    l2_pgentry_t *l2;
    unsigned long _p = (unsigned long)p;
    unsigned long _l = (unsigned long)l;

    /* Ensure we are dealing with a page-aligned whole number of pages. */
    ASSERT((_p&PAGE_MASK) != 0);
    ASSERT((_l&PAGE_MASK) != 0);
    ASSERT((_p&~PAGE_MASK) == 0);
    ASSERT((_l&~PAGE_MASK) == 0);

    while ( _l != 0 )
    {
        l2 = &idle_pg_table[l2_table_offset(_p)];
        l1 = l2_pgentry_to_l1(*l2) + l1_table_offset(_p);
        if ( guard )
            *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) & ~_PAGE_PRESENT);
        else
            *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) | _PAGE_PRESENT);
        _p += PAGE_SIZE;
        _l -= PAGE_SIZE;
    }
}

void memguard_guard_range(void *p, unsigned long l)
{
    __memguard_change_range(p, l, 1);
    local_flush_tlb();
}

void memguard_unguard_range(void *p, unsigned long l)
{
    __memguard_change_range(p, l, 0);
}

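/*
 * Illustrative sketch only (hypothetical caller, not part of this file):
 * guard a page-aligned buffer while it must not be touched, then unguard it
 * before reuse.
 *
 *     memguard_guard_range(buf, PAGE_SIZE);    /- any access now faults -/
 *     ...
 *     memguard_unguard_range(buf, PAGE_SIZE);  /- mappings present again -/
 *     ASSERT(!memguard_is_guarded(buf));
 */
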
int memguard_is_guarded(void *p)
{
    l1_pgentry_t *l1;
    l2_pgentry_t *l2;
    unsigned long _p = (unsigned long)p;
    l2 = &idle_pg_table[l2_table_offset(_p)];
    l1 = l2_pgentry_to_l1(*l2) + l1_table_offset(_p);
    return !(l1_pgentry_val(*l1) & _PAGE_PRESENT);
}

#endif