debuggers.hg

annotate xen/arch/x86/x86_32/mm.c @ 3674:fb875591fd72

bitkeeper revision 1.1159.223.63 (42028527-fv-d9BM0_LRp8UKGP19gQ)

Fix NMI deferral.
Signed-off-by: keir.fraser@cl.cam.ac.uk
author kaf24@scramble.cl.cam.ac.uk
date Thu Feb 03 20:10:15 2005 +0000 (2005-02-03)
parents cfb5f80fb23e
children b96857892a2c
/******************************************************************************
 * arch/x86/x86_32/mm.c
 *
 * Modifications to Linux original are copyright (c) 2004, K A Fraser
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <asm/page.h>
#include <asm/flushtlb.h>
#include <asm/fixmap.h>
#include <asm/domain_page.h>

unsigned long m2p_start_mfn;

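/* Install a single l1 entry in the idle page tables and flush that mapping. */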
static inline void set_pte_phys(unsigned long vaddr,
                                l1_pgentry_t entry)
{
    l2_pgentry_t *l2ent;
    l1_pgentry_t *l1ent;

    l2ent = &idle_pg_table[l2_table_offset(vaddr)];
    l1ent = l2_pgentry_to_l1(*l2ent) + l1_table_offset(vaddr);
    *l1ent = entry;

    /* It's enough to flush this one mapping. */
    __flush_tlb_one(vaddr);
}


void __set_fixmap(enum fixed_addresses idx,
                  l1_pgentry_t entry)
{
    unsigned long address = fix_to_virt(idx);

    if ( likely(idx < __end_of_fixed_addresses) )
        set_pte_phys(address, entry);
    else
        printk("Invalid __set_fixmap\n");
}

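/*
 * Set up Xen's own mappings: the machine-to-phys table, the ioremap()
 * page table, the per-domain mapping cache, and the linear page-table
 * mapping, all rooted in idle_pg_table.
 */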
void __init paging_init(void)
{
    void *ioremap_pt;
    unsigned long v, l2e;
    struct pfn_info *pg;

    /* Allocate and map the machine-to-phys table. */
    if ( (pg = alloc_domheap_pages(NULL, 10)) == NULL )
        panic("Not enough memory to bootstrap Xen.\n");
    m2p_start_mfn = page_to_pfn(pg);
    idle_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT] =
        mk_l2_pgentry(page_to_phys(pg) | __PAGE_HYPERVISOR | _PAGE_PSE);

    /* Xen 4MB mappings can all be GLOBAL. */
    if ( cpu_has_pge )
    {
        for ( v = HYPERVISOR_VIRT_START; v; v += (1 << L2_PAGETABLE_SHIFT) )
        {
            l2e = l2_pgentry_val(idle_pg_table[v >> L2_PAGETABLE_SHIFT]);
            if ( l2e & _PAGE_PSE )
                l2e |= _PAGE_GLOBAL;
            idle_pg_table[v >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(l2e);
        }
    }

    /* Create page table for ioremap(). */
    ioremap_pt = (void *)alloc_xenheap_page();
    clear_page(ioremap_pt);
    idle_pg_table[IOREMAP_VIRT_START >> L2_PAGETABLE_SHIFT] =
        mk_l2_pgentry(__pa(ioremap_pt) | __PAGE_HYPERVISOR);

    /* Create read-only mapping of MPT for guest-OS use. */
    idle_pg_table[RO_MPT_VIRT_START >> L2_PAGETABLE_SHIFT] =
        mk_l2_pgentry(l2_pgentry_val(
            idle_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT]) &
                      ~_PAGE_RW);

    /* Set up mapping cache for domain pages. */
    mapcache = (unsigned long *)alloc_xenheap_page();
    clear_page(mapcache);
    idle_pg_table[MAPCACHE_VIRT_START >> L2_PAGETABLE_SHIFT] =
        mk_l2_pgentry(__pa(mapcache) | __PAGE_HYPERVISOR);

    /* Set up linear page table mapping. */
    idle_pg_table[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
        mk_l2_pgentry(__pa(idle_pg_table) | __PAGE_HYPERVISOR);
}

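/* Remove the low (guest address range) mappings from the idle page table. */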
void __init zap_low_mappings(void)
{
    int i;
    for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
        idle_pg_table[i] = mk_l2_pgentry(0);
    flush_tlb_all_pge();
}


/*
 * Allows shooting down of borrowed page-table use on specific CPUs.
 * Specifically, we borrow page tables when running the idle domain.
 */
static void __synchronise_pagetables(void *mask)
{
    struct domain *d = current;
    if ( ((unsigned long)mask & (1<<d->processor)) && is_idle_task(d) )
        write_ptbase(&d->mm);
}
void synchronise_pagetables(unsigned long cpu_mask)
{
    __synchronise_pagetables((void *)cpu_mask);
    smp_call_function(__synchronise_pagetables, (void *)cpu_mask, 1, 1);
}

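/* Hypercall: update the guest kernel stack (ring-1 SS:ESP), mirrored into this CPU's TSS. */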
long do_stack_switch(unsigned long ss, unsigned long esp)
{
    int nr = smp_processor_id();
    struct tss_struct *t = &init_tss[nr];

    /* We need to do this check as we load and use SS on guest's behalf. */
    if ( (ss & 3) == 0 )
        return -EPERM;

    current->thread.guestos_ss = ss;
    current->thread.guestos_sp = esp;
    t->ss1  = ss;
    t->esp1 = esp;

    return 0;
}


/* Returns TRUE if given descriptor is valid for GDT or LDT. */
int check_descriptor(unsigned long *d)
{
    unsigned long base, limit, a = d[0], b = d[1];

    /* A not-present descriptor will always fault, so is safe. */
    if ( !(b & _SEGMENT_P) )
        goto good;

    /*
     * We don't allow a DPL of zero. There is no legitimate reason for
     * specifying DPL==0, and it gets rather dangerous if we also accept call
     * gates (consider a call gate pointing at another guestos descriptor with
     * DPL 0 -- this would get the OS ring-0 privileges).
     */
    if ( (b & _SEGMENT_DPL) == 0 )
        goto bad;

    if ( !(b & _SEGMENT_S) )
    {
        /*
         * System segment:
         *  1. Don't allow interrupt or trap gates as they belong in the IDT.
         *  2. Don't allow TSS descriptors or task gates as we don't
         *     virtualise x86 tasks.
         *  3. Don't allow LDT descriptors because they're unnecessary and
         *     I'm uneasy about allowing an LDT page to contain LDT
         *     descriptors. In any case, Xen automatically creates the
         *     required descriptor when reloading the LDT register.
         *  4. We allow call gates but they must not jump to a private segment.
         */

        /* Disallow everything but call gates. */
        if ( (b & _SEGMENT_TYPE) != 0xc00 )
            goto bad;

        /* Can't allow far jump to a Xen-private segment. */
        if ( !VALID_CODESEL(a>>16) )
            goto bad;

        /* Reserved bits must be zero. */
        if ( (b & 0xe0) != 0 )
            goto bad;

        /* No base/limit check is needed for a call gate. */
        goto good;
    }

    /* Check that base is at least a page away from Xen-private area. */
    base = (b&(0xff<<24)) | ((b&0xff)<<16) | (a>>16);
    if ( base >= (PAGE_OFFSET - PAGE_SIZE) )
        goto bad;

    /* Check and truncate the limit if necessary. */
    limit = (b&0xf0000) | (a&0xffff);
    limit++; /* We add one because limit is inclusive. */
    if ( (b & _SEGMENT_G) )
        limit <<= 12;

    if ( (b & (_SEGMENT_CODE | _SEGMENT_EC)) == _SEGMENT_EC )
    {
        /*
         * Grows-down limit check.
         * NB. limit == 0xFFFFF provides no access      (if G=1).
         *     limit == 0x00000 provides 4GB-4kB access (if G=1).
         */
        if ( (base + limit) > base )
        {
            limit = -(base & PAGE_MASK);
            goto truncate;
        }
    }
    else
    {
        /*
         * Grows-up limit check.
         * NB. limit == 0xFFFFF provides 4GB access (if G=1).
         *     limit == 0x00000 provides 4kB access (if G=1).
         */
        if ( ((base + limit) <= base) ||
             ((base + limit) > PAGE_OFFSET) )
        {
            limit = PAGE_OFFSET - base;
        truncate:
            if ( !(b & _SEGMENT_G) )
                goto bad; /* too dangerous; too hard to work out... */
            limit = (limit >> 12) - 1;
            d[0] &= ~0x0ffff; d[0] |= limit & 0x0ffff;
            d[1] &= ~0xf0000; d[1] |= limit & 0xf0000;
        }
    }

 good:
    return 1;
 bad:
    return 0;
}


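/* Drop references to the frames backing a domain's GDT and clear its perdomain_pt slots. */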
void destroy_gdt(struct domain *d)
{
    int i;
    unsigned long pfn;

    for ( i = 0; i < 16; i++ )
    {
        if ( (pfn = l1_pgentry_to_pagenr(d->mm.perdomain_pt[i])) != 0 )
            put_page_and_type(&frame_table[pfn]);
        d->mm.perdomain_pt[i] = mk_l1_pgentry(0);
    }
}


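/* Validate the frames of a proposed GDT and install them in the domain's perdomain page table. */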
long set_gdt(struct domain *d,
             unsigned long *frames,
             unsigned int entries)
{
    /* NB. There are 512 8-byte entries per GDT page. */
    int i = 0, nr_pages = (entries + 511) / 512;
    struct desc_struct *vgdt;
    unsigned long pfn;

    /* Check the first page in the new GDT. */
    if ( (pfn = frames[0]) >= max_page )
        goto fail;

    /* The first page is special because Xen owns a range of entries in it. */
    if ( !get_page_and_type(&frame_table[pfn], d, PGT_gdt_page) )
    {
        /* GDT checks failed: try zapping the Xen reserved entries. */
        if ( !get_page_and_type(&frame_table[pfn], d, PGT_writable_page) )
            goto fail;
        vgdt = map_domain_mem(pfn << PAGE_SHIFT);
        memset(vgdt + FIRST_RESERVED_GDT_ENTRY, 0,
               NR_RESERVED_GDT_ENTRIES*8);
        unmap_domain_mem(vgdt);
        put_page_and_type(&frame_table[pfn]);

        /* Okay, we zapped the entries. Now try the GDT checks again. */
        if ( !get_page_and_type(&frame_table[pfn], d, PGT_gdt_page) )
            goto fail;
    }

    /* Check the remaining pages in the new GDT. */
    for ( i = 1; i < nr_pages; i++ )
        if ( ((pfn = frames[i]) >= max_page) ||
             !get_page_and_type(&frame_table[pfn], d, PGT_gdt_page) )
            goto fail;

    /* Copy reserved GDT entries to the new GDT. */
    vgdt = map_domain_mem(frames[0] << PAGE_SHIFT);
    memcpy(vgdt + FIRST_RESERVED_GDT_ENTRY,
           gdt_table + FIRST_RESERVED_GDT_ENTRY,
           NR_RESERVED_GDT_ENTRIES*8);
    unmap_domain_mem(vgdt);

    /* Tear down the old GDT. */
    destroy_gdt(d);

    /* Install the new GDT. */
    for ( i = 0; i < nr_pages; i++ )
        d->mm.perdomain_pt[i] =
            mk_l1_pgentry((frames[i] << PAGE_SHIFT) | __PAGE_HYPERVISOR);

    SET_GDT_ADDRESS(d, GDT_VIRT_START);
    SET_GDT_ENTRIES(d, entries);

    return 0;

 fail:
    while ( i-- > 0 )
        put_page_and_type(&frame_table[frames[i]]);
    return -EINVAL;
}


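/* Hypercall wrapper for set_gdt(): copy the frame list from the guest and reload GDTR on success. */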
long do_set_gdt(unsigned long *frame_list, unsigned int entries)
{
    int nr_pages = (entries + 511) / 512;
    unsigned long frames[16];
    long ret;

    if ( (entries <= LAST_RESERVED_GDT_ENTRY) || (entries > 8192) )
        return -EINVAL;

    if ( copy_from_user(frames, frame_list, nr_pages * sizeof(unsigned long)) )
        return -EFAULT;

    if ( (ret = set_gdt(current, frames, entries)) == 0 )
    {
        local_flush_tlb();
        __asm__ __volatile__ ("lgdt %0" : "=m" (*current->mm.gdt));
    }

    return ret;
}


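/* Hypercall: validate and write a single 8-byte descriptor at machine address pa. */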
long do_update_descriptor(
    unsigned long pa, unsigned long word1, unsigned long word2)
{
    unsigned long *gdt_pent, pfn = pa >> PAGE_SHIFT, d[2];
    struct pfn_info *page;
    long ret = -EINVAL;

    d[0] = word1;
    d[1] = word2;

    if ( (pa & 7) || (pfn >= max_page) || !check_descriptor(d) )
        return -EINVAL;

    page = &frame_table[pfn];
    if ( unlikely(!get_page(page, current)) )
        return -EINVAL;

    /* Check if the given frame is in use in an unsafe context. */
    switch ( page->u.inuse.type_info & PGT_type_mask )
    {
    case PGT_gdt_page:
        /* Disallow updates of Xen-reserved descriptors in the current GDT. */
        if ( (l1_pgentry_to_pagenr(current->mm.perdomain_pt[0]) == pfn) &&
             (((pa&(PAGE_SIZE-1))>>3) >= FIRST_RESERVED_GDT_ENTRY) &&
             (((pa&(PAGE_SIZE-1))>>3) <= LAST_RESERVED_GDT_ENTRY) )
            goto out;
        if ( unlikely(!get_page_type(page, PGT_gdt_page)) )
            goto out;
        break;
    case PGT_ldt_page:
        if ( unlikely(!get_page_type(page, PGT_ldt_page)) )
            goto out;
        break;
    default:
        if ( unlikely(!get_page_type(page, PGT_writable_page)) )
            goto out;
        break;
    }

    /* All is good so make the update. */
    gdt_pent = map_domain_mem(pa);
    memcpy(gdt_pent, d, 8);
    unmap_domain_mem(gdt_pent);

    put_page_type(page);

    ret = 0; /* success */

 out:
    put_page(page);
    return ret;
}

#ifdef MEMORY_GUARD

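/*
 * Rebuild the mappings covering the Xen heap with 4kB page tables, carved
 * off the given heap pointer, so that individual pages can be guarded.
 */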
void *memguard_init(void *heap_start)
{
    l1_pgentry_t *l1;
    int i, j;

    /* Round the allocation pointer up to a page boundary. */
    heap_start = (void *)(((unsigned long)heap_start + (PAGE_SIZE-1)) &
                          PAGE_MASK);

    /* Memory guarding is incompatible with super pages. */
    for ( i = 0; i < (xenheap_phys_end >> L2_PAGETABLE_SHIFT); i++ )
    {
        l1 = (l1_pgentry_t *)heap_start;
        heap_start = (void *)((unsigned long)heap_start + PAGE_SIZE);
        for ( j = 0; j < ENTRIES_PER_L1_PAGETABLE; j++ )
            l1[j] = mk_l1_pgentry((i << L2_PAGETABLE_SHIFT) |
                                  (j << L1_PAGETABLE_SHIFT) |
                                  __PAGE_HYPERVISOR);
        idle_pg_table[i + l2_table_offset(PAGE_OFFSET)] =
            mk_l2_pgentry(virt_to_phys(l1) | __PAGE_HYPERVISOR);
    }

    return heap_start;
}

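/* Mark or unmark a page-aligned range as not-present in the idle page tables. */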
static void __memguard_change_range(void *p, unsigned long l, int guard)
{
    l1_pgentry_t *l1;
    l2_pgentry_t *l2;
    unsigned long _p = (unsigned long)p;
    unsigned long _l = (unsigned long)l;

    /* Ensure we are dealing with a page-aligned whole number of pages. */
    ASSERT((_p&PAGE_MASK) != 0);
    ASSERT((_l&PAGE_MASK) != 0);
    ASSERT((_p&~PAGE_MASK) == 0);
    ASSERT((_l&~PAGE_MASK) == 0);

    while ( _l != 0 )
    {
        l2 = &idle_pg_table[l2_table_offset(_p)];
        l1 = l2_pgentry_to_l1(*l2) + l1_table_offset(_p);
        if ( guard )
            *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) & ~_PAGE_PRESENT);
        else
            *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) | _PAGE_PRESENT);
        _p += PAGE_SIZE;
        _l -= PAGE_SIZE;
    }
}

void memguard_guard_range(void *p, unsigned long l)
{
    __memguard_change_range(p, l, 1);
    local_flush_tlb();
}

void memguard_unguard_range(void *p, unsigned long l)
{
    __memguard_change_range(p, l, 0);
}

int memguard_is_guarded(void *p)
{
    l1_pgentry_t *l1;
    l2_pgentry_t *l2;
    unsigned long _p = (unsigned long)p;
    l2 = &idle_pg_table[l2_table_offset(_p)];
    l1 = l2_pgentry_to_l1(*l2) + l1_table_offset(_p);
    return !(l1_pgentry_val(*l1) & _PAGE_PRESENT);
}

#endif