annotate xen/arch/x86/x86_32/mm.c @ 3632:fec8b1778268

bitkeeper revision 1.1159.212.60 (41febc4bKKSkh9u-Zes9v2CmBuLZxA)

More bootstrap fixes for x86/64. The next thing to do is sort out the IDT and
get traps.c working; then we can get rid of a bunch of dummy labels from the
end of boot/x86_64.S. We're also going to need some kind of entry.S before
we can safely enable interrupts. Also bear in mind that not all of physical
RAM may be mapped (only the first 1GB), and no m2p table is yet allocated or
mapped. Plenty to be done!
author kaf24@viper.(none)
date Mon Jan 31 23:16:27 2005 +0000 (2005-01-31)
parents 2c56c6b39a48
children d55d523078f7
rev   line source
kaf24@1710 1 /******************************************************************************
kaf24@1854 2 * arch/x86/x86_32/mm.c
kaf24@1710 3 *
kaf24@1854 4 * Modifications to Linux original are copyright (c) 2004, K A Fraser
kaf24@1710 5 *
kaf24@1710 6 * This program is free software; you can redistribute it and/or modify
kaf24@1710 7 * it under the terms of the GNU General Public License as published by
kaf24@1710 8 * the Free Software Foundation; either version 2 of the License, or
kaf24@1710 9 * (at your option) any later version.
kaf24@1710 10 *
kaf24@1710 11 * This program is distributed in the hope that it will be useful,
kaf24@1710 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
kaf24@1710 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
kaf24@1710 14 * GNU General Public License for more details.
kaf24@1710 15 *
kaf24@1710 16 * You should have received a copy of the GNU General Public License
kaf24@1710 17 * along with this program; if not, write to the Free Software
kaf24@1710 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
kaf24@1710 19 */
kaf24@1710 20
kaf24@1710 21 #include <xen/config.h>
kaf24@1710 22 #include <xen/lib.h>
kaf24@1710 23 #include <xen/init.h>
kaf24@1710 24 #include <xen/mm.h>
kaf24@1710 25 #include <asm/page.h>
kaf24@1710 26 #include <asm/flushtlb.h>
kaf24@1710 27 #include <asm/fixmap.h>
kaf24@1710 28 #include <asm/domain_page.h>
kaf24@1710 29
sos22@3478 30 unsigned long m2p_start_mfn;
sos22@3478 31
kaf24@3632 32 /* Map physical byte range (@p, @p+@s) at virt address @v in pagetable @pt. */
kaf24@3632 33 int map_pages(
kaf24@3632 34 pagetable_t *pt,
kaf24@3632 35 unsigned long v,
kaf24@3632 36 unsigned long p,
kaf24@3632 37 unsigned long s,
kaf24@3632 38 unsigned long flags)
kaf24@1710 39 {
kaf24@3632 40 l2_pgentry_t *pl2e;
kaf24@3632 41 l1_pgentry_t *pl1e;
kaf24@3632 42 void *newpg;
kaf24@3632 43
kaf24@3632 44 while ( s != 0 )
kaf24@3632 45 {
kaf24@3632 46 pl2e = &pt[l2_table_offset(v)];
kaf24@3632 47
kaf24@3632 48 if ( ((s|v|p) & ((1<<L2_PAGETABLE_SHIFT)-1)) == 0 )
kaf24@3632 49 {
kaf24@3632 50 /* Super-page mapping. */
kaf24@3632 51 if ( (l2_pgentry_val(*pl2e) & _PAGE_PRESENT) )
kaf24@3632 52 __flush_tlb_pge();
kaf24@3632 53 *pl2e = mk_l2_pgentry(p|flags|_PAGE_PSE);
kaf24@1710 54
kaf24@3632 55 v += 1 << L2_PAGETABLE_SHIFT;
kaf24@3632 56 p += 1 << L2_PAGETABLE_SHIFT;
kaf24@3632 57 s -= 1 << L2_PAGETABLE_SHIFT;
kaf24@3632 58 }
kaf24@3632 59 else
kaf24@3632 60 {
kaf24@3632 61 /* Normal page mapping. */
kaf24@3632 62 if ( !(l2_pgentry_val(*pl2e) & _PAGE_PRESENT) )
kaf24@3632 63 {
kaf24@3632 64 newpg = (void *)alloc_xenheap_page();
kaf24@3632 65 clear_page(newpg);
kaf24@3632 66 *pl2e = mk_l2_pgentry(__pa(newpg) | __PAGE_HYPERVISOR);
kaf24@3632 67 }
kaf24@3632 68 pl1e = l2_pgentry_to_l1(*pl2e) + l1_table_offset(v);
kaf24@3632 69 if ( (l1_pgentry_val(*pl1e) & _PAGE_PRESENT) )
kaf24@3632 70 __flush_tlb_one(v);
kaf24@3632 71 *pl1e = mk_l1_pgentry(p|flags);
kaf24@1710 72
kaf24@3632 73 v += 1 << L1_PAGETABLE_SHIFT;
kaf24@3632 74 p += 1 << L1_PAGETABLE_SHIFT;
kaf24@3632 75 s -= 1 << L1_PAGETABLE_SHIFT;
kaf24@3632 76 }
kaf24@3632 77 }
kaf24@3632 78
kaf24@3632 79 return 0;
kaf24@1710 80 }
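
For reference, a minimal sketch of how a caller might use map_pages() as defined
above: when the virtual address, physical address and size are all 4MB-aligned
the superpage branch consumes a single L2 entry, otherwise the per-page branch
allocates an L1 table on demand. The helper name and address values below are
hypothetical, for illustration only.

    static void example_map_region(void)
    {
        /* 4kB-aligned only: takes the "normal page" path, allocating an L1
         * table behind the L2 slot if one is not already present. */
        map_pages(idle_pg_table, 0xFC400000UL, 0x0ED00000UL, PAGE_SIZE,
                  __PAGE_HYPERVISOR);

        /* 4MB-aligned virtual address, physical address and size: takes the
         * PSE path and writes a single superpage L2 entry. */
        map_pages(idle_pg_table, 0xF5800000UL, 0x00800000UL,
                  1UL << L2_PAGETABLE_SHIFT, __PAGE_HYPERVISOR);
    }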
kaf24@1710 81
kaf24@3632 82 void __set_fixmap(
kaf24@3632 83 enum fixed_addresses idx, unsigned long p, unsigned long flags)
kaf24@1710 84 {
kaf24@3632 85 if ( unlikely(idx >= __end_of_fixed_addresses) )
kaf24@3632 86 BUG();
kaf24@3632 87 map_pages(idle_pg_table, fix_to_virt(idx), p, PAGE_SIZE, flags);
kaf24@1710 88 }
kaf24@1710 89
kaf24@1710 90
kaf24@1710 91 void __init paging_init(void)
kaf24@1710 92 {
kaf24@1710 93 void *ioremap_pt;
kaf24@3392 94 unsigned long v, l2e;
kaf24@3392 95 struct pfn_info *pg;
kaf24@1710 96
kaf24@3392 97 /* Allocate and map the machine-to-phys table. */
kaf24@3392 98 if ( (pg = alloc_domheap_pages(NULL, 10)) == NULL )
kaf24@3392 99 panic("Not enough memory to bootstrap Xen.\n");
sos22@3478 100 m2p_start_mfn = page_to_pfn(pg);
kaf24@3392 101 idle_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT] =
kaf24@3392 102 mk_l2_pgentry(page_to_phys(pg) | __PAGE_HYPERVISOR | _PAGE_PSE);
kaf24@3392 103
kaf24@3392 104 /* Xen 4MB mappings can all be GLOBAL. */
kaf24@3342 105 if ( cpu_has_pge )
kaf24@3342 106 {
kaf24@3392 107 for ( v = HYPERVISOR_VIRT_START; v; v += (1 << L2_PAGETABLE_SHIFT) )
kaf24@3392 108 {
kaf24@3392 109 l2e = l2_pgentry_val(idle_pg_table[v >> L2_PAGETABLE_SHIFT]);
kaf24@3392 110 if ( l2e & _PAGE_PSE )
kaf24@3392 111 l2e |= _PAGE_GLOBAL;
kaf24@3392 112 idle_pg_table[v >> L2_PAGETABLE_SHIFT] = mk_l2_pgentry(l2e);
kaf24@3392 113 }
kaf24@3342 114 }
kaf24@1710 115
kaf24@1710 116 /* Create page table for ioremap(). */
kaf24@1958 117 ioremap_pt = (void *)alloc_xenheap_page();
kaf24@1710 118 clear_page(ioremap_pt);
kaf24@1710 119 idle_pg_table[IOREMAP_VIRT_START >> L2_PAGETABLE_SHIFT] =
kaf24@1710 120 mk_l2_pgentry(__pa(ioremap_pt) | __PAGE_HYPERVISOR);
kaf24@1710 121
kaf24@1710 122 /* Create read-only mapping of MPT for guest-OS use. */
kaf24@1710 123 idle_pg_table[RO_MPT_VIRT_START >> L2_PAGETABLE_SHIFT] =
kaf24@1830 124 mk_l2_pgentry(l2_pgentry_val(
kaf24@1830 125 idle_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT]) &
kaf24@1830 126 ~_PAGE_RW);
kaf24@1710 127
kaf24@1710 128 /* Set up mapping cache for domain pages. */
kaf24@1958 129 mapcache = (unsigned long *)alloc_xenheap_page();
kaf24@1710 130 clear_page(mapcache);
kaf24@1710 131 idle_pg_table[MAPCACHE_VIRT_START >> L2_PAGETABLE_SHIFT] =
kaf24@1710 132 mk_l2_pgentry(__pa(mapcache) | __PAGE_HYPERVISOR);
kaf24@1710 133
kaf24@1710 134 /* Set up linear page table mapping. */
kaf24@1710 135 idle_pg_table[LINEAR_PT_VIRT_START >> L2_PAGETABLE_SHIFT] =
kaf24@1710 136 mk_l2_pgentry(__pa(idle_pg_table) | __PAGE_HYPERVISOR);
kaf24@1710 137 }
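
A quick sanity check of the machine-to-phys (M2P) allocation made above,
assuming the 4-byte (unsigned long) entries used on x86_32; this is a sketch of
the arithmetic, not part of the annotated file:

    order-10 allocation   = 2^10 pages x 4kB   = 4MB for the M2P table
    4MB / 4 bytes/entry   = 2^20 entries, one per machine frame
    2^20 frames x 4kB     = 4GB of machine memory covered
    4MB                   = exactly one PSE (superpage) slot, installed at
                            RDWR_MPT_VIRT_START and aliased read-only at
                            RO_MPT_VIRT_START further down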
kaf24@1710 138
kaf24@1710 139 void __init zap_low_mappings(void)
kaf24@1710 140 {
kaf24@1710 141 int i;
kaf24@1710 142 for ( i = 0; i < DOMAIN_ENTRIES_PER_L2_PAGETABLE; i++ )
kaf24@1710 143 idle_pg_table[i] = mk_l2_pgentry(0);
kaf24@1710 144 flush_tlb_all_pge();
kaf24@1710 145 }
kaf24@1710 146
kaf24@1710 147
kaf24@1806 148 /*
kaf24@1806 149 * Allows shooting down of borrowed page-table use on specific CPUs.
kaf24@1806 150 * Specifically, we borrow page tables when running the idle domain.
kaf24@1806 151 */
kaf24@1806 152 static void __synchronise_pagetables(void *mask)
kaf24@1806 153 {
kaf24@3336 154 struct exec_domain *ed = current;
kaf24@3336 155 if ( ((unsigned long)mask & (1 << ed->processor)) &&
kaf24@3336 156 is_idle_task(ed->domain) )
kaf24@3336 157 write_ptbase(&ed->mm);
kaf24@1806 158 }
kaf24@1806 159 void synchronise_pagetables(unsigned long cpu_mask)
kaf24@1806 160 {
kaf24@1806 161 __synchronise_pagetables((void *)cpu_mask);
kaf24@1806 162 smp_call_function(__synchronise_pagetables, (void *)cpu_mask, 1, 1);
kaf24@1806 163 }
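
A minimal sketch of a hypothetical caller of synchronise_pagetables(): the mask
is a plain bitmask with one bit per CPU number, matching the
(1 << ed->processor) test above.

    static void example_sync_cpus_0_and_1(void)
    {
        /* Bits 0 and 1 set: CPUs 0 and 1 re-load their page-table base if
         * they are currently borrowing the idle domain's tables. The local
         * CPU is handled directly; the rest via smp_call_function(). */
        synchronise_pagetables((1UL << 0) | (1UL << 1));
    }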
kaf24@1806 164
kaf24@1710 165 long do_stack_switch(unsigned long ss, unsigned long esp)
kaf24@1710 166 {
kaf24@1710 167 int nr = smp_processor_id();
kaf24@1710 168 struct tss_struct *t = &init_tss[nr];
kaf24@1710 169
kaf24@1710 170 /* We need to do this check as we load and use SS on guest's behalf. */
kaf24@1710 171 if ( (ss & 3) == 0 )
kaf24@1710 172 return -EPERM;
kaf24@1710 173
kaf24@1710 174 current->thread.guestos_ss = ss;
kaf24@1710 175 current->thread.guestos_sp = esp;
kaf24@1710 176 t->ss1 = ss;
kaf24@1710 177 t->esp1 = esp;
kaf24@1710 178
kaf24@1710 179 return 0;
kaf24@1710 180 }
kaf24@1710 181
kaf24@1710 182
kaf24@1710 183 /* Returns TRUE if given descriptor is valid for GDT or LDT. */
kaf24@1854 184 int check_descriptor(unsigned long *d)
kaf24@1710 185 {
kaf24@1854 186 unsigned long base, limit, a = d[0], b = d[1];
kaf24@1710 187
kaf24@1710 188 /* A not-present descriptor will always fault, so is safe. */
kaf24@1710 189 if ( !(b & _SEGMENT_P) )
kaf24@1710 190 goto good;
kaf24@1710 191
kaf24@1710 192 /*
kaf24@1710 193 * We don't allow a DPL of zero. There is no legitimate reason for
kaf24@1710 194 * specifying DPL==0, and it gets rather dangerous if we also accept call
kaf24@1710 195 * gates (consider a call gate pointing at another guestos descriptor with
kaf24@1710 196 * DPL 0 -- this would get the OS ring-0 privileges).
kaf24@1710 197 */
kaf24@1710 198 if ( (b & _SEGMENT_DPL) == 0 )
kaf24@1710 199 goto bad;
kaf24@1710 200
kaf24@1710 201 if ( !(b & _SEGMENT_S) )
kaf24@1710 202 {
kaf24@1710 203 /*
kaf24@1710 204 * System segment:
kaf24@1710 205 * 1. Don't allow interrupt or trap gates as they belong in the IDT.
kaf24@1710 206 * 2. Don't allow TSS descriptors or task gates as we don't
kaf24@1710 207 * virtualise x86 tasks.
kaf24@1710 208 * 3. Don't allow LDT descriptors because they're unnecessary and
kaf24@1710 209 * I'm uneasy about allowing an LDT page to contain LDT
kaf24@1710 210 * descriptors. In any case, Xen automatically creates the
kaf24@1710 211 * required descriptor when reloading the LDT register.
kaf24@1710 212 * 4. We allow call gates but they must not jump to a private segment.
kaf24@1710 213 */
kaf24@1710 214
kaf24@1710 215 /* Disallow everything but call gates. */
kaf24@1710 216 if ( (b & _SEGMENT_TYPE) != 0xc00 )
kaf24@1710 217 goto bad;
kaf24@1710 218
kaf24@1710 219 /* Can't allow far jump to a Xen-private segment. */
kaf24@1710 220 if ( !VALID_CODESEL(a>>16) )
kaf24@1710 221 goto bad;
kaf24@1710 222
kaf24@1710 223 /* Reserved bits must be zero. */
kaf24@1710 224 if ( (b & 0xe0) != 0 )
kaf24@1710 225 goto bad;
kaf24@1710 226
kaf24@1710 227 /* No base/limit check is needed for a call gate. */
kaf24@1710 228 goto good;
kaf24@1710 229 }
kaf24@1710 230
kaf24@1854 231 /* Check that base is at least a page away from Xen-private area. */
kaf24@1710 232 base = (b&(0xff<<24)) | ((b&0xff)<<16) | (a>>16);
kaf24@1854 233 if ( base >= (PAGE_OFFSET - PAGE_SIZE) )
kaf24@1854 234 goto bad;
kaf24@1854 235
kaf24@1854 236 /* Check and truncate the limit if necessary. */
kaf24@1710 237 limit = (b&0xf0000) | (a&0xffff);
kaf24@1710 238 limit++; /* We add one because limit is inclusive. */
kaf24@1710 239 if ( (b & _SEGMENT_G) )
kaf24@1710 240 limit <<= 12;
kaf24@3137 241
mafetter@3155 242 if ( (b & (_SEGMENT_CODE | _SEGMENT_EC)) == _SEGMENT_EC )
kaf24@3137 243 {
kaf24@3137 244 /*
kaf24@3137 245 * Grows-down limit check.
kaf24@3137 246 * NB. limit == 0xFFFFF provides no access (if G=1).
kaf24@3137 247 * limit == 0x00000 provides 4GB-4kB access (if G=1).
kaf24@3137 248 */
kaf24@3137 249 if ( (base + limit) > base )
kaf24@3137 250 {
kaf24@3137 251 limit = -(base & PAGE_MASK);
kaf24@3137 252 goto truncate;
kaf24@3137 253 }
kaf24@3137 254 }
kaf24@3137 255 else
kaf24@1854 256 {
kaf24@3137 257 /*
kaf24@3137 258 * Grows-up limit check.
kaf24@3137 259 * NB. limit == 0xFFFFF provides 4GB access (if G=1).
kaf24@3137 260 * limit == 0x00000 provides 4kB access (if G=1).
kaf24@3137 261 */
kaf24@3137 262 if ( ((base + limit) <= base) ||
kaf24@3137 263 ((base + limit) > PAGE_OFFSET) )
kaf24@3137 264 {
kaf24@3137 265 limit = PAGE_OFFSET - base;
kaf24@3137 266 truncate:
kaf24@3137 267 if ( !(b & _SEGMENT_G) )
kaf24@3137 268 goto bad; /* too dangerous; too hard to work out... */
kaf24@3137 269 limit = (limit >> 12) - 1;
kaf24@3137 270 d[0] &= ~0x0ffff; d[0] |= limit & 0x0ffff;
kaf24@3137 271 d[1] &= ~0xf0000; d[1] |= limit & 0xf0000;
kaf24@3137 272 }
kaf24@1854 273 }
kaf24@1710 274
kaf24@1710 275 good:
kaf24@1710 276 return 1;
kaf24@1710 277 bad:
kaf24@1710 278 return 0;
kaf24@1710 279 }
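
To make the bit-fiddling above concrete, here is a sketch decoding base and
limit from a pair of hypothetical descriptor words exactly as
check_descriptor() does; the example words describe a flat ring-1 readable code
segment and are made up for illustration.

    static void example_decode_descriptor(void)
    {
        unsigned long a = 0x0000ffff;  /* limit[15:0]=0xffff, base[15:0]=0            */
        unsigned long b = 0x00cfba00;  /* base[31:16]=0, G=1, D=1, limit[19:16]=0xf,
                                          P=1, DPL=1, S=1, type=0xA (code, read/exec) */

        /* Extracted the same way as in check_descriptor() above. */
        unsigned long base  = (b & (0xff << 24)) | ((b & 0xff) << 16) | (a >> 16);
        unsigned long limit = ((b & 0xf0000) | (a & 0xffff)) + 1;  /* 0x100000 4kB units */

        /* With G=1 this is the "limit == 0xFFFFF provides 4GB access" case:
         * base + (limit << 12) wraps past PAGE_OFFSET, so the grows-up branch
         * truncates the limit to PAGE_OFFSET - base and rewrites d[0]/d[1]. */
        (void)base; (void)limit;
    }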
kaf24@1710 280
kaf24@1710 281
cl349@2964 282 void destroy_gdt(struct exec_domain *ed)
kaf24@1787 283 {
kaf24@1787 284 int i;
kaf24@1787 285 unsigned long pfn;
kaf24@1787 286
kaf24@1787 287 for ( i = 0; i < 16; i++ )
kaf24@1787 288 {
cl349@3036 289 if ( (pfn = l1_pgentry_to_pagenr(ed->mm.perdomain_ptes[i])) != 0 )
kaf24@1787 290 put_page_and_type(&frame_table[pfn]);
cl349@3036 291 ed->mm.perdomain_ptes[i] = mk_l1_pgentry(0);
kaf24@1787 292 }
kaf24@1787 293 }
kaf24@1787 294
kaf24@1787 295
cl349@2957 296 long set_gdt(struct exec_domain *ed,
kaf24@1710 297 unsigned long *frames,
kaf24@1710 298 unsigned int entries)
kaf24@1710 299 {
cl349@2957 300 struct domain *d = ed->domain;
kaf24@1710 301 /* NB. There are 512 8-byte entries per GDT page. */
kaf24@2703 302 int i = 0, nr_pages = (entries + 511) / 512;
kaf24@1710 303 struct desc_struct *vgdt;
kaf24@2703 304 unsigned long pfn;
kaf24@1710 305
kaf24@2703 306 /* Check the first page in the new GDT. */
kaf24@2703 307 if ( (pfn = frames[0]) >= max_page )
kaf24@2703 308 goto fail;
iap10@2265 309
kaf24@2703 310 /* The first page is special because Xen owns a range of entries in it. */
kaf24@2703 311 if ( !get_page_and_type(&frame_table[pfn], d, PGT_gdt_page) )
kaf24@1710 312 {
kaf24@2703 313 /* GDT checks failed: try zapping the Xen reserved entries. */
kaf24@2703 314 if ( !get_page_and_type(&frame_table[pfn], d, PGT_writable_page) )
kaf24@2703 315 goto fail;
kaf24@2703 316 vgdt = map_domain_mem(pfn << PAGE_SHIFT);
kaf24@2703 317 memset(vgdt + FIRST_RESERVED_GDT_ENTRY, 0,
kaf24@2703 318 NR_RESERVED_GDT_ENTRIES*8);
kaf24@2703 319 unmap_domain_mem(vgdt);
kaf24@2703 320 put_page_and_type(&frame_table[pfn]);
kaf24@2703 321
kaf24@2703 322 /* Okay, we zapped the entries. Now try the GDT checks again. */
kaf24@2703 323 if ( !get_page_and_type(&frame_table[pfn], d, PGT_gdt_page) )
kaf24@1710 324 goto fail;
kaf24@1710 325 }
kaf24@1710 326
kaf24@2703 327 /* Check the remaining pages in the new GDT. */
kaf24@2703 328 for ( i = 1; i < nr_pages; i++ )
kaf24@2703 329 if ( ((pfn = frames[i]) >= max_page) ||
kaf24@2703 330 !get_page_and_type(&frame_table[pfn], d, PGT_gdt_page) )
kaf24@2703 331 goto fail;
kaf24@2703 332
kaf24@1710 333 /* Copy reserved GDT entries to the new GDT. */
kaf24@2703 334 vgdt = map_domain_mem(frames[0] << PAGE_SHIFT);
kaf24@1710 335 memcpy(vgdt + FIRST_RESERVED_GDT_ENTRY,
kaf24@1710 336 gdt_table + FIRST_RESERVED_GDT_ENTRY,
kaf24@1710 337 NR_RESERVED_GDT_ENTRIES*8);
kaf24@1710 338 unmap_domain_mem(vgdt);
kaf24@1710 339
kaf24@1710 340 /* Tear down the old GDT. */
cl349@2964 341 destroy_gdt(ed);
kaf24@1710 342
kaf24@1710 343 /* Install the new GDT. */
kaf24@1710 344 for ( i = 0; i < nr_pages; i++ )
cl349@3036 345 ed->mm.perdomain_ptes[i] =
kaf24@1710 346 mk_l1_pgentry((frames[i] << PAGE_SHIFT) | __PAGE_HYPERVISOR);
kaf24@1710 347
cl349@3036 348 SET_GDT_ADDRESS(ed, GDT_VIRT_START(ed));
cl349@2957 349 SET_GDT_ENTRIES(ed, entries);
kaf24@1710 350
kaf24@1710 351 return 0;
kaf24@1710 352
kaf24@1710 353 fail:
kaf24@1710 354 while ( i-- > 0 )
kaf24@1710 355 put_page_and_type(&frame_table[frames[i]]);
kaf24@1710 356 return -EINVAL;
kaf24@1710 357 }
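
The "512 8-byte entries per GDT page" arithmetic above, pulled out as a tiny
hypothetical helper (do_set_gdt() below applies the same formula and also caps
entries at 8192, which is what bounds its frames[16] array):

    static unsigned int gdt_frames_needed(unsigned int entries)
    {
        /* 4kB page / 8 bytes per descriptor = 512 descriptors per frame;
         * 8192 entries therefore needs at most 16 frames. */
        return (entries + 511) / 512;
    }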
kaf24@1710 358
kaf24@1710 359
kaf24@1710 360 long do_set_gdt(unsigned long *frame_list, unsigned int entries)
kaf24@1710 361 {
kaf24@1710 362 int nr_pages = (entries + 511) / 512;
kaf24@1710 363 unsigned long frames[16];
kaf24@1710 364 long ret;
kaf24@1710 365
kaf24@1710 366 if ( (entries <= LAST_RESERVED_GDT_ENTRY) || (entries > 8192) )
kaf24@1710 367 return -EINVAL;
kaf24@1710 368
kaf24@1710 369 if ( copy_from_user(frames, frame_list, nr_pages * sizeof(unsigned long)) )
kaf24@1710 370 return -EFAULT;
kaf24@1710 371
cl349@3036 372 LOCK_BIGLOCK(current->domain);
cl349@3036 373
kaf24@1710 374 if ( (ret = set_gdt(current, frames, entries)) == 0 )
kaf24@1710 375 {
kaf24@1710 376 local_flush_tlb();
kaf24@1710 377 __asm__ __volatile__ ("lgdt %0" : "=m" (*current->mm.gdt));
kaf24@1710 378 }
kaf24@1710 379
cl349@3036 380 UNLOCK_BIGLOCK(current->domain);
cl349@3036 381
kaf24@1710 382 return ret;
kaf24@1710 383 }
kaf24@1710 384
kaf24@1710 385
kaf24@1710 386 long do_update_descriptor(
kaf24@1710 387 unsigned long pa, unsigned long word1, unsigned long word2)
kaf24@1710 388 {
kaf24@1854 389 unsigned long *gdt_pent, pfn = pa >> PAGE_SHIFT, d[2];
kaf24@1710 390 struct pfn_info *page;
cl349@3036 391 struct exec_domain *ed;
kaf24@1710 392 long ret = -EINVAL;
kaf24@1710 393
kaf24@1854 394 d[0] = word1;
kaf24@1854 395 d[1] = word2;
kaf24@1854 396
cl349@3036 397 LOCK_BIGLOCK(current->domain);
cl349@3036 398
cl349@3036 399 if ( (pa & 7) || (pfn >= max_page) || !check_descriptor(d) ) {
cl349@3036 400 UNLOCK_BIGLOCK(current->domain);
kaf24@1710 401 return -EINVAL;
cl349@3036 402 }
kaf24@1710 403
kaf24@1710 404 page = &frame_table[pfn];
cl349@3036 405 if ( unlikely(!get_page(page, current->domain)) ) {
cl349@3036 406 UNLOCK_BIGLOCK(current->domain);
kaf24@1843 407 return -EINVAL;
cl349@3036 408 }
kaf24@1710 409
kaf24@1710 410 /* Check if the given frame is in use in an unsafe context. */
kaf24@1970 411 switch ( page->u.inuse.type_info & PGT_type_mask )
kaf24@1710 412 {
kaf24@1710 413 case PGT_gdt_page:
kaf24@1710 414 /* Disallow updates of Xen-reserved descriptors in the current GDT. */
cl349@3036 415 for_each_exec_domain(current->domain, ed) {
cl349@3036 416 if ( (l1_pgentry_to_pagenr(ed->mm.perdomain_ptes[0]) == pfn) &&
cl349@3036 417 (((pa&(PAGE_SIZE-1))>>3) >= FIRST_RESERVED_GDT_ENTRY) &&
cl349@3036 418 (((pa&(PAGE_SIZE-1))>>3) <= LAST_RESERVED_GDT_ENTRY) )
cl349@3036 419 goto out;
cl349@3036 420 }
kaf24@1710 421 if ( unlikely(!get_page_type(page, PGT_gdt_page)) )
kaf24@1710 422 goto out;
kaf24@1710 423 break;
kaf24@1710 424 case PGT_ldt_page:
kaf24@1710 425 if ( unlikely(!get_page_type(page, PGT_ldt_page)) )
kaf24@1710 426 goto out;
kaf24@1710 427 break;
kaf24@1710 428 default:
kaf24@2375 429 if ( unlikely(!get_page_type(page, PGT_writable_page)) )
kaf24@1710 430 goto out;
kaf24@1710 431 break;
kaf24@1710 432 }
kaf24@1710 433
kaf24@1710 434 /* All is good so make the update. */
kaf24@1710 435 gdt_pent = map_domain_mem(pa);
kaf24@1854 436 memcpy(gdt_pent, d, 8);
kaf24@1710 437 unmap_domain_mem(gdt_pent);
kaf24@1710 438
kaf24@1710 439 put_page_type(page);
kaf24@1710 440
kaf24@1710 441 ret = 0; /* success */
kaf24@1710 442
kaf24@1710 443 out:
kaf24@1710 444 put_page(page);
cl349@3036 445
cl349@3036 446 UNLOCK_BIGLOCK(current->domain);
cl349@3036 447
kaf24@1710 448 return ret;
kaf24@1710 449 }
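
A sketch of how the machine-address argument to do_update_descriptor() relates
to a descriptor slot within a GDT/LDT frame (hypothetical helper; mfn and index
are illustrative):

    static unsigned long example_descriptor_pa(unsigned long mfn, unsigned int index)
    {
        /* Descriptors are 8 bytes, so slot 'index' lives at byte offset
         * index*8 within the frame; the (pa & 7) check above rejects any
         * misaligned address. 'index' must stay below 512 to remain in-frame. */
        return (mfn << PAGE_SHIFT) + (index * 8);
    }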
kaf24@1710 450
kaf24@1710 451 #ifdef MEMORY_GUARD
kaf24@1710 452
kaf24@1710 453 void *memguard_init(void *heap_start)
kaf24@1710 454 {
kaf24@1710 455 l1_pgentry_t *l1;
kaf24@1710 456 int i, j;
kaf24@1710 457
kaf24@1710 458 /* Round the allocation pointer up to a page boundary. */
kaf24@1710 459 heap_start = (void *)(((unsigned long)heap_start + (PAGE_SIZE-1)) &
kaf24@1710 460 PAGE_MASK);
kaf24@1710 461
kaf24@1710 462 /* Memory guarding is incompatible with super pages. */
kaf24@1710 463 for ( i = 0; i < (xenheap_phys_end >> L2_PAGETABLE_SHIFT); i++ )
kaf24@1710 464 {
kaf24@1710 465 l1 = (l1_pgentry_t *)heap_start;
kaf24@1710 466 heap_start = (void *)((unsigned long)heap_start + PAGE_SIZE);
kaf24@1710 467 for ( j = 0; j < ENTRIES_PER_L1_PAGETABLE; j++ )
kaf24@1710 468 l1[j] = mk_l1_pgentry((i << L2_PAGETABLE_SHIFT) |
kaf24@1710 469 (j << L1_PAGETABLE_SHIFT) |
kaf24@1710 470 __PAGE_HYPERVISOR);
kaf24@3392 471 idle_pg_table[i + l2_table_offset(PAGE_OFFSET)] =
kaf24@1710 472 mk_l2_pgentry(virt_to_phys(l1) | __PAGE_HYPERVISOR);
kaf24@1710 473 }
kaf24@1710 474
kaf24@1710 475 return heap_start;
kaf24@1710 476 }
kaf24@1710 477
kaf24@1710 478 static void __memguard_change_range(void *p, unsigned long l, int guard)
kaf24@1710 479 {
kaf24@1710 480 l1_pgentry_t *l1;
kaf24@1710 481 l2_pgentry_t *l2;
kaf24@1710 482 unsigned long _p = (unsigned long)p;
kaf24@1710 483 unsigned long _l = (unsigned long)l;
kaf24@1710 484
kaf24@1710 485 /* Ensure we are dealing with a page-aligned whole number of pages. */
kaf24@1710 486 ASSERT((_p&PAGE_MASK) != 0);
kaf24@1710 487 ASSERT((_l&PAGE_MASK) != 0);
kaf24@1710 488 ASSERT((_p&~PAGE_MASK) == 0);
kaf24@1710 489 ASSERT((_l&~PAGE_MASK) == 0);
kaf24@1710 490
kaf24@1710 491 while ( _l != 0 )
kaf24@1710 492 {
kaf24@1710 493 l2 = &idle_pg_table[l2_table_offset(_p)];
kaf24@1710 494 l1 = l2_pgentry_to_l1(*l2) + l1_table_offset(_p);
kaf24@1710 495 if ( guard )
kaf24@1710 496 *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) & ~_PAGE_PRESENT);
kaf24@1710 497 else
kaf24@1710 498 *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) | _PAGE_PRESENT);
kaf24@1710 499 _p += PAGE_SIZE;
kaf24@1710 500 _l -= PAGE_SIZE;
kaf24@1710 501 }
kaf24@1710 502 }
kaf24@1710 503
kaf24@1710 504 void memguard_guard_range(void *p, unsigned long l)
kaf24@1710 505 {
kaf24@1710 506 __memguard_change_range(p, l, 1);
kaf24@1710 507 local_flush_tlb();
kaf24@1710 508 }
kaf24@1710 509
kaf24@1710 510 void memguard_unguard_range(void *p, unsigned long l)
kaf24@1710 511 {
kaf24@1710 512 __memguard_change_range(p, l, 0);
kaf24@1710 513 }
kaf24@1710 514
kaf24@1710 515 #endif
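
Finally, a hypothetical use of the memguard interface above (only meaningful
when MEMORY_GUARD is defined): memguard_init() has already broken the Xen
heap's 4MB mappings down to L1 tables, so guarding simply clears _PAGE_PRESENT
on the covered pages and any stray access faults immediately.

    #ifdef MEMORY_GUARD
    static void example_guard_scratch_page(void)
    {
        void *scratch = (void *)alloc_xenheap_page();

        /* Clear _PAGE_PRESENT on the page: accesses now fault at once. */
        memguard_guard_range(scratch, PAGE_SIZE);

        /* ... later, before handing the memory back out ... */
        memguard_unguard_range(scratch, PAGE_SIZE);
    }
    #endif /* MEMORY_GUARD */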