
annotate xen/arch/x86/x86_64/mm.c @ 3632:fec8b1778268

bitkeeper revision 1.1159.212.60 (41febc4bKKSkh9u-Zes9v2CmBuLZxA)

More bootstrap fixes for x86/64. The next thing to do is to sort out the IDT and
get traps.c working; then we can get rid of a bunch of dummy labels at the
end of boot/x86_64.S. We're also going to need some kind of entry.S before
we can safely enable interrupts. Also bear in mind that not all physical
RAM may be mapped yet (only the first 1GB), and no m2p table has been
allocated or mapped. Plenty to be done!
author kaf24@viper.(none)
date Mon Jan 31 23:16:27 2005 +0000 (2005-01-31)
parents c754bd0be650
children d55d523078f7
rev   line source
kaf24@3314 1 /******************************************************************************
kaf24@3314 2 * arch/x86/x86_64/mm.c
kaf24@3314 3 *
kaf24@3314 4 * Modifications to Linux original are copyright (c) 2004, K A Fraser
kaf24@3314 5 *
kaf24@3314 6 * This program is free software; you can redistribute it and/or modify
kaf24@3314 7 * it under the terms of the GNU General Public License as published by
kaf24@3314 8 * the Free Software Foundation; either version 2 of the License, or
kaf24@3314 9 * (at your option) any later version.
kaf24@3314 10 *
kaf24@3314 11 * This program is distributed in the hope that it will be useful,
kaf24@3314 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
kaf24@3314 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
kaf24@3314 14 * GNU General Public License for more details.
kaf24@3314 15 *
kaf24@3314 16 * You should have received a copy of the GNU General Public License
kaf24@3314 17 * along with this program; if not, write to the Free Software
kaf24@3314 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
kaf24@3314 19 */
kaf24@3314 20
kaf24@3314 21 #include <xen/config.h>
kaf24@3314 22 #include <xen/lib.h>
kaf24@3314 23 #include <xen/init.h>
kaf24@3314 24 #include <xen/mm.h>
kaf24@3314 25 #include <asm/page.h>
kaf24@3314 26 #include <asm/flushtlb.h>
kaf24@3314 27 #include <asm/fixmap.h>
kaf24@3314 28 #include <asm/domain_page.h>
kaf24@3314 29
kaf24@3632 30 unsigned long m2p_start_mfn; /* XXX Kill this (in 32-bit code also). */
kaf24@3314 31
kaf24@3632 32 void *safe_page_alloc(void)
kaf24@3632 33 {
kaf24@3632 34 extern int early_boot;
kaf24@3632 35 if ( early_boot )
kaf24@3632 36 return __va(alloc_boot_pages(PAGE_SIZE, PAGE_SIZE));
kaf24@3632 37 return (void *)alloc_xenheap_page();
kaf24@3314 38 }
kaf24@3314 39
kaf24@3632 40 /* Map physical byte range (@p, @p+@s) at virt address @v in pagetable @pt. */
kaf24@3632 41 int map_pages(
kaf24@3632 42 pagetable_t *pt,
kaf24@3632 43 unsigned long v,
kaf24@3632 44 unsigned long p,
kaf24@3632 45 unsigned long s,
kaf24@3632 46 unsigned long flags)
kaf24@3314 47 {
kaf24@3632 48 l4_pgentry_t *pl4e;
kaf24@3632 49 l3_pgentry_t *pl3e;
kaf24@3632 50 l2_pgentry_t *pl2e;
kaf24@3632 51 l1_pgentry_t *pl1e;
kaf24@3632 52 void *newpg;
kaf24@3632 53
kaf24@3632 54 while ( s != 0 )
kaf24@3632 55 {
kaf24@3632 56 pl4e = &pt[l4_table_offset(v)];
kaf24@3632 57 if ( !(l4_pgentry_val(*pl4e) & _PAGE_PRESENT) )
kaf24@3632 58 {
kaf24@3632 59 newpg = safe_page_alloc();
kaf24@3632 60 clear_page(newpg);
kaf24@3632 61 *pl4e = mk_l4_pgentry(__pa(newpg) | __PAGE_HYPERVISOR);
kaf24@3632 62 }
kaf24@3632 63
kaf24@3632 64 pl3e = l4_pgentry_to_l3(*pl4e) + l3_table_offset(v);
kaf24@3632 65 if ( !(l3_pgentry_val(*pl3e) & _PAGE_PRESENT) )
kaf24@3632 66 {
kaf24@3632 67 newpg = safe_page_alloc();
kaf24@3632 68 clear_page(newpg);
kaf24@3632 69 *pl3e = mk_l3_pgentry(__pa(newpg) | __PAGE_HYPERVISOR);
kaf24@3632 70 }
kaf24@3632 71
kaf24@3632 72 pl2e = l3_pgentry_to_l2(*pl3e) + l2_table_offset(v);
kaf24@3632 73
kaf24@3632 74 if ( ((s|v|p) & ((1<<L2_PAGETABLE_SHIFT)-1)) == 0 )
kaf24@3632 75 {
kaf24@3632 76 /* Super-page mapping. */
kaf24@3632 77 if ( (l2_pgentry_val(*pl2e) & _PAGE_PRESENT) )
kaf24@3632 78 __flush_tlb_pge();
kaf24@3632 79 *pl2e = mk_l2_pgentry(p|flags|_PAGE_PSE);
kaf24@3314 80
kaf24@3632 81 v += 1 << L2_PAGETABLE_SHIFT;
kaf24@3632 82 p += 1 << L2_PAGETABLE_SHIFT;
kaf24@3632 83 s -= 1 << L2_PAGETABLE_SHIFT;
kaf24@3632 84 }
kaf24@3632 85 else
kaf24@3632 86 {
kaf24@3632 87 /* Normal page mapping. */
kaf24@3632 88 if ( !(l2_pgentry_val(*pl2e) & _PAGE_PRESENT) )
kaf24@3632 89 {
kaf24@3632 90 newpg = safe_page_alloc();
kaf24@3632 91 clear_page(newpg);
kaf24@3632 92 *pl2e = mk_l2_pgentry(__pa(newpg) | __PAGE_HYPERVISOR);
kaf24@3632 93 }
kaf24@3632 94 pl1e = l2_pgentry_to_l1(*pl2e) + l1_table_offset(v);
kaf24@3632 95 if ( (l1_pgentry_val(*pl1e) & _PAGE_PRESENT) )
kaf24@3632 96 __flush_tlb_one(v);
kaf24@3632 97 *pl1e = mk_l1_pgentry(p|flags);
kaf24@3632 98
kaf24@3632 99 v += 1 << L1_PAGETABLE_SHIFT;
kaf24@3632 100 p += 1 << L1_PAGETABLE_SHIFT;
kaf24@3632 101 s -= 1 << L1_PAGETABLE_SHIFT;
kaf24@3632 102 }
kaf24@3632 103 }
kaf24@3632 104
kaf24@3632 105 return 0;
kaf24@3632 106 }
kaf24@3632 107
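/*
 * Illustrative sketch only, not part of the file above: one way map_pages()
 * could be called to put a single 2MB super-page mapping into the idle page
 * table. The physical address (1GB) is an arbitrary example value; the
 * 2MB-aligned v/p/s triple selects the super-page path via the (s|v|p)
 * alignment test above.
 */
#if 0
static void __init example_map_superpage(void)
{
    map_pages(idle_pg_table,
              IOREMAP_VIRT_START,            /* 2MB-aligned virtual address  */
              0x40000000UL,                  /* example physical address     */
              1UL << L2_PAGETABLE_SHIFT,     /* exactly one 2MB super-page   */
              __PAGE_HYPERVISOR);
}
#endif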
kaf24@3632 108 void __set_fixmap(
kaf24@3632 109 enum fixed_addresses idx, unsigned long p, unsigned long flags)
kaf24@3632 110 {
kaf24@3632 111 if ( unlikely(idx >= __end_of_fixed_addresses) )
kaf24@3632 112 BUG();
kaf24@3632 113 map_pages(idle_pg_table, fix_to_virt(idx), p, PAGE_SIZE, flags);
kaf24@3314 114 }
kaf24@3314 115
kaf24@3314 116
kaf24@3314 117 void __init paging_init(void)
kaf24@3314 118 {
kaf24@3632 119 void *newpt;
kaf24@3632 120
kaf24@3632 121 /* Allocate and map the machine-to-phys table. */
kaf24@3632 122 /* XXX TODO XXX */
kaf24@3632 123
kaf24@3632 124 /* Create page table for ioremap(). */
kaf24@3632 125 newpt = (void *)alloc_xenheap_page();
kaf24@3632 126 clear_page(newpt);
kaf24@3632 127 idle_pg_table[IOREMAP_VIRT_START >> L4_PAGETABLE_SHIFT] =
kaf24@3632 128 mk_l4_pgentry(__pa(newpt) | __PAGE_HYPERVISOR);
kaf24@3632 129
kaf24@3632 130 /* Create read-only mapping of MPT for guest-OS use. */
kaf24@3632 131 newpt = (void *)alloc_xenheap_page();
kaf24@3632 132 clear_page(newpt);
kaf24@3632 133 idle_pg_table[RO_MPT_VIRT_START >> L4_PAGETABLE_SHIFT] =
kaf24@3632 134 mk_l4_pgentry((__pa(newpt) | __PAGE_HYPERVISOR | _PAGE_USER) &
kaf24@3632 135 ~_PAGE_RW);
kaf24@3632 136 /* XXX TODO: Copy appropriate L3 entries from RDWR_MPT_VIRT_START XXX */
kaf24@3632 137
kaf24@3314 138 /* Set up linear page table mapping. */
kaf24@3336 139 idle_pg_table[LINEAR_PT_VIRT_START >> L4_PAGETABLE_SHIFT] =
kaf24@3336 140 mk_l4_pgentry(__pa(idle_pg_table) | __PAGE_HYPERVISOR);
kaf24@3314 141 }
kaf24@3314 142
kaf24@3314 143 void __init zap_low_mappings(void)
kaf24@3314 144 {
kaf24@3336 145 idle_pg_table[0] = mk_l4_pgentry(0);
kaf24@3314 146 }
kaf24@3314 147
kaf24@3314 148
kaf24@3314 149 /*
kaf24@3314 150 * Allows shooting down of borrowed page-table use on specific CPUs.
kaf24@3314 151 * Specifically, we borrow page tables when running the idle domain.
kaf24@3314 152 */
kaf24@3314 153 static void __synchronise_pagetables(void *mask)
kaf24@3314 154 {
kaf24@3336 155 struct exec_domain *ed = current;
kaf24@3336 156 if ( ((unsigned long)mask & (1 << ed->processor)) &&
kaf24@3336 157 is_idle_task(ed->domain) )
kaf24@3336 158 write_ptbase(&ed->mm);
kaf24@3314 159 }
kaf24@3314 160 void synchronise_pagetables(unsigned long cpu_mask)
kaf24@3314 161 {
kaf24@3314 162 __synchronise_pagetables((void *)cpu_mask);
kaf24@3314 163 smp_call_function(__synchronise_pagetables, (void *)cpu_mask, 1, 1);
kaf24@3314 164 }
kaf24@3314 165
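/*
 * Illustrative sketch only: a caller that wants CPUs currently borrowing the
 * idle domain's page tables to reload their page-table base passes a bitmask
 * of the CPUs concerned. The mask below (CPUs 0 and 2) is just an example.
 */
#if 0
    synchronise_pagetables((1UL << 0) | (1UL << 2));
#endif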
kaf24@3314 166 long do_stack_switch(unsigned long ss, unsigned long esp)
kaf24@3314 167 {
kaf24@3336 168 #if 0
kaf24@3314 169 int nr = smp_processor_id();
kaf24@3314 170 struct tss_struct *t = &init_tss[nr];
kaf24@3314 171
kaf24@3314 172 /* We need to do this check as we load and use SS on guest's behalf. */
kaf24@3314 173 if ( (ss & 3) == 0 )
kaf24@3314 174 return -EPERM;
kaf24@3314 175
kaf24@3314 176 current->thread.guestos_ss = ss;
kaf24@3314 177 current->thread.guestos_sp = esp;
kaf24@3314 178 t->ss1 = ss;
kaf24@3314 179 t->esp1 = esp;
kaf24@3336 180 #endif
kaf24@3314 181 return 0;
kaf24@3314 182 }
kaf24@3314 183
kaf24@3314 184
kaf24@3314 185 /* Returns TRUE if given descriptor is valid for GDT or LDT. */
kaf24@3314 186 int check_descriptor(unsigned long *d)
kaf24@3314 187 {
kaf24@3314 188 unsigned long base, limit, a = d[0], b = d[1];
kaf24@3314 189
kaf24@3314 190 /* A not-present descriptor will always fault, so is safe. */
kaf24@3314 191 if ( !(b & _SEGMENT_P) )
kaf24@3314 192 goto good;
kaf24@3314 193
kaf24@3314 194 /*
kaf24@3314 195 * We don't allow a DPL of zero. There is no legitimate reason for
kaf24@3314 196 * specifying DPL==0, and it gets rather dangerous if we also accept call
kaf24@3314 197 * gates (consider a call gate pointing at another guestos descriptor with
kaf24@3314 198 * DPL 0 -- this would get the OS ring-0 privileges).
kaf24@3314 199 */
kaf24@3314 200 if ( (b & _SEGMENT_DPL) == 0 )
kaf24@3314 201 goto bad;
kaf24@3314 202
kaf24@3314 203 if ( !(b & _SEGMENT_S) )
kaf24@3314 204 {
kaf24@3314 205 /*
kaf24@3314 206 * System segment:
kaf24@3314 207 * 1. Don't allow interrupt or trap gates as they belong in the IDT.
kaf24@3314 208 * 2. Don't allow TSS descriptors or task gates as we don't
kaf24@3314 209 * virtualise x86 tasks.
kaf24@3314 210 * 3. Don't allow LDT descriptors because they're unnecessary and
kaf24@3314 211 * I'm uneasy about allowing an LDT page to contain LDT
kaf24@3314 212 * descriptors. In any case, Xen automatically creates the
kaf24@3314 213 * required descriptor when reloading the LDT register.
kaf24@3314 214 * 4. We allow call gates but they must not jump to a private segment.
kaf24@3314 215 */
kaf24@3314 216
kaf24@3314 217 /* Disallow everything but call gates. */
kaf24@3314 218 if ( (b & _SEGMENT_TYPE) != 0xc00 )
kaf24@3314 219 goto bad;
kaf24@3314 220
kaf24@3336 221 #if 0
kaf24@3314 222 /* Can't allow far jump to a Xen-private segment. */
kaf24@3314 223 if ( !VALID_CODESEL(a>>16) )
kaf24@3314 224 goto bad;
kaf24@3336 225 #endif
kaf24@3314 226
kaf24@3314 227 /* Reserved bits must be zero. */
kaf24@3314 228 if ( (b & 0xe0) != 0 )
kaf24@3314 229 goto bad;
kaf24@3314 230
kaf24@3314 231 /* No base/limit check is needed for a call gate. */
kaf24@3314 232 goto good;
kaf24@3314 233 }
kaf24@3314 234
kaf24@3314 235 /* Check that base is at least a page away from Xen-private area. */
kaf24@3314 236 base = (b&(0xff<<24)) | ((b&0xff)<<16) | (a>>16);
kaf24@3314 237 if ( base >= (PAGE_OFFSET - PAGE_SIZE) )
kaf24@3314 238 goto bad;
kaf24@3314 239
kaf24@3314 240 /* Check and truncate the limit if necessary. */
kaf24@3314 241 limit = (b&0xf0000) | (a&0xffff);
kaf24@3314 242 limit++; /* We add one because limit is inclusive. */
kaf24@3314 243 if ( (b & _SEGMENT_G) )
kaf24@3314 244 limit <<= 12;
kaf24@3314 245
kaf24@3314 246 if ( (b & (_SEGMENT_CODE | _SEGMENT_EC)) == _SEGMENT_EC )
kaf24@3314 247 {
kaf24@3314 248 /*
kaf24@3314 249 * Grows-down limit check.
kaf24@3314 250 * NB. limit == 0xFFFFF provides no access (if G=1).
kaf24@3314 251 * limit == 0x00000 provides 4GB-4kB access (if G=1).
kaf24@3314 252 */
kaf24@3314 253 if ( (base + limit) > base )
kaf24@3314 254 {
kaf24@3314 255 limit = -(base & PAGE_MASK);
kaf24@3314 256 goto truncate;
kaf24@3314 257 }
kaf24@3314 258 }
kaf24@3314 259 else
kaf24@3314 260 {
kaf24@3314 261 /*
kaf24@3314 262 * Grows-up limit check.
kaf24@3314 263 * NB. limit == 0xFFFFF provides 4GB access (if G=1).
kaf24@3314 264 * limit == 0x00000 provides 4kB access (if G=1).
kaf24@3314 265 */
kaf24@3314 266 if ( ((base + limit) <= base) ||
kaf24@3314 267 ((base + limit) > PAGE_OFFSET) )
kaf24@3314 268 {
kaf24@3314 269 limit = PAGE_OFFSET - base;
kaf24@3314 270 truncate:
kaf24@3314 271 if ( !(b & _SEGMENT_G) )
kaf24@3314 272 goto bad; /* too dangerous; too hard to work out... */
kaf24@3314 273 limit = (limit >> 12) - 1;
kaf24@3314 274 d[0] &= ~0x0ffff; d[0] |= limit & 0x0ffff;
kaf24@3314 275 d[1] &= ~0xf0000; d[1] |= limit & 0xf0000;
kaf24@3314 276 }
kaf24@3314 277 }
kaf24@3314 278
kaf24@3314 279 good:
kaf24@3314 280 return 1;
kaf24@3314 281 bad:
kaf24@3314 282 return 0;
kaf24@3314 283 }
kaf24@3314 284
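/*
 * Illustrative worked example only: reassembling base and limit the same way
 * check_descriptor() does above. The descriptor words are example values
 * encoding a present, DPL-1, byte-granular read/write data segment with
 * base 4MB and a 64kB limit.
 */
#if 0
    unsigned long a = 0x0000ffff;  /* base[15:0]=0x0000, limit[15:0]=0xffff            */
    unsigned long b = 0x0000b240;  /* P=1, DPL=1, S=1, type=data RW, base[23:16]=0x40  */
    unsigned long base  = (b&(0xff<<24)) | ((b&0xff)<<16) | (a>>16);  /* 0x00400000 */
    unsigned long limit = (b&0xf0000) | (a&0xffff);                   /* 0x0000ffff */
#endif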
kaf24@3314 285
kaf24@3336 286 void destroy_gdt(struct exec_domain *ed)
kaf24@3314 287 {
kaf24@3314 288 int i;
kaf24@3314 289 unsigned long pfn;
kaf24@3314 290
kaf24@3314 291 for ( i = 0; i < 16; i++ )
kaf24@3314 292 {
kaf24@3336 293 if ( (pfn = l1_pgentry_to_pagenr(ed->mm.perdomain_ptes[i])) != 0 )
kaf24@3314 294 put_page_and_type(&frame_table[pfn]);
kaf24@3336 295 ed->mm.perdomain_ptes[i] = mk_l1_pgentry(0);
kaf24@3314 296 }
kaf24@3314 297 }
kaf24@3314 298
kaf24@3314 299
kaf24@3336 300 long set_gdt(struct exec_domain *ed,
kaf24@3314 301 unsigned long *frames,
kaf24@3314 302 unsigned int entries)
kaf24@3314 303 {
kaf24@3336 304 struct domain *d = ed->domain;
kaf24@3314 305 /* NB. There are 512 8-byte entries per GDT page. */
kaf24@3314 306 int i = 0, nr_pages = (entries + 511) / 512;
kaf24@3314 307 struct desc_struct *vgdt;
kaf24@3314 308 unsigned long pfn;
kaf24@3314 309
kaf24@3314 310 /* Check the first page in the new GDT. */
kaf24@3314 311 if ( (pfn = frames[0]) >= max_page )
kaf24@3314 312 goto fail;
kaf24@3314 313
kaf24@3314 314 /* The first page is special because Xen owns a range of entries in it. */
kaf24@3314 315 if ( !get_page_and_type(&frame_table[pfn], d, PGT_gdt_page) )
kaf24@3314 316 {
kaf24@3314 317 /* GDT checks failed: try zapping the Xen reserved entries. */
kaf24@3314 318 if ( !get_page_and_type(&frame_table[pfn], d, PGT_writable_page) )
kaf24@3314 319 goto fail;
kaf24@3314 320 vgdt = map_domain_mem(pfn << PAGE_SHIFT);
kaf24@3314 321 memset(vgdt + FIRST_RESERVED_GDT_ENTRY, 0,
kaf24@3314 322 NR_RESERVED_GDT_ENTRIES*8);
kaf24@3314 323 unmap_domain_mem(vgdt);
kaf24@3314 324 put_page_and_type(&frame_table[pfn]);
kaf24@3314 325
kaf24@3314 326 /* Okay, we zapped the entries. Now try the GDT checks again. */
kaf24@3314 327 if ( !get_page_and_type(&frame_table[pfn], d, PGT_gdt_page) )
kaf24@3314 328 goto fail;
kaf24@3314 329 }
kaf24@3314 330
kaf24@3314 331 /* Check the remaining pages in the new GDT. */
kaf24@3314 332 for ( i = 1; i < nr_pages; i++ )
kaf24@3314 333 if ( ((pfn = frames[i]) >= max_page) ||
kaf24@3314 334 !get_page_and_type(&frame_table[pfn], d, PGT_gdt_page) )
kaf24@3314 335 goto fail;
kaf24@3314 336
kaf24@3314 337 /* Copy reserved GDT entries to the new GDT. */
kaf24@3314 338 vgdt = map_domain_mem(frames[0] << PAGE_SHIFT);
kaf24@3314 339 memcpy(vgdt + FIRST_RESERVED_GDT_ENTRY,
kaf24@3314 340 gdt_table + FIRST_RESERVED_GDT_ENTRY,
kaf24@3314 341 NR_RESERVED_GDT_ENTRIES*8);
kaf24@3314 342 unmap_domain_mem(vgdt);
kaf24@3314 343
kaf24@3314 344 /* Tear down the old GDT. */
kaf24@3336 345 destroy_gdt(ed);
kaf24@3314 346
kaf24@3314 347 /* Install the new GDT. */
kaf24@3314 348 for ( i = 0; i < nr_pages; i++ )
kaf24@3336 349 ed->mm.perdomain_ptes[i] =
kaf24@3314 350 mk_l1_pgentry((frames[i] << PAGE_SHIFT) | __PAGE_HYPERVISOR);
kaf24@3314 351
kaf24@3336 352 SET_GDT_ADDRESS(ed, GDT_VIRT_START(ed));
kaf24@3336 353 SET_GDT_ENTRIES(ed, entries);
kaf24@3314 354
kaf24@3314 355 return 0;
kaf24@3314 356
kaf24@3314 357 fail:
kaf24@3314 358 while ( i-- > 0 )
kaf24@3314 359 put_page_and_type(&frame_table[frames[i]]);
kaf24@3314 360 return -EINVAL;
kaf24@3314 361 }
kaf24@3314 362
kaf24@3314 363
kaf24@3314 364 long do_set_gdt(unsigned long *frame_list, unsigned int entries)
kaf24@3314 365 {
kaf24@3314 366 int nr_pages = (entries + 511) / 512;
kaf24@3314 367 unsigned long frames[16];
kaf24@3314 368 long ret;
kaf24@3314 369
kaf24@3314 370 if ( (entries <= LAST_RESERVED_GDT_ENTRY) || (entries > 8192) )
kaf24@3314 371 return -EINVAL;
kaf24@3314 372
kaf24@3314 373 if ( copy_from_user(frames, frame_list, nr_pages * sizeof(unsigned long)) )
kaf24@3314 374 return -EFAULT;
kaf24@3314 375
kaf24@3314 376 if ( (ret = set_gdt(current, frames, entries)) == 0 )
kaf24@3314 377 {
kaf24@3314 378 local_flush_tlb();
kaf24@3314 379 __asm__ __volatile__ ("lgdt %0" : : "m" (*current->mm.gdt)); /* lgdt reads its operand */
kaf24@3314 380 }
kaf24@3314 381
kaf24@3314 382 return ret;
kaf24@3314 383 }
kaf24@3314 384
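/*
 * Illustrative sketch only: a GDT of 1024 entries occupies (1024+511)/512 == 2
 * pages, so the caller supplies two machine frame numbers. The frame numbers
 * below are placeholders, not real allocations.
 */
#if 0
    unsigned long example_frames[2] = { 0x1234, 0x1235 };   /* example MFNs */
    (void)do_set_gdt(example_frames, 1024);
#endif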
kaf24@3314 385
kaf24@3314 386 long do_update_descriptor(
kaf24@3314 387 unsigned long pa, unsigned long word1, unsigned long word2)
kaf24@3314 388 {
kaf24@3314 389 unsigned long *gdt_pent, pfn = pa >> PAGE_SHIFT, d[2];
kaf24@3314 390 struct pfn_info *page;
kaf24@3314 391 long ret = -EINVAL;
kaf24@3314 392
kaf24@3314 393 d[0] = word1;
kaf24@3314 394 d[1] = word2;
kaf24@3314 395
kaf24@3314 396 if ( (pa & 7) || (pfn >= max_page) || !check_descriptor(d) )
kaf24@3314 397 return -EINVAL;
kaf24@3314 398
kaf24@3314 399 page = &frame_table[pfn];
kaf24@3336 400 if ( unlikely(!get_page(page, current->domain)) )
kaf24@3314 401 return -EINVAL;
kaf24@3314 402
kaf24@3314 403 /* Check if the given frame is in use in an unsafe context. */
kaf24@3314 404 switch ( page->u.inuse.type_info & PGT_type_mask )
kaf24@3314 405 {
kaf24@3314 406 case PGT_gdt_page:
kaf24@3314 407 /* Disallow updates of Xen-reserved descriptors in the current GDT. */
kaf24@3336 408 if ( (l1_pgentry_to_pagenr(current->mm.perdomain_ptes[0]) == pfn) &&
kaf24@3314 409 (((pa&(PAGE_SIZE-1))>>3) >= FIRST_RESERVED_GDT_ENTRY) &&
kaf24@3314 410 (((pa&(PAGE_SIZE-1))>>3) <= LAST_RESERVED_GDT_ENTRY) )
kaf24@3314 411 goto out;
kaf24@3314 412 if ( unlikely(!get_page_type(page, PGT_gdt_page)) )
kaf24@3314 413 goto out;
kaf24@3314 414 break;
kaf24@3314 415 case PGT_ldt_page:
kaf24@3314 416 if ( unlikely(!get_page_type(page, PGT_ldt_page)) )
kaf24@3314 417 goto out;
kaf24@3314 418 break;
kaf24@3314 419 default:
kaf24@3314 420 if ( unlikely(!get_page_type(page, PGT_writable_page)) )
kaf24@3314 421 goto out;
kaf24@3314 422 break;
kaf24@3314 423 }
kaf24@3314 424
kaf24@3314 425 /* All is good so make the update. */
kaf24@3314 426 gdt_pent = map_domain_mem(pa);
kaf24@3314 427 memcpy(gdt_pent, d, 8);
kaf24@3314 428 unmap_domain_mem(gdt_pent);
kaf24@3314 429
kaf24@3314 430 put_page_type(page);
kaf24@3314 431
kaf24@3314 432 ret = 0; /* success */
kaf24@3314 433
kaf24@3314 434 out:
kaf24@3314 435 put_page(page);
kaf24@3314 436 return ret;
kaf24@3314 437 }
kaf24@3314 438
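/*
 * Illustrative sketch only: updating one 8-byte descriptor slot through
 * do_update_descriptor(). The physical address is the byte address of the
 * slot and must be 8-byte aligned; the frame number, slot index and
 * descriptor words below are example values (the same words decoded in the
 * check_descriptor() sketch above).
 */
#if 0
    unsigned long example_mfn = 0x1234, example_slot = 10;
    (void)do_update_descriptor((example_mfn << PAGE_SHIFT) + (example_slot * 8),
                               0x0000ffff, 0x0000b240);
#endif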
kaf24@3314 439 #ifdef MEMORY_GUARD
kaf24@3314 440
kaf24@3349 441 #if 1
kaf24@3349 442
kaf24@3349 443 void *memguard_init(void *heap_start) { return heap_start; }
kaf24@3349 444 void memguard_guard_range(void *p, unsigned long l) {}
kaf24@3349 445 void memguard_unguard_range(void *p, unsigned long l) {}
kaf24@3349 446
kaf24@3349 447 #else
kaf24@3349 448
kaf24@3314 449 void *memguard_init(void *heap_start)
kaf24@3314 450 {
kaf24@3314 451 l1_pgentry_t *l1;
kaf24@3314 452 int i, j;
kaf24@3314 453
kaf24@3314 454 /* Round the allocation pointer up to a page boundary. */
kaf24@3314 455 heap_start = (void *)(((unsigned long)heap_start + (PAGE_SIZE-1)) &
kaf24@3314 456 PAGE_MASK);
kaf24@3314 457
kaf24@3314 458 /* Memory guarding is incompatible with super pages. */
kaf24@3314 459 for ( i = 0; i < (xenheap_phys_end >> L2_PAGETABLE_SHIFT); i++ )
kaf24@3314 460 {
kaf24@3314 461 l1 = (l1_pgentry_t *)heap_start;
kaf24@3314 462 heap_start = (void *)((unsigned long)heap_start + PAGE_SIZE);
kaf24@3314 463 for ( j = 0; j < ENTRIES_PER_L1_PAGETABLE; j++ )
kaf24@3314 464 l1[j] = mk_l1_pgentry((i << L2_PAGETABLE_SHIFT) |
kaf24@3314 465 (j << L1_PAGETABLE_SHIFT) |
kaf24@3314 466 __PAGE_HYPERVISOR);
kaf24@3314 467 idle_pg_table[i] = idle_pg_table[i + l2_table_offset(PAGE_OFFSET)] =
kaf24@3314 468 mk_l2_pgentry(virt_to_phys(l1) | __PAGE_HYPERVISOR);
kaf24@3314 469 }
kaf24@3314 470
kaf24@3314 471 return heap_start;
kaf24@3314 472 }
kaf24@3314 473
kaf24@3314 474 static void __memguard_change_range(void *p, unsigned long l, int guard)
kaf24@3314 475 {
kaf24@3314 476 l1_pgentry_t *l1;
kaf24@3314 477 l2_pgentry_t *l2;
kaf24@3314 478 unsigned long _p = (unsigned long)p;
kaf24@3314 479 unsigned long _l = (unsigned long)l;
kaf24@3314 480
kaf24@3314 481 /* Ensure we are dealing with a page-aligned whole number of pages. */
kaf24@3314 482 ASSERT((_p&PAGE_MASK) != 0);
kaf24@3314 483 ASSERT((_l&PAGE_MASK) != 0);
kaf24@3314 484 ASSERT((_p&~PAGE_MASK) == 0);
kaf24@3314 485 ASSERT((_l&~PAGE_MASK) == 0);
kaf24@3314 486
kaf24@3314 487 while ( _l != 0 )
kaf24@3314 488 {
kaf24@3314 489 l2 = &idle_pg_table[l2_table_offset(_p)];
kaf24@3314 490 l1 = l2_pgentry_to_l1(*l2) + l1_table_offset(_p);
kaf24@3314 491 if ( guard )
kaf24@3314 492 *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) & ~_PAGE_PRESENT);
kaf24@3314 493 else
kaf24@3314 494 *l1 = mk_l1_pgentry(l1_pgentry_val(*l1) | _PAGE_PRESENT);
kaf24@3314 495 _p += PAGE_SIZE;
kaf24@3314 496 _l -= PAGE_SIZE;
kaf24@3314 497 }
kaf24@3314 498 }
kaf24@3314 499
kaf24@3314 500 void memguard_guard_range(void *p, unsigned long l)
kaf24@3314 501 {
kaf24@3314 502 __memguard_change_range(p, l, 1);
kaf24@3314 503 local_flush_tlb();
kaf24@3314 504 }
kaf24@3314 505
kaf24@3314 506 void memguard_unguard_range(void *p, unsigned long l)
kaf24@3314 507 {
kaf24@3314 508 __memguard_change_range(p, l, 0);
kaf24@3314 509 }
kaf24@3314 510
kaf24@3349 511 #endif
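/*
 * Illustrative sketch only: how the memguard interface is meant to be used
 * once the real implementation (currently stubbed out above) is enabled. Any
 * access to the guarded page would then fault, because _PAGE_PRESENT is
 * cleared in its L1 entry.
 */
#if 0
    void *pg = (void *)alloc_xenheap_page();
    memguard_guard_range(pg, PAGE_SIZE);
    /* ... stray accesses to pg now fault ... */
    memguard_unguard_range(pg, PAGE_SIZE);
#endif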
kaf24@3314 512
kaf24@3314 513 #endif