debuggers.hg
changeset 2663:f0ed7653341e
bitkeeper revision 1.1159.1.222 (416c00558HL2Jw-kOYa6NaZn4JiaJQ)
Cleaned up the writable p.t. code and fixed a bug when shadow mode
is enabled.
| author | kaf24@freefall.cl.cam.ac.uk |
| --- | --- |
| date | Tue Oct 12 16:03:33 2004 +0000 (2004-10-12) |
| parents | b3ae9d6dbece |
| children | 5604871d7e94 |
| files | xen/arch/x86/memory.c xen/arch/x86/traps.c xen/include/asm-x86/mm.h |
line diff
```diff
--- a/xen/arch/x86/memory.c	Tue Oct 12 13:30:25 2004 +0000
+++ b/xen/arch/x86/memory.c	Tue Oct 12 16:03:33 2004 +0000
@@ -367,19 +367,6 @@ get_linear_pagetable(
 }
 
 
-static inline int
-readonly_page_from_l1e(
-    l1_pgentry_t l1e)
-{
-    struct pfn_info *page = &frame_table[l1_pgentry_to_pagenr(l1e)];
-    unsigned long l1v = l1_pgentry_val(l1e);
-
-    if ( !(l1v & _PAGE_PRESENT) || !pfn_is_ram(l1v >> PAGE_SHIFT) )
-        return 0;
-    put_page_type(page);
-    return 1;
-}
-
 static int
 get_page_from_l1e(
     l1_pgentry_t l1e, struct domain *d)
@@ -1146,8 +1133,7 @@ static int do_extended_command(unsigned
                     d, d->domain, nd, x, page->u.inuse.type_info);
             spin_unlock(&d->page_alloc_lock);
             put_domain(e);
-            okay = 0;
-            break;
+            return 0;
         }
         __asm__ __volatile__(
             LOCK_PREFIX "cmpxchg8b %2"
@@ -1156,7 +1142,6 @@ static int do_extended_command(unsigned
             : "0" (d), "1" (x), "c" (NULL), "b" (x) );
     }
     while ( unlikely(nd != d) || unlikely(y != x) );
-    if (!okay) break;
 
     /*
      * Unlink from 'd'. At least one reference remains (now anonymous), so
@@ -1589,15 +1574,7 @@ int do_update_va_mapping_otherdomain(uns
  * Writable Pagetables
  */
 
-ptwr_info_t ptwr_info[NR_CPUS] =
-    { [ 0 ... NR_CPUS-1 ] =
-      {
-          .ptinfo[PTWR_PT_ACTIVE].l1va = 0,
-          .ptinfo[PTWR_PT_ACTIVE].page = 0,
-          .ptinfo[PTWR_PT_INACTIVE].l1va = 0,
-          .ptinfo[PTWR_PT_INACTIVE].page = 0,
-      }
-    };
+ptwr_info_t ptwr_info[NR_CPUS];
 
 #ifdef VERBOSE
 int ptwr_debug = 0x0;
@@ -1608,19 +1585,24 @@ int ptwr_debug = 0x0;
 #define PTWR_PRINTK(_f, _a...) ((void)0)
 #endif
 
+/* Flush the given writable p.t. page and write-protect it again. */
 void ptwr_flush(const int which)
 {
-    unsigned long  pte, *ptep, l1va;
-    l1_pgentry_t  *pl1e;
-    l2_pgentry_t  *pl2e, nl2e;
-    int            cpu = smp_processor_id();
-    int            i;
+    unsigned long  sstat, spte, pte, *ptep, l1va;
+    l1_pgentry_t  *sl1e = NULL, *pl1e, ol1e, nl1e;
+    l2_pgentry_t  *pl2e, nl2e;
+    int            i, cpu = smp_processor_id();
+    struct domain *d = current;
 
     l1va = ptwr_info[cpu].ptinfo[which].l1va;
     ptep = (unsigned long *)&linear_pg_table[l1va>>PAGE_SHIFT];
 
-    /* make pt page write protected */
-    if ( unlikely(__get_user(pte, ptep)) ) {
+    /*
+     * STEP 1. Write-protect the p.t. page so no more updates can occur.
+     */
+
+    if ( unlikely(__get_user(pte, ptep)) )
+    {
         MEM_LOG("ptwr: Could not read pte at %p\n", ptep);
         domain_crash();
     }
@@ -1628,157 +1610,182 @@ void ptwr_flush(const int which)
                 PTWR_PRINT_WHICH, ptep, pte);
     pte &= ~_PAGE_RW;
 
-    if ( unlikely(current->mm.shadow_mode) ) {
-        unsigned long spte;
-        l1pte_no_fault(&current->mm, &pte, &spte);
-        __put_user( spte, (unsigned long *)&shadow_linear_pg_table
-                    [l1va>>PAGE_SHIFT] );
+    if ( unlikely(d->mm.shadow_mode) )
+    {
+        /* Write-protect the p.t. page in the shadow page table. */
+        l1pte_no_fault(&d->mm, &pte, &spte);
+        __put_user(
+            spte, (unsigned long *)&shadow_linear_pg_table[l1va>>PAGE_SHIFT]);
+
+        /* Is the p.t. page itself shadowed? Map it into Xen space if so. */
+        sstat = get_shadow_status(&d->mm, pte >> PAGE_SHIFT);
+        if ( sstat & PSH_shadowed )
+            sl1e = map_domain_mem((sstat & PSH_pfn_mask) << PAGE_SHIFT);
     }
 
-    if ( unlikely(__put_user(pte, ptep)) ) {
+    /* Write-protect the p.t. page in the guest page table. */
+    if ( unlikely(__put_user(pte, ptep)) )
+    {
         MEM_LOG("ptwr: Could not update pte at %p\n", ptep);
         domain_crash();
     }
+
+    /* Ensure that there are no stale writable mappings in any TLB. */
     __flush_tlb_one(l1va);
     PTWR_PRINTK("[%c] disconnected_l1va at %p now %08lx\n",
                 PTWR_PRINT_WHICH, ptep, pte);
 
+    /*
+     * STEP 2. Validate any modified PTEs.
+     */
+
     pl1e = ptwr_info[cpu].ptinfo[which].pl1e;
-    for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ ) {
-        l1_pgentry_t ol1e, nl1e;
+    for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
+    {
         ol1e = ptwr_info[cpu].ptinfo[which].page[i];
         nl1e = pl1e[i];
-        if (likely(l1_pgentry_val(ol1e) == l1_pgentry_val(nl1e)))
+
+        if ( likely(l1_pgentry_val(ol1e) == l1_pgentry_val(nl1e)) )
             continue;
-        if (likely(l1_pgentry_val(ol1e) == (l1_pgentry_val(nl1e) | _PAGE_RW))
-            && readonly_page_from_l1e(nl1e))
+
+        /*
+         * Fast path for PTEs that have merely been write-protected
+         * (e.g., during a Unix fork()). A strict reduction in privilege.
+         */
+        if ( likely(l1_pgentry_val(ol1e) == (l1_pgentry_val(nl1e)|_PAGE_RW)) )
+        {
+            if ( likely(l1_pgentry_val(nl1e) & _PAGE_PRESENT) )
+            {
+                if ( unlikely(sl1e != NULL) )
+                    l1pte_no_fault(
+                        &d->mm, &l1_pgentry_val(nl1e),
+                        &l1_pgentry_val(sl1e[i]));
+                put_page_type(&frame_table[l1_pgentry_to_pagenr(nl1e)]);
+            }
             continue;
-        if (unlikely(l1_pgentry_val(ol1e) & _PAGE_PRESENT))
-            put_page_from_l1e(ol1e, current);
-        if (unlikely(!get_page_from_l1e(nl1e, current))) {
+        }
+
+        if ( unlikely(!get_page_from_l1e(nl1e, d)) )
+        {
             MEM_LOG("ptwr: Could not re-validate l1 page\n");
             domain_crash();
         }
+
+        if ( unlikely(sl1e != NULL) )
+            l1pte_no_fault(
+                &d->mm, &l1_pgentry_val(nl1e), &l1_pgentry_val(sl1e[i]));
+
+        if ( unlikely(l1_pgentry_val(ol1e) & _PAGE_PRESENT) )
+            put_page_from_l1e(ol1e, d);
     }
     unmap_domain_mem(pl1e);
 
-    if (which == PTWR_PT_ACTIVE && likely(!current->mm.shadow_mode)) {
-        /* reconnect l1 page (no need if shadow mode) */
-        pl2e = &linear_l2_table[ptwr_info[cpu].active_pteidx];
+    /*
+     * STEP 3. Reattach the L1 p.t. page into the current address space.
+     */
+
+    if ( (which == PTWR_PT_ACTIVE) && likely(!d->mm.shadow_mode) )
+    {
+        pl2e = &linear_l2_table[ptwr_info[cpu].ptinfo[which].l2_idx];
         nl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT);
         update_l2e(pl2e, *pl2e, nl2e);
     }
 
-    if ( unlikely(current->mm.shadow_mode) )
-    {
-        unsigned long sstat =
-            get_shadow_status(&current->mm, pte >> PAGE_SHIFT);
-
-        if ( sstat & PSH_shadowed )
-        {
-            int i;
-            unsigned long spfn = sstat & PSH_pfn_mask;
-            l1_pgentry_t *sl1e = map_domain_mem( spfn << PAGE_SHIFT );
-
-            for ( i = 0; i < ENTRIES_PER_L1_PAGETABLE; i++ )
-            {
-                l1pte_no_fault(&current->mm,
-                               &l1_pgentry_val(
-                                   ptwr_info[cpu].ptinfo[which].page[i]),
-                               &l1_pgentry_val(sl1e[i]));
-            }
-            unmap_domain_mem(sl1e);
-            put_shadow_status(&current->mm);
-        }
-
-    }
+    /*
+     * STEP 4. Final tidy-up.
+     */
 
     ptwr_info[cpu].ptinfo[which].l1va = 0;
+
+    if ( unlikely(sl1e != NULL) )
+    {
+        unmap_domain_mem(sl1e);
+        put_shadow_status(&d->mm);
+    }
 }
 
+/* Write page fault handler: check if guest is trying to modify a PTE. */
 int ptwr_do_page_fault(unsigned long addr)
 {
-    /* write page fault, check if we're trying to modify an l1 page table */
-    unsigned long pte, pfn;
+    unsigned long    pte, pfn;
     struct pfn_info *page;
-    l2_pgentry_t *pl2e, nl2e;
-    int cpu = smp_processor_id();
-    int which;
-
-#if 0
-    PTWR_PRINTK("get user %p for va %08lx\n",
-                &linear_pg_table[addr>>PAGE_SHIFT], addr);
-#endif
+    l2_pgentry_t    *pl2e, nl2e;
+    int              which, cpu = smp_processor_id();
+    u32              l2_idx;
 
-    /* Testing for page_present in the L2 avoids lots of unncessary fixups */
-    if ( (l2_pgentry_val(linear_l2_table[addr >> L2_PAGETABLE_SHIFT]) &
-          _PAGE_PRESENT) &&
-         (__get_user(pte, (unsigned long *)
-                     &linear_pg_table[addr >> PAGE_SHIFT]) == 0) )
-    {
-        if ( (pte & _PAGE_RW) && (pte & _PAGE_PRESENT) )
-            return 0; /* we can't help. Maybe shadow mode can? */
+    /*
+     * Attempt to read the PTE that maps the VA being accessed. By checking for
+     * PDE validity in the L2 we avoid many expensive fixups in __get_user().
+     */
+    if ( !(l2_pgentry_val(linear_l2_table[addr>>L2_PAGETABLE_SHIFT]) &
+           _PAGE_PRESENT) ||
+         __get_user(pte, (unsigned long *)&linear_pg_table[addr>>PAGE_SHIFT]) )
+        return 0;
 
-        pfn = pte >> PAGE_SHIFT;
-#if 0
-        PTWR_PRINTK("check pte %08lx = pfn %08lx for va %08lx\n",
-                    pte, pfn, addr);
-#endif
-        page = &frame_table[pfn];
-        if ( (page->u.inuse.type_info & PGT_type_mask) == PGT_l1_page_table )
-        {
-            u32 va_mask = page->u.inuse.type_info & PGT_va_mask;
-
-            if ( unlikely(va_mask >= PGT_va_unknown) )
-                domain_crash();
-            va_mask >>= PGT_va_shift;
-
-            pl2e = &linear_l2_table[va_mask];
-
-            which = (l2_pgentry_val(*pl2e) >> PAGE_SHIFT != pfn) ?
-                PTWR_PT_INACTIVE : PTWR_PT_ACTIVE;
+    pfn  = pte >> PAGE_SHIFT;
+    page = &frame_table[pfn];
 
-            PTWR_PRINTK("[%c] page_fault on l1 pt at va %08lx, pt for %08x, "
-                        "pfn %08lx\n", PTWR_PRINT_WHICH,
-                        addr, va_mask << L2_PAGETABLE_SHIFT, pfn);
-
-            if ( ptwr_info[cpu].ptinfo[which].l1va )
-                ptwr_flush(which);
-            ptwr_info[cpu].ptinfo[which].l1va = addr | 1;
-
-            if (which == PTWR_PT_ACTIVE) {
-                ptwr_info[cpu].active_pteidx = va_mask;
-                if ( likely(!current->mm.shadow_mode) ) {
-                    /* disconnect l1 page (unnecessary in shadow mode) */
-                    nl2e = mk_l2_pgentry((l2_pgentry_val(*pl2e) &
-                                          ~_PAGE_PRESENT));
-                    update_l2e(pl2e, *pl2e, nl2e);
-                    flush_tlb();
-                }
-            }
+    /* We are looking only for read-only mappings of p.t. pages. */
+    if ( ((pte & (_PAGE_RW | _PAGE_PRESENT)) != _PAGE_PRESENT) ||
+         ((page->u.inuse.type_info & PGT_type_mask) != PGT_l1_page_table) )
+        return 0;
+
+    /* Get the L2 index at which this L1 p.t. is always mapped. */
+    l2_idx = page->u.inuse.type_info & PGT_va_mask;
+    if ( unlikely(l2_idx >= PGT_va_unknown) )
+        domain_crash(); /* Urk! This L1 is mapped in multiple L2 slots! */
+    l2_idx >>= PGT_va_shift;
+
+    /*
+     * Is the L1 p.t. mapped into the current address space? If so we call it
+     * an ACTIVE p.t., otherwise it is INACTIVE.
+     */
+    pl2e = &linear_l2_table[l2_idx];
+    which = (l2_pgentry_val(*pl2e) >> PAGE_SHIFT != pfn) ?
+        PTWR_PT_INACTIVE : PTWR_PT_ACTIVE;
+
+    PTWR_PRINTK("[%c] page_fault on l1 pt at va %08lx, pt for %08x, "
+                "pfn %08lx\n", PTWR_PRINT_WHICH,
+                addr, l2_idx << L2_PAGETABLE_SHIFT, pfn);
+
+    /*
+     * We only allow one ACTIVE and one INACTIVE p.t. to be updated at a
+     * time. If there is already one, we must flush it out.
+     */
+    if ( ptwr_info[cpu].ptinfo[which].l1va )
+        ptwr_flush(which);
 
-            ptwr_info[cpu].ptinfo[which].pl1e =
-                map_domain_mem(pfn << PAGE_SHIFT);
-            memcpy(ptwr_info[cpu].ptinfo[which].page,
-                   ptwr_info[cpu].ptinfo[which].pl1e,
-                   ENTRIES_PER_L1_PAGETABLE * sizeof(l1_pgentry_t));
-
-            /* make pt page writable */
-            pte |= _PAGE_RW;
-            PTWR_PRINTK("[%c] update %p pte to %08lx\n", PTWR_PRINT_WHICH,
-                        &linear_pg_table[addr>>PAGE_SHIFT], pte);
-            if ( unlikely(__put_user(pte, (unsigned long *)
-                                     &linear_pg_table[addr>>PAGE_SHIFT])) ) {
-                MEM_LOG("ptwr: Could not update pte at %p\n", (unsigned long *)
-                        &linear_pg_table[addr>>PAGE_SHIFT]);
-                domain_crash();
-            }
-
-            /* maybe fall through to shadow mode to propagate writeable L1 */
-            return ( !current->mm.shadow_mode );
-        }
+    ptwr_info[cpu].ptinfo[which].l1va   = addr | 1;
+    ptwr_info[cpu].ptinfo[which].l2_idx = l2_idx;
+
+    /* For safety, disconnect the L1 p.t. page from current space. */
+    if ( (which == PTWR_PT_ACTIVE) && likely(!current->mm.shadow_mode) )
+    {
+        nl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) & ~_PAGE_PRESENT);
+        update_l2e(pl2e, *pl2e, nl2e);
+        flush_tlb();
     }
-    return 0;
+
+    /* Temporarily map the L1 page, and make a copy of it. */
+    ptwr_info[cpu].ptinfo[which].pl1e = map_domain_mem(pfn << PAGE_SHIFT);
+    memcpy(ptwr_info[cpu].ptinfo[which].page,
+           ptwr_info[cpu].ptinfo[which].pl1e,
+           ENTRIES_PER_L1_PAGETABLE * sizeof(l1_pgentry_t));
+
+    /* Finally, make the p.t. page writable by the guest OS. */
+    pte |= _PAGE_RW;
+    PTWR_PRINTK("[%c] update %p pte to %08lx\n", PTWR_PRINT_WHICH,
+                &linear_pg_table[addr>>PAGE_SHIFT], pte);
+    if ( unlikely(__put_user(pte, (unsigned long *)
+                             &linear_pg_table[addr>>PAGE_SHIFT])) )
+    {
+        MEM_LOG("ptwr: Could not update pte at %p\n", (unsigned long *)
+                &linear_pg_table[addr>>PAGE_SHIFT]);
+        domain_crash();
+    }
+
+    /* Maybe fall through to shadow mode to propagate writable L1. */
+    return !current->mm.shadow_mode;
 }
 
 static __init int ptwr_init(void)
@@ -1791,19 +1798,21 @@ static __init int ptwr_init(void)
             (void *)alloc_xenheap_page();
         ptwr_info[i].ptinfo[PTWR_PT_INACTIVE].page =
             (void *)alloc_xenheap_page();
-        machine_to_phys_mapping[virt_to_phys(
-            ptwr_info[i].ptinfo[PTWR_PT_ACTIVE].page)>>PAGE_SHIFT] =
-            INVALID_P2M_ENTRY;
-        machine_to_phys_mapping[virt_to_phys(
-            ptwr_info[i].ptinfo[PTWR_PT_INACTIVE].page)>>PAGE_SHIFT] =
-            INVALID_P2M_ENTRY;
     }
 
     return 0;
 }
 __initcall(ptwr_init);
 
+
+
+
+/************************************************************************/
+/************************************************************************/
+/************************************************************************/
+
 #ifndef NDEBUG
+
 void ptwr_status(void)
 {
     unsigned long pte, *ptep, pfn;
@@ -1838,10 +1847,6 @@ void ptwr_status(void)
         page = &frame_table[pfn];
     }
 
-
-/************************************************************************/
-
-
 void audit_domain(struct domain *d)
 {
     int ttot=0, ctot=0, io_mappings=0, lowmem_mappings=0;
@@ -2256,5 +2261,4 @@ void audit_domains_key(unsigned char key
     raise_softirq(MEMAUDIT_SOFTIRQ);
 }
 
-
 #endif
```
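The heart of this patch is a snapshot-and-diff cycle: `ptwr_do_page_fault()` copies the L1 page before granting the guest write access, and `ptwr_flush()` later walks the live page against that copy, taking a fast path for entries that merely lost `_PAGE_RW` and fully re-validating everything else. The stand-alone program below is a minimal sketch of just that cycle; the types, constants, and function names are simplified stand-ins for illustration, not the Xen API.

```c
/* A minimal user-space model of the snapshot-and-diff idea behind
 * ptwr_do_page_fault()/ptwr_flush(). All names and types here are
 * hypothetical stand-ins, not the Xen interfaces. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NPTES        8          /* stand-in for ENTRIES_PER_L1_PAGETABLE */
#define PAGE_PRESENT 0x1u
#define PAGE_RW      0x2u

typedef uint32_t pte_t;

static pte_t live[NPTES];       /* the p.t. page the guest writes to */
static pte_t snapshot[NPTES];   /* copy taken before granting access */

/* "Fault" path: snapshot the page, then let the guest write to it. */
static void ptwr_emulate_fault(void)
{
    memcpy(snapshot, live, sizeof(live));
}

/* "Flush" path: find modified entries and (re)validate them. The fast
 * path mirrors the patch: a pure loss of _PAGE_RW is a strict reduction
 * in privilege and needs no full re-validation. */
static void ptwr_emulate_flush(void)
{
    for (int i = 0; i < NPTES; i++) {
        if (live[i] == snapshot[i])
            continue;                           /* unmodified */
        if (snapshot[i] == (live[i] | PAGE_RW))
            printf("pte %d: write-protected (fast path)\n", i);
        else
            printf("pte %d: %08x -> %08x, full re-validate\n",
                   i, (unsigned)snapshot[i], (unsigned)live[i]);
    }
}

int main(void)
{
    live[1] = PAGE_PRESENT | PAGE_RW | 0x1000;
    live[2] = PAGE_PRESENT | PAGE_RW | 0x2000;
    ptwr_emulate_fault();                  /* guest granted write access */
    live[1] &= ~PAGE_RW;                   /* e.g. fork() write-protect  */
    live[2] = PAGE_PRESENT | 0x3000;       /* remap: needs validation    */
    ptwr_emulate_flush();
    return 0;
}
```

In the real code the "full re-validate" branch is `get_page_from_l1e()`/`put_page_from_l1e()`, and the shadow-mode bug fix consists of propagating each validated entry into the shadow L1 (`sl1e`) inside the same loop, rather than in a separate pass afterwards.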
```diff
--- a/xen/arch/x86/traps.c	Tue Oct 12 13:30:25 2004 +0000
+++ b/xen/arch/x86/traps.c	Tue Oct 12 16:03:33 2004 +0000
@@ -347,9 +347,9 @@ asmlinkage void do_page_fault(struct pt_
 
     if ( likely(VM_ASSIST(d, VMASST_TYPE_writable_pagetables)) )
     {
-        if ( unlikely(ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va &&
-                      (addr >> L2_PAGETABLE_SHIFT) ==
-                      ptwr_info[cpu].active_pteidx ))
+        if ( unlikely(ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l1va) &&
+             unlikely((addr >> L2_PAGETABLE_SHIFT) ==
+                      ptwr_info[cpu].ptinfo[PTWR_PT_ACTIVE].l2_idx) )
        {
             ptwr_flush(PTWR_PT_ACTIVE);
             return;
```
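The rewritten check compares the faulting address's page-directory slot against the `l2_idx` recorded when the p.t. page was detached. The small program below just demonstrates that slot arithmetic; the shift value is an assumption matching vanilla x86-32 paging (4 KB pages, 1024-entry tables, 4 MB per L2 slot), not necessarily every build.

```c
/* Quick check of the L2-slot arithmetic used above: on 32-bit x86,
 * addr >> L2_PAGETABLE_SHIFT picks the page-directory slot whose L1
 * table maps addr. The constant below is an assumed value. */
#include <stdio.h>

#define L2_PAGETABLE_SHIFT 22   /* each L2 slot covers 4 MB */

int main(void)
{
    unsigned long addr = 0x0804a123;                 /* example faulting VA */
    unsigned long l2_idx = addr >> L2_PAGETABLE_SHIFT;
    printf("va %08lx lies in L2 slot %lu (covers %08lx-%08lx)\n",
           addr, l2_idx,
           l2_idx << L2_PAGETABLE_SHIFT,
           ((l2_idx + 1) << L2_PAGETABLE_SHIFT) - 1);
    return 0;
}
```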
```diff
--- a/xen/include/asm-x86/mm.h	Tue Oct 12 13:30:25 2004 +0000
+++ b/xen/include/asm-x86/mm.h	Tue Oct 12 16:03:33 2004 +0000
@@ -249,14 +249,18 @@ extern vm_assist_info_t vm_assist_info[]
 
 /* Writable Pagetables */
 typedef struct {
+    /* Linear address where the guest is updating the p.t. page. */
     unsigned long l1va;
+    /* Copy of the p.t. page, taken before guest is given write access. */
     l1_pgentry_t *page;
+    /* A temporary Xen mapping of the actual p.t. page. */
     l1_pgentry_t *pl1e;
+    /* Index in L2 page table where this L1 p.t. is always hooked. */
+    unsigned int l2_idx; /* NB. Only used for PTWR_PT_ACTIVE. */
 } ptwr_ptinfo_t;
 
 typedef struct {
     ptwr_ptinfo_t ptinfo[2];
-    long active_pteidx;
 } __cacheline_aligned ptwr_info_t;
 
 extern ptwr_info_t ptwr_info[];
```
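Moving `l2_idx` into `ptwr_ptinfo_t` also explains why memory.c can drop its explicit zero-initializer: a static array such as `ptwr_info` lives in BSS and starts zeroed regardless. The fragment below is a sketch of the reshaped per-CPU bookkeeping in stand-alone form; `NR_CPUS`, the `PTWR_*` indices, and the `*_demo` names are assumed values for illustration only.

```c
/* Stand-alone model of the per-CPU bookkeeping after this patch.
 * Field names follow the header above; everything else is hypothetical. */
#include <stdio.h>

#define NR_CPUS          4
#define PTWR_PT_ACTIVE   0
#define PTWR_PT_INACTIVE 1

typedef struct {
    unsigned long l1va;    /* guest VA of the p.t. page; 0 if slot unused */
    unsigned int  l2_idx;  /* only meaningful for the ACTIVE slot */
} ptwr_ptinfo_demo_t;

typedef struct {
    ptwr_ptinfo_demo_t ptinfo[2];   /* one ACTIVE, one INACTIVE slot */
} ptwr_info_demo_t;

/* Static storage is zero-initialized, as the patch now relies on. */
static ptwr_info_demo_t ptwr_demo[NR_CPUS];

int main(void)
{
    int cpu = 0;
    /* A fault on an active p.t. records both the VA and its L2 slot. */
    ptwr_demo[cpu].ptinfo[PTWR_PT_ACTIVE].l1va   = 0x08048000UL | 1;
    ptwr_demo[cpu].ptinfo[PTWR_PT_ACTIVE].l2_idx = 32;

    printf("cpu%d active: l1va=%08lx l2_idx=%u\n", cpu,
           ptwr_demo[cpu].ptinfo[PTWR_PT_ACTIVE].l1va,
           ptwr_demo[cpu].ptinfo[PTWR_PT_ACTIVE].l2_idx);
    return 0;
}
```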