Coverage Report

Created: 2017-10-25 09:10

/root/src/xen/xen/arch/x86/mm/p2m-pt.c
Source (every instrumented line below has an execution count of 0; the file is entirely uncovered):
/******************************************************************************
 * arch/x86/mm/p2m-pt.c
 *
 * Implementation of p2m data structures as pagetables, for use by
 * NPT and shadow-pagetable code
 *
 * Parts of this code are Copyright (c) 2009-2011 by Citrix Systems, Inc.
 * Parts of this code are Copyright (c) 2007 by Advanced Micro Devices.
 * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/iommu.h>
#include <xen/vm_event.h>
#include <xen/event.h>
#include <xen/trace.h>
#include <public/vm_event.h>
#include <asm/domain.h>
#include <asm/page.h>
#include <asm/paging.h>
#include <asm/p2m.h>
#include <asm/mem_sharing.h>
#include <asm/hvm/nestedhvm.h>
#include <asm/hvm/svm/amd-iommu-proto.h>

#include "mm-locks.h"

/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(_m) __mfn_to_page(mfn_x(_m))
#undef page_to_mfn
#define page_to_mfn(_pg) _mfn(__page_to_mfn(_pg))

/*
 * We may store INVALID_MFN in PTEs.  We need to clip this to avoid trampling
 * over higher-order bits (NX, p2m type, IOMMU flags).  We seem to not need
 * to unclip on the read path, as callers are concerned only with p2m type in
 * such cases.
 */
#define p2m_l1e_from_pfn(pfn, flags)    \
    l1e_from_pfn((pfn) & (PADDR_MASK >> PAGE_SHIFT), (flags))
#define p2m_l2e_from_pfn(pfn, flags)    \
    l2e_from_pfn((pfn) & ((PADDR_MASK & ~(_PAGE_PSE_PAT | 0UL)) \
                          >> PAGE_SHIFT), (flags) | _PAGE_PSE)
#define p2m_l3e_from_pfn(pfn, flags)    \
    l3e_from_pfn((pfn) & ((PADDR_MASK & ~(_PAGE_PSE_PAT | 0UL)) \
                          >> PAGE_SHIFT), (flags) | _PAGE_PSE)
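
/*
 * Illustration: INVALID_MFN is ~0UL, so p2m_l1e_from_pfn(mfn_x(INVALID_MFN),
 * flags) masks the pfn down to PADDR_MASK >> PAGE_SHIFT, leaving the NX,
 * p2m type and IOMMU flag bits of the PTE untouched, as described above.
 */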

/* PTE flags for the various types of p2m entry */
#define P2M_BASE_FLAGS \
        (_PAGE_PRESENT | _PAGE_USER | _PAGE_DIRTY | _PAGE_ACCESSED)

#define RECALC_FLAGS (_PAGE_USER|_PAGE_ACCESSED)
#define set_recalc(level, ent) level##e_remove_flags(ent, RECALC_FLAGS)
#define clear_recalc(level, ent) level##e_add_flags(ent, RECALC_FLAGS)
#define _needs_recalc(flags) (!((flags) & _PAGE_USER))
#define needs_recalc(level, ent) _needs_recalc(level##e_get_flags(ent))
#define valid_recalc(level, ent) (!(level##e_get_flags(ent) & _PAGE_ACCESSED))
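
/*
 * E.g. after set_recalc(l1, e) the entry has _PAGE_USER (and _PAGE_ACCESSED)
 * clear, so needs_recalc(l1, e) is true; clear_recalc(l1, e) restores both
 * flags once the type recalculation has been carried out.
 */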

static unsigned long p2m_type_to_flags(const struct p2m_domain *p2m,
                                       p2m_type_t t,
                                       mfn_t mfn,
                                       unsigned int level)
{
    unsigned long flags;
    /*
     * AMD IOMMU: When we share the p2m table with the IOMMU, bits 9-11 are
     * used by the IOMMU hardware to encode the next IO page level, and bits
     * 59-62 are used for IOMMU flags, so we cannot use these bits to store
     * p2m types.
     */
    flags = (unsigned long)(t & 0x7f) << 12;

    switch ( t )
    {
    case p2m_invalid:
    case p2m_mmio_dm:
    case p2m_populate_on_demand:
    case p2m_ram_paging_out:
    case p2m_ram_paged:
    case p2m_ram_paging_in:
    default:
        return flags | _PAGE_NX_BIT;
    case p2m_grant_map_ro:
        return flags | P2M_BASE_FLAGS | _PAGE_NX_BIT;
    case p2m_ioreq_server:
        flags |= P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
        if ( p2m->ioreq.flags & XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
            return flags & ~_PAGE_RW;
        return flags;
    case p2m_ram_ro:
    case p2m_ram_logdirty:
    case p2m_ram_shared:
        return flags | P2M_BASE_FLAGS;
    case p2m_ram_rw:
        return flags | P2M_BASE_FLAGS | _PAGE_RW;
    case p2m_grant_map_rw:
    case p2m_map_foreign:
        return flags | P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
    case p2m_mmio_direct:
        if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
            flags |= _PAGE_RW;
        else
        {
            flags |= _PAGE_PWT;
            ASSERT(!level);
        }
        return flags | P2M_BASE_FLAGS | _PAGE_PCD;
    }
}
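
/*
 * A minimal sketch of the encoding above, assuming the usual
 * p2m_flags_to_type() accessor from asm/p2m.h ((flags >> 12) & 0x7f):
 * the type stored in bits 12-18 survives a round trip through the PTE
 * flags, and p2m_ram_rw comes back present, writable and executable:
 *
 *     unsigned long fl = p2m_type_to_flags(p2m, p2m_ram_rw, _mfn(0), 0);
 *
 *     ASSERT(p2m_flags_to_type(fl) == p2m_ram_rw);
 *     ASSERT((fl & (_PAGE_PRESENT | _PAGE_RW)) == (_PAGE_PRESENT | _PAGE_RW));
 *     ASSERT(!(fl & _PAGE_NX_BIT));
 */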

/*
 * Find the next level's P2M entry, checking for out-of-range gfns.
 * Returns NULL on error.
 */
static l1_pgentry_t *
p2m_find_entry(void *table, unsigned long *gfn_remainder,
               unsigned long gfn, uint32_t shift, uint32_t max)
{
    u32 index;

    index = *gfn_remainder >> shift;
    if ( index >= max )
    {
        P2M_DEBUG("gfn=%#lx out of range "
                  "(gfn_remainder=%#lx shift=%d index=%#x max=%#x)\n",
                  gfn, *gfn_remainder, shift, index, max);
        return NULL;
    }
    *gfn_remainder &= (1 << shift) - 1;
    return (l1_pgentry_t *)table + index;
}
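
/*
 * E.g. to locate the L4 entry for a gfn, callers below pass
 * shift = 3 * PAGETABLE_ORDER (27) and max = 1 << PAGETABLE_ORDER (512);
 * on success *gfn_remainder is left holding the gfn's offset within the
 * selected 512GB slot.
 */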

/* Free intermediate tables from a p2m sub-tree */
static void
p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order)
{
    /* End if the entry is a leaf entry. */
    if ( page_order == PAGE_ORDER_4K
         || !(l1e_get_flags(*p2m_entry) & _PAGE_PRESENT)
         || (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
        return;

    if ( page_order > PAGE_ORDER_2M )
    {
        l1_pgentry_t *l3_table = map_domain_page(l1e_get_mfn(*p2m_entry));

        for ( int i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
            p2m_free_entry(p2m, l3_table + i, page_order - 9);
        unmap_domain_page(l3_table);
    }

    p2m_free_ptp(p2m, l1e_get_page(*p2m_entry));
}

/* AMD IOMMU: Convert next level bits and r/w bits into 24 bits p2m flags */
#define iommu_nlevel_to_flags(nl, f) ((((nl) & 0x7) << 9) | (((f) & 0x3) << 21))

static void p2m_add_iommu_flags(l1_pgentry_t *p2m_entry,
                                unsigned int nlevel, unsigned int flags)
{
    if ( iommu_hap_pt_share )
        l1e_add_flags(*p2m_entry, iommu_nlevel_to_flags(nlevel, flags));
}
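
/*
 * E.g. iommu_nlevel_to_flags(1, 0) == 0x200: next level 1 encoded in bits
 * 9-11 with no r/w bits, which is what the shared-pagetable AMD IOMMU code
 * expects to find in an intermediate entry.
 */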

/*
 * Walk one level of the P2M table, allocating a new table if required.
 * Returns: 0 for success, -errno for failure.
 */
static int
p2m_next_level(struct p2m_domain *p2m, void **table,
               unsigned long *gfn_remainder, unsigned long gfn, u32 shift,
               u32 max, unsigned int level, bool_t unmap)
{
    l1_pgentry_t *p2m_entry, new_entry;
    void *next;
    unsigned int flags;

    if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn,
                                      shift, max)) )
        return -ENOENT;

    flags = l1e_get_flags(*p2m_entry);

    /* PoD/paging: Not present doesn't imply empty. */
    if ( !flags )
    {
        mfn_t mfn = p2m_alloc_ptp(p2m, level);

        if ( mfn_eq(mfn, INVALID_MFN) )
            return -ENOMEM;

        new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);

        p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
    }
    else if ( flags & _PAGE_PSE )
    {
        /* Split superpages into smaller ones. */
        unsigned long pfn = l1e_get_pfn(*p2m_entry);
        mfn_t mfn;
        l1_pgentry_t *l1_entry;
        unsigned int i;

        switch ( level )
        {
        case 2:
            break;

        case 1:
            /*
             * New splintered mappings inherit the flags of the old superpage,
             * with a little reorganisation for the _PAGE_PSE_PAT bit.
             */
            if ( pfn & 1 )           /* ==> _PAGE_PSE_PAT was set */
                pfn -= 1;            /* Clear it; _PAGE_PSE becomes _PAGE_PAT */
            else
                flags &= ~_PAGE_PSE; /* Clear _PAGE_PSE (== _PAGE_PAT) */
            break;

        default:
            ASSERT_UNREACHABLE();
            return -EINVAL;
        }

        mfn = p2m_alloc_ptp(p2m, level);
        if ( mfn_eq(mfn, INVALID_MFN) )
            return -ENOMEM;

        l1_entry = map_domain_page(mfn);

        /* Inherit original IOMMU permissions, but update Next Level. */
        if ( iommu_hap_pt_share )
        {
            flags &= ~iommu_nlevel_to_flags(~0, 0);
            flags |= iommu_nlevel_to_flags(level - 1, 0);
        }

        for ( i = 0; i < (1u << PAGETABLE_ORDER); i++ )
        {
            new_entry = l1e_from_pfn(pfn | (i << ((level - 1) * PAGETABLE_ORDER)),
                                     flags);
            p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, level);
        }

        unmap_domain_page(l1_entry);

        new_entry = l1e_from_mfn(mfn, P2M_BASE_FLAGS | _PAGE_RW);
        p2m_add_iommu_flags(&new_entry, level, IOMMUF_readable|IOMMUF_writable);
        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);
    }
    else
        ASSERT(flags & _PAGE_PRESENT);

    next = map_domain_page(l1e_get_mfn(*p2m_entry));
    if ( unmap )
        unmap_domain_page(*table);
    *table = next;

    return 0;
}
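
/*
 * Note the stride used when splitting above: breaking up a 1GB entry
 * (level 2) writes 512 L2 entries whose pfns step by 1 << PAGETABLE_ORDER,
 * while breaking up a 2MB entry (level 1) writes 512 L1 entries whose pfns
 * step by 1, i.e. i << ((level - 1) * PAGETABLE_ORDER) in both cases.
 */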

/*
 * Mark (via clearing the U flag) as needing P2M type re-calculation all valid
 * present entries at the targeted level for the passed in GFN range, which is
 * guaranteed to not cross a page (table) boundary at that level.
 */
static int p2m_pt_set_recalc_range(struct p2m_domain *p2m,
                                   unsigned int level,
                                   unsigned long first_gfn,
                                   unsigned long last_gfn)
{
    void *table;
    unsigned long gfn_remainder = first_gfn, remainder;
    unsigned int i;
    l1_pgentry_t *pent, *plast;
    int err = 0;

    table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
    for ( i = 4; i-- > level; )
    {
        remainder = gfn_remainder;
        pent = p2m_find_entry(table, &remainder, first_gfn,
                              i * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER);
        if ( !pent )
        {
            err = -EINVAL;
            goto out;
        }

        if ( !(l1e_get_flags(*pent) & _PAGE_PRESENT) )
            goto out;

        err = p2m_next_level(p2m, &table, &gfn_remainder, first_gfn,
                             i * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER,
                             i, 1);
        if ( err )
            goto out;
    }

    remainder = gfn_remainder + (last_gfn - first_gfn);
    pent = p2m_find_entry(table, &gfn_remainder, first_gfn,
                          i * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER);
    plast = p2m_find_entry(table, &remainder, last_gfn,
                           i * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER);
    if ( pent && plast )
        for ( ; pent <= plast; ++pent )
        {
            l1_pgentry_t e = *pent;

            if ( (l1e_get_flags(e) & _PAGE_PRESENT) && !needs_recalc(l1, e) )
            {
                set_recalc(l1, e);
                p2m->write_p2m_entry(p2m, first_gfn, pent, e, level);
            }
            first_gfn += 1UL << (i * PAGETABLE_ORDER);
        }
    else
        err = -EIO;

 out:
    unmap_domain_page(table);

    return err;
}

/*
 * Handle possibly necessary P2M type re-calculation (U flag clear for a
 * present entry) for the entries in the page table hierarchy for the given
 * GFN. Propagate the re-calculation flag down to the next page table level
 * for entries not involved in the translation of the given GFN.
 */
static int do_recalc(struct p2m_domain *p2m, unsigned long gfn)
{
    void *table;
    unsigned long gfn_remainder = gfn;
    unsigned int level = 4;
    l1_pgentry_t *pent;
    int err = 0;

    table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
    while ( --level )
    {
        unsigned long remainder = gfn_remainder;

        pent = p2m_find_entry(table, &remainder, gfn,
                              level * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER);
        if ( !pent || !(l1e_get_flags(*pent) & _PAGE_PRESENT) )
            goto out;

        if ( l1e_get_flags(*pent) & _PAGE_PSE )
        {
            unsigned long mask = ~0UL << (level * PAGETABLE_ORDER);

            ASSERT(p2m_flags_to_type(l1e_get_flags(*pent)) != p2m_ioreq_server);
            if ( !needs_recalc(l1, *pent) ||
                 !p2m_is_changeable(p2m_flags_to_type(l1e_get_flags(*pent))) ||
                 p2m_is_logdirty_range(p2m, gfn & mask, gfn | ~mask) >= 0 )
                break;
        }

        err = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                             level * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER,
                             level, 0);
        if ( err )
            goto out;

        if ( needs_recalc(l1, *pent) )
        {
            l1_pgentry_t e = *pent, *ptab = table;
            unsigned int i;

            if ( !valid_recalc(l1, e) )
                P2M_DEBUG("bogus recalc state at d%d:%lx:%u\n",
                          p2m->domain->domain_id, gfn, level);
            remainder = gfn_remainder;
            for ( i = 0; i < (1 << PAGETABLE_ORDER); ++i )
            {
                l1_pgentry_t ent = ptab[i];

                if ( (l1e_get_flags(ent) & _PAGE_PRESENT) &&
                     !needs_recalc(l1, ent) )
                {
                    set_recalc(l1, ent);
                    p2m->write_p2m_entry(p2m, gfn - remainder, &ptab[i],
                                         ent, level);
                }
                remainder -= 1UL << ((level - 1) * PAGETABLE_ORDER);
            }
            smp_wmb();
            clear_recalc(l1, e);
            p2m->write_p2m_entry(p2m, gfn, pent, e, level + 1);
        }
        unmap_domain_page((void *)((unsigned long)pent & PAGE_MASK));
    }

    pent = p2m_find_entry(table, &gfn_remainder, gfn,
                          level * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER);
    if ( pent && (l1e_get_flags(*pent) & _PAGE_PRESENT) &&
         needs_recalc(l1, *pent) )
    {
        l1_pgentry_t e = *pent;
        p2m_type_t ot, nt;
        unsigned long mask = ~0UL << (level * PAGETABLE_ORDER);

        if ( !valid_recalc(l1, e) )
            P2M_DEBUG("bogus recalc leaf at d%d:%lx:%u\n",
                      p2m->domain->domain_id, gfn, level);
        ot = p2m_flags_to_type(l1e_get_flags(e));
        nt = p2m_recalc_type_range(true, ot, p2m, gfn & mask, gfn | ~mask);
        if ( nt != ot )
        {
            unsigned long mfn = l1e_get_pfn(e);
            unsigned long flags = p2m_type_to_flags(p2m, nt,
                                                    _mfn(mfn), level);

            if ( level )
            {
                if ( flags & _PAGE_PAT )
                {
                    BUILD_BUG_ON(_PAGE_PAT != _PAGE_PSE);
                    mfn |= _PAGE_PSE_PAT >> PAGE_SHIFT;
                }
                else
                    mfn &= ~((unsigned long)_PAGE_PSE_PAT >> PAGE_SHIFT);
                flags |= _PAGE_PSE;
            }

            if ( ot == p2m_ioreq_server )
            {
                ASSERT(p2m->ioreq.entry_count > 0);
                ASSERT(level == 0);
                p2m->ioreq.entry_count--;
            }

            e = l1e_from_pfn(mfn, flags);
            p2m_add_iommu_flags(&e, level,
                                (nt == p2m_ram_rw)
                                ? IOMMUF_readable|IOMMUF_writable : 0);
            ASSERT(!needs_recalc(l1, e));
        }
        else
            clear_recalc(l1, e);
        p2m->write_p2m_entry(p2m, gfn, pent, e, level + 1);
    }

 out:
    unmap_domain_page(table);

    return err;
}

int p2m_pt_handle_deferred_changes(uint64_t gpa)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(current->domain);
    int rc;

    p2m_lock(p2m);
    rc = do_recalc(p2m, PFN_DOWN(gpa));
    p2m_unlock(p2m);

    return rc;
}
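
/*
 * Background for the "deferred" in the name (a sketch of the mechanism, not
 * of any particular caller): NPT walks treat guest accesses as user-mode
 * accesses, so an entry whose _PAGE_USER flag was cleared by set_recalc()
 * faults when touched. The nested-page-fault handler can then call this
 * function to carry out the pending recalculation for the faulting address
 * before the access is retried.
 */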

/* Returns: 0 for success, -errno for failure */
static int
p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
                 unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma,
                 int sve)
{
    /* XXX -- this might be able to be faster iff current->domain == d */
    void *table;
    unsigned long gfn = gfn_x(gfn_);
    unsigned long i, gfn_remainder = gfn;
    l1_pgentry_t *p2m_entry, entry_content;
    /* Intermediate table to free if we're replacing it with a superpage. */
    l1_pgentry_t intermediate_entry = l1e_empty();
    l2_pgentry_t l2e_content;
    l3_pgentry_t l3e_content;
    int rc;
    unsigned int iommu_pte_flags = p2m_get_iommu_flags(p2mt, mfn);
    /*
     * old_mfn and iommu_old_flags control possible flush/update needs on the
     * IOMMU: We need to flush when MFN or flags (i.e. permissions) change.
     * iommu_old_flags being initialized to zero covers the case of the entry
     * getting replaced being a non-present (leaf or intermediate) one. For
     * present leaf entries the real value will get calculated below, while
     * for present intermediate entries ~0 (guaranteed != iommu_pte_flags)
     * will be used (to cover all cases of what the leaf entries underneath
     * the intermediate one might be).
     */
    unsigned int flags, iommu_old_flags = 0;
    unsigned long old_mfn = mfn_x(INVALID_MFN);

    ASSERT(sve != 0);

    if ( tb_init_done )
    {
        struct {
            u64 gfn, mfn;
            int p2mt;
            int d:16, order:16;
        } t;

        t.gfn = gfn;
        t.mfn = mfn_x(mfn);
        t.p2mt = p2mt;
        t.d = p2m->domain->domain_id;
        t.order = page_order;

        __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), &t);
    }

    if ( unlikely(p2m_is_foreign(p2mt)) )
    {
        /* hvm fixme: foreign types are only supported on ept at present */
        gdprintk(XENLOG_WARNING, "Unimplemented foreign p2m type.\n");
        return -EINVAL;
    }

    /* Carry out any possibly pending earlier changes first. */
    rc = do_recalc(p2m, gfn);
    if ( rc < 0 )
        return rc;

    table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
    rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                        L4_PAGETABLE_SHIFT - PAGE_SHIFT,
                        L4_PAGETABLE_ENTRIES, 3, 1);
    if ( rc )
        goto out;

    /* Try to use a 1GB page mapping if this feature is supported. */
    if ( page_order == PAGE_ORDER_1G )
    {
        p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
                                   L3_PAGETABLE_SHIFT - PAGE_SHIFT,
                                   L3_PAGETABLE_ENTRIES);
        ASSERT(p2m_entry);
        flags = l1e_get_flags(*p2m_entry);
        if ( flags & _PAGE_PRESENT )
        {
            if ( flags & _PAGE_PSE )
            {
                old_mfn = l1e_get_pfn(*p2m_entry);
                iommu_old_flags =
                    p2m_get_iommu_flags(p2m_flags_to_type(flags),
                                        _mfn(old_mfn));
            }
            else
            {
                iommu_old_flags = ~0;
                intermediate_entry = *p2m_entry;
            }
        }

        ASSERT(p2m_flags_to_type(flags) != p2m_ioreq_server);
        ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
        l3e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)
            ? p2m_l3e_from_pfn(mfn_x(mfn),
                               p2m_type_to_flags(p2m, p2mt, mfn, 2))
            : l3e_empty();
        entry_content.l1 = l3e_content.l3;

        if ( entry_content.l1 != 0 )
            p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);

        p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 3);
        /* NB: paging_write_p2m_entry() handles tlb flushes properly */
    }
    else
    {
        rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                            L3_PAGETABLE_SHIFT - PAGE_SHIFT,
                            L3_PAGETABLE_ENTRIES, 2, 1);
        if ( rc )
            goto out;
    }

    if ( page_order == PAGE_ORDER_4K )
    {
        p2m_type_t p2mt_old;

        rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                            L2_PAGETABLE_SHIFT - PAGE_SHIFT,
                            L2_PAGETABLE_ENTRIES, 1, 1);
        if ( rc )
            goto out;

        p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
                                   0, L1_PAGETABLE_ENTRIES);
        ASSERT(p2m_entry);
        old_mfn = l1e_get_pfn(*p2m_entry);
        iommu_old_flags =
            p2m_get_iommu_flags(p2m_flags_to_type(l1e_get_flags(*p2m_entry)),
                                _mfn(old_mfn));

        if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )
            entry_content = p2m_l1e_from_pfn(mfn_x(mfn),
                                         p2m_type_to_flags(p2m, p2mt, mfn, 0));
        else
            entry_content = l1e_empty();

        if ( entry_content.l1 != 0 )
            p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);

        p2mt_old = p2m_flags_to_type(l1e_get_flags(*p2m_entry));

        /*
         * p2m_ioreq_server is only used for 4K pages, so
         * the count is only done for level 1 entries.
         */
        if ( p2mt == p2m_ioreq_server )
            p2m->ioreq.entry_count++;

        if ( p2mt_old == p2m_ioreq_server )
        {
            ASSERT(p2m->ioreq.entry_count > 0);
            p2m->ioreq.entry_count--;
        }

        /* level 1 entry */
        p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 1);
        /* NB: paging_write_p2m_entry() handles tlb flushes properly */
    }
    else if ( page_order == PAGE_ORDER_2M )
    {
        p2m_entry = p2m_find_entry(table, &gfn_remainder, gfn,
                                   L2_PAGETABLE_SHIFT - PAGE_SHIFT,
                                   L2_PAGETABLE_ENTRIES);
        ASSERT(p2m_entry);
        flags = l1e_get_flags(*p2m_entry);
        if ( flags & _PAGE_PRESENT )
        {
            if ( flags & _PAGE_PSE )
            {
                old_mfn = l1e_get_pfn(*p2m_entry);
                iommu_old_flags =
                    p2m_get_iommu_flags(p2m_flags_to_type(flags),
                                        _mfn(old_mfn));
            }
            else
            {
                iommu_old_flags = ~0;
                intermediate_entry = *p2m_entry;
            }
        }

        ASSERT(p2m_flags_to_type(flags) != p2m_ioreq_server);
        ASSERT(!mfn_valid(mfn) || p2mt != p2m_mmio_direct);
        l2e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)
            ? p2m_l2e_from_pfn(mfn_x(mfn),
                               p2m_type_to_flags(p2m, p2mt, mfn, 1))
            : l2e_empty();
        entry_content.l1 = l2e_content.l2;

        if ( entry_content.l1 != 0 )
            p2m_add_iommu_flags(&entry_content, 0, iommu_pte_flags);

        p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 2);
        /* NB: paging_write_p2m_entry() handles tlb flushes properly */
    }

    /* Track the highest gfn for which we have ever had a valid mapping */
    if ( p2mt != p2m_invalid
         && (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) )
        p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;

    if ( iommu_enabled && need_iommu(p2m->domain) &&
         (iommu_old_flags != iommu_pte_flags || old_mfn != mfn_x(mfn)) )
    {
        ASSERT(rc == 0);

        if ( iommu_use_hap_pt(p2m->domain) )
        {
            if ( iommu_old_flags )
                amd_iommu_flush_pages(p2m->domain, gfn, page_order);
        }
        else if ( iommu_pte_flags )
            for ( i = 0; i < (1UL << page_order); i++ )
            {
                rc = iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
                                    iommu_pte_flags);
                if ( unlikely(rc) )
                {
                    while ( i-- )
                        /* If statement to satisfy __must_check. */
                        if ( iommu_unmap_page(p2m->domain, gfn + i) )
                            continue;

                    break;
                }
            }
        else
            for ( i = 0; i < (1UL << page_order); i++ )
            {
                int ret = iommu_unmap_page(p2m->domain, gfn + i);

                if ( !rc )
                    rc = ret;
            }
    }

    /*
     * Free old intermediate tables if necessary.  This has to be the
     * last thing we do, after removal from the IOMMU tables, so as to
     * avoid a potential use-after-free.
     */
    if ( l1e_get_flags(intermediate_entry) & _PAGE_PRESENT )
        p2m_free_entry(p2m, &intermediate_entry, page_order);

 out:
    unmap_domain_page(table);
    return rc;
}

static mfn_t
p2m_pt_get_entry(struct p2m_domain *p2m, gfn_t gfn_,
                 p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
                 unsigned int *page_order, bool_t *sve)
{
    mfn_t mfn;
    unsigned long gfn = gfn_x(gfn_);
    paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT;
    l2_pgentry_t *l2e;
    l1_pgentry_t *l1e;
    unsigned int flags;
    p2m_type_t l1t;
    bool_t recalc;

    ASSERT(paging_mode_translate(p2m->domain));

    if ( sve )
        *sve = 1;

    /* XXX This is for compatibility with the old model, where anything not
     * XXX marked as RAM was considered to be emulated MMIO space.
     * XXX Once we start explicitly registering MMIO regions in the p2m
     * XXX we will return p2m_invalid for unmapped gfns. */
    *t = p2m_mmio_dm;
    /* Not implemented except with EPT */
    *a = p2m_access_rwx;

    if ( gfn > p2m->max_mapped_pfn )
    {
        /* This pfn is higher than the highest the p2m map currently holds */
        if ( page_order )
        {
            for ( *page_order = 3 * PAGETABLE_ORDER; *page_order;
                  *page_order -= PAGETABLE_ORDER )
                if ( (gfn & ~((1UL << *page_order) - 1)) >
                     p2m->max_mapped_pfn )
                    break;
        }
        return INVALID_MFN;
    }

    mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));

    {
        l4_pgentry_t *l4e = map_domain_page(mfn);
        l4e += l4_table_offset(addr);
        if ( page_order )
            *page_order = 3 * PAGETABLE_ORDER;
        if ( (l4e_get_flags(*l4e) & _PAGE_PRESENT) == 0 )
        {
            unmap_domain_page(l4e);
            return INVALID_MFN;
        }
        mfn = l4e_get_mfn(*l4e);
        recalc = needs_recalc(l4, *l4e);
        unmap_domain_page(l4e);
    }
    {
        l3_pgentry_t *l3e = map_domain_page(mfn);
        l3e += l3_table_offset(addr);
        if ( page_order )
            *page_order = 2 * PAGETABLE_ORDER;

 pod_retry_l3:
        flags = l3e_get_flags(*l3e);
        if ( !(flags & _PAGE_PRESENT) )
        {
            if ( p2m_flags_to_type(flags) == p2m_populate_on_demand )
            {
                if ( q & P2M_ALLOC )
                {
                    if ( p2m_pod_demand_populate(p2m, gfn_, PAGE_ORDER_1G) )
                        goto pod_retry_l3;
                    gdprintk(XENLOG_ERR, "%s: Allocate 1GB failed!\n", __func__);
                }
                else
                    *t = p2m_populate_on_demand;
            }
            unmap_domain_page(l3e);
            return INVALID_MFN;
        }
        if ( flags & _PAGE_PSE )
        {
            mfn = _mfn(l3e_get_pfn(*l3e) +
                       l2_table_offset(addr) * L1_PAGETABLE_ENTRIES +
                       l1_table_offset(addr));
            *t = p2m_recalc_type(recalc || _needs_recalc(flags),
                                 p2m_flags_to_type(flags), p2m, gfn);
            unmap_domain_page(l3e);

            ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
            return (p2m_is_valid(*t)) ? mfn : INVALID_MFN;
        }

        mfn = l3e_get_mfn(*l3e);
        if ( _needs_recalc(flags) )
            recalc = 1;
        unmap_domain_page(l3e);
    }

    l2e = map_domain_page(mfn);
    l2e += l2_table_offset(addr);
    if ( page_order )
        *page_order = PAGETABLE_ORDER;

 pod_retry_l2:
    flags = l2e_get_flags(*l2e);
    if ( !(flags & _PAGE_PRESENT) )
    {
        /* PoD: Try to populate a 2-meg chunk */
        if ( p2m_flags_to_type(flags) == p2m_populate_on_demand )
        {
            if ( q & P2M_ALLOC )
            {
                if ( p2m_pod_demand_populate(p2m, gfn_, PAGE_ORDER_2M) )
                    goto pod_retry_l2;
            }
            else
                *t = p2m_populate_on_demand;
        }

        unmap_domain_page(l2e);
        return INVALID_MFN;
    }
    if ( flags & _PAGE_PSE )
    {
        mfn = _mfn(l2e_get_pfn(*l2e) + l1_table_offset(addr));
        *t = p2m_recalc_type(recalc || _needs_recalc(flags),
                             p2m_flags_to_type(flags), p2m, gfn);
        unmap_domain_page(l2e);

        ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));
        return (p2m_is_valid(*t)) ? mfn : INVALID_MFN;
    }

    mfn = l2e_get_mfn(*l2e);
    if ( needs_recalc(l2, *l2e) )
        recalc = 1;
    unmap_domain_page(l2e);

    l1e = map_domain_page(mfn);
    l1e += l1_table_offset(addr);
    if ( page_order )
        *page_order = 0;

 pod_retry_l1:
    flags = l1e_get_flags(*l1e);
    l1t = p2m_flags_to_type(flags);
    if ( !(flags & _PAGE_PRESENT) && !p2m_is_paging(l1t) )
    {
        /* PoD: Try to populate */
        if ( l1t == p2m_populate_on_demand )
        {
            if ( q & P2M_ALLOC )
            {
                if ( p2m_pod_demand_populate(p2m, gfn_, PAGE_ORDER_4K) )
                    goto pod_retry_l1;
            }
            else
                *t = p2m_populate_on_demand;
        }

        unmap_domain_page(l1e);
        return INVALID_MFN;
    }
    mfn = l1e_get_mfn(*l1e);
    *t = p2m_recalc_type(recalc || _needs_recalc(flags), l1t, p2m, gfn);
    unmap_domain_page(l1e);

    ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t) || p2m_is_paging(*t));
    return (p2m_is_valid(*t) || p2m_is_grant(*t)) ? mfn : INVALID_MFN;
}
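
/*
 * A usage sketch, assuming the standard get_gfn wrappers from asm/p2m.h
 * (lookups normally reach this hook through p2m->get_entry()):
 *
 *     p2m_type_t t;
 *     mfn_t mfn = get_gfn_query(d, gfn, &t);
 *     ...
 *     put_gfn(d, gfn);
 */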

static void p2m_pt_change_entry_type_global(struct p2m_domain *p2m,
                                            p2m_type_t ot, p2m_type_t nt)
{
    l1_pgentry_t *tab;
    unsigned long gfn = 0;
    unsigned int i, changed;

    if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) == 0 )
        return;

    ASSERT(hap_enabled(p2m->domain));

    tab = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
    for ( changed = i = 0; i < (1 << PAGETABLE_ORDER); ++i )
    {
        l1_pgentry_t e = tab[i];

        if ( (l1e_get_flags(e) & _PAGE_PRESENT) &&
             !needs_recalc(l1, e) )
        {
            set_recalc(l1, e);
            p2m->write_p2m_entry(p2m, gfn, &tab[i], e, 4);
            ++changed;
        }
        gfn += 1UL << (L4_PAGETABLE_SHIFT - PAGE_SHIFT);
    }
    unmap_domain_page(tab);

    if ( changed )
        flush_tlb_mask(p2m->domain->domain_dirty_cpumask);
}

static int p2m_pt_change_entry_type_range(struct p2m_domain *p2m,
                                          p2m_type_t ot, p2m_type_t nt,
                                          unsigned long first_gfn,
                                          unsigned long last_gfn)
{
    unsigned long mask = (1 << PAGETABLE_ORDER) - 1;
    unsigned int i;
    int err = 0;

    ASSERT(hap_enabled(p2m->domain));

    for ( i = 1; i <= 4; )
    {
        if ( first_gfn & mask )
        {
            unsigned long end_gfn = min(first_gfn | mask, last_gfn);

            err = p2m_pt_set_recalc_range(p2m, i, first_gfn, end_gfn);
            if ( err || end_gfn >= last_gfn )
                break;
            first_gfn = end_gfn + 1;
        }
        else if ( (last_gfn & mask) != mask )
        {
            unsigned long start_gfn = max(first_gfn, last_gfn & ~mask);

            err = p2m_pt_set_recalc_range(p2m, i, start_gfn, last_gfn);
            if ( err || start_gfn <= first_gfn )
                break;
            last_gfn = start_gfn - 1;
        }
        else
        {
            ++i;
            mask |= mask << PAGETABLE_ORDER;
        }
    }

    return err;
}
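
/*
 * Worked example: for first_gfn 0x3 and last_gfn 0x5ff, the loop first
 * marks the partial range [0x3, 0x1ff] at level 1 (individual L1 entries),
 * then widens the mask and marks [0x200, 0x5ff] at level 2 (two whole L2
 * entries) before breaking out.
 */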

#if P2M_AUDIT
long p2m_pt_audit_p2m(struct p2m_domain *p2m)
{
    unsigned long entry_count = 0, pmbad = 0;
    unsigned long mfn, gfn, m2pfn;

    ASSERT(p2m_locked_by_me(p2m));
    ASSERT(pod_locked_by_me(p2m));

    /* Audit part one: walk the domain's p2m table, checking the entries. */
    if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 )
    {
        l2_pgentry_t *l2e;
        l1_pgentry_t *l1e;
        int i1, i2;

        l4_pgentry_t *l4e;
        l3_pgentry_t *l3e;
        int i4, i3;
        l4e = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));

        gfn = 0;
        for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
        {
            if ( !(l4e_get_flags(l4e[i4]) & _PAGE_PRESENT) )
            {
                gfn += 1 << (L4_PAGETABLE_SHIFT - PAGE_SHIFT);
                continue;
            }
            l3e = map_l3t_from_l4e(l4e[i4]);
            for ( i3 = 0; i3 < L3_PAGETABLE_ENTRIES; i3++ )
            {
                if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) )
                {
                    gfn += 1 << (L3_PAGETABLE_SHIFT - PAGE_SHIFT);
                    continue;
                }

                /* check for 1GB super page */
                if ( l3e_get_flags(l3e[i3]) & _PAGE_PSE )
                {
                    mfn = l3e_get_pfn(l3e[i3]);
                    ASSERT(mfn_valid(_mfn(mfn)));
                    /* we have to cover 512x512 4K pages */
                    for ( i2 = 0;
                          i2 < (L2_PAGETABLE_ENTRIES * L1_PAGETABLE_ENTRIES);
                          i2++ )
                    {
                        m2pfn = get_gpfn_from_mfn(mfn+i2);
                        if ( m2pfn != (gfn + i2) )
                        {
                            pmbad++;
                            P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
                                       " -> gfn %#lx\n", gfn+i2, mfn+i2,
                                       m2pfn);
                            BUG();
                        }
                    }
                    /* Advance past the whole 1GB range and move on. */
                    gfn += 1 << (L3_PAGETABLE_SHIFT - PAGE_SHIFT);
                    continue;
                }

                l2e = map_l2t_from_l3e(l3e[i3]);
                for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
                {
                    if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
                    {
                        if ( (l2e_get_flags(l2e[i2]) & _PAGE_PSE)
                             && ( p2m_flags_to_type(l2e_get_flags(l2e[i2]))
                                  == p2m_populate_on_demand ) )
                            entry_count += SUPERPAGE_PAGES;
                        gfn += 1 << (L2_PAGETABLE_SHIFT - PAGE_SHIFT);
                        continue;
                    }

                    /* check for super page */
                    if ( l2e_get_flags(l2e[i2]) & _PAGE_PSE )
                    {
                        mfn = l2e_get_pfn(l2e[i2]);
                        ASSERT(mfn_valid(_mfn(mfn)));
                        for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++ )
                        {
                            m2pfn = get_gpfn_from_mfn(mfn+i1);
                            /* Allow shared M2Ps */
                            if ( (m2pfn != (gfn + i1)) &&
                                 (m2pfn != SHARED_M2P_ENTRY) )
                            {
                                pmbad++;
                                P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
                                           " -> gfn %#lx\n", gfn+i1, mfn+i1,
                                           m2pfn);
                                BUG();
                            }
                        }
                        gfn += 1 << (L2_PAGETABLE_SHIFT - PAGE_SHIFT);
                        continue;
                    }

                    l1e = map_l1t_from_l2e(l2e[i2]);

                    for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
                    {
                        p2m_type_t type;

                        type = p2m_flags_to_type(l1e_get_flags(l1e[i1]));
                        if ( !(l1e_get_flags(l1e[i1]) & _PAGE_PRESENT) )
                        {
                            if ( type == p2m_populate_on_demand )
                                entry_count++;
                            continue;
                        }
                        mfn = l1e_get_pfn(l1e[i1]);
                        ASSERT(mfn_valid(_mfn(mfn)));
                        m2pfn = get_gpfn_from_mfn(mfn);
                        if ( m2pfn != gfn &&
                             type != p2m_mmio_direct &&
                             !p2m_is_grant(type) &&
                             !p2m_is_shared(type) )
                        {
                            pmbad++;
                            printk("mismatch: gfn %#lx -> mfn %#lx"
                                   " -> gfn %#lx\n", gfn, mfn, m2pfn);
                            P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
                                       " -> gfn %#lx\n", gfn, mfn, m2pfn);
                            BUG();
                        }
                    }
                    unmap_domain_page(l1e);
                }
                unmap_domain_page(l2e);
            }
            unmap_domain_page(l3e);
        }

        unmap_domain_page(l4e);
    }

    if ( entry_count != p2m->pod.entry_count )
    {
        printk("%s: refcounted entry count %ld, audit count %lu!\n",
               __func__,
               p2m->pod.entry_count,
               entry_count);
        BUG();
    }

    return pmbad;
}
#endif /* P2M_AUDIT */

/* Set up the p2m function pointers for pagetable format */
void p2m_pt_init(struct p2m_domain *p2m)
{
    p2m->set_entry = p2m_pt_set_entry;
    p2m->get_entry = p2m_pt_get_entry;
    p2m->recalc = do_recalc;
    p2m->change_entry_type_global = p2m_pt_change_entry_type_global;
    p2m->change_entry_type_range = p2m_pt_change_entry_type_range;
    p2m->write_p2m_entry = paging_write_p2m_entry;
#if P2M_AUDIT
    p2m->audit_p2m = p2m_pt_audit_p2m;
#else
    p2m->audit_p2m = NULL;
#endif
}
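
/*
 * A sketch of how this init hook gets selected, assuming the usual
 * p2m_initialise() logic in arch/x86/mm/p2m.c of this vintage:
 *
 *     if ( hap_enabled(d) && cpu_has_vmx )
 *         ret = ept_p2m_init(p2m);
 *     else
 *         p2m_pt_init(p2m);
 *
 * i.e. the pagetable format set up here serves shadow mode and AMD NPT.
 */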