debuggers.hg: xen/include/asm-x86/p2m.h @ 0:7d21f7218375

Exact replica of xen-unstable as of 2008-05-19, plus README-this
author:   Mukesh Rathor
date:     Mon May 19 15:34:57 2008 -0700
children: 5c0bf00e371d
/******************************************************************************
 * include/asm-x86/p2m.h
 *
 * physical-to-machine mappings for automatically-translated domains.
 *
 * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
 * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef _XEN_P2M_H
#define _XEN_P2M_H

#include <xen/config.h>
#include <xen/paging.h>
/*
 * The phys_to_machine_mapping maps guest physical frame numbers
 * to machine frame numbers.  It only exists for paging_mode_translate
 * guests.  It is organised in page-table format, which:
 *
 * (1) allows us to use it directly as the second pagetable in hardware-
 *     assisted paging and (hopefully) iommu support; and
 * (2) lets us map it directly into the guest vcpus' virtual address space
 *     as a linear pagetable, so we can read and write it easily.
 *
 * For (2) we steal the address space that would have normally been used
 * by the read-only MPT map in a non-translated guest.  (For
 * paging_mode_external() guests this mapping is in the monitor table.)
 */
#define phys_to_machine_mapping ((l1_pgentry_t *)RO_MPT_VIRT_START)
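/*
 * Sketch (illustration only, not part of the original interface): because
 * the p2m is mapped as a linear pagetable at RO_MPT_VIRT_START, reading
 * the current domain's translation for a gfn is a single indexed read.
 * The in-tree reader in p2m.c copies the entry out with __copy_from_user()
 * so that holes in the linear map fault safely; this simplified version
 * assumes the slot is mapped and that gfn is below the table limit.
 */
static inline l1_pgentry_t p2m_linear_lookup_sketch(unsigned long gfn)
{
    return phys_to_machine_mapping[gfn];
}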
/*
 * The upper levels of the p2m pagetable always contain full rights; all
 * variation in the access control bits is made in the level-1 PTEs.
 *
 * In addition to the phys-to-machine translation, each p2m PTE contains
 * *type* information about the gfn it translates, helping Xen to decide
 * on the correct course of action when handling a page-fault to that
 * guest frame.  We store the type in the "available" bits of the PTEs
 * in the table, which gives us 8 possible types on 32-bit systems.
 * Further expansions of the type system will only be supported on
 * 64-bit Xen.
 */
typedef enum {
    p2m_invalid = 0,        /* Nothing mapped here */
    p2m_ram_rw = 1,         /* Normal read/write guest RAM */
    p2m_ram_logdirty = 2,   /* Temporarily read-only for log-dirty */
    p2m_ram_ro = 3,         /* Read-only; writes go to the device model */
    p2m_mmio_dm = 4,        /* Reads and writes go to the device model */
    p2m_mmio_direct = 5,    /* Read/write mapping of genuine MMIO area */
} p2m_type_t;
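/*
 * Sketch (illustration only): the inverse of p2m_flags_to_type() defined
 * below, packing a type into the three "available" PTE bits (9-11).  The
 * base flags used here are an assumption for illustration; the exact set
 * chosen by the implementation in p2m.c may differ.
 */
static inline unsigned long p2m_type_to_flags_sketch(p2m_type_t t)
{
    /* _PAGE_PRESENT | _PAGE_USER is an assumed minimal base. */
    return (_PAGE_PRESENT | _PAGE_USER) | ((unsigned long)t << 9);
}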
/* We use bitmaps and masks to handle groups of types */
#define p2m_to_mask(_t) (1UL << (_t))

/* RAM types, which map to real machine frames */
#define P2M_RAM_TYPES (p2m_to_mask(p2m_ram_rw)          \
                       | p2m_to_mask(p2m_ram_logdirty)  \
                       | p2m_to_mask(p2m_ram_ro))

/* MMIO types, which don't have to map to anything in the frametable */
#define P2M_MMIO_TYPES (p2m_to_mask(p2m_mmio_dm)        \
                        | p2m_to_mask(p2m_mmio_direct))

/* Read-only types, which must have the _PAGE_RW bit clear in their PTEs */
#define P2M_RO_TYPES (p2m_to_mask(p2m_ram_logdirty)     \
                      | p2m_to_mask(p2m_ram_ro))

/* Useful predicates */
#define p2m_is_ram(_t) (p2m_to_mask(_t) & P2M_RAM_TYPES)
#define p2m_is_mmio(_t) (p2m_to_mask(_t) & P2M_MMIO_TYPES)
#define p2m_is_readonly(_t) (p2m_to_mask(_t) & P2M_RO_TYPES)
#define p2m_is_valid(_t) (p2m_to_mask(_t) & (P2M_RAM_TYPES | P2M_MMIO_TYPES))
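/*
 * Sketch (illustration only): how a page-fault handler might classify a
 * faulting gfn using the predicates above.  The return codes are
 * hypothetical stand-ins for whatever the caller does next; the control
 * flow, not the actions, is the point.
 */
static inline int p2m_fault_classify_sketch(p2m_type_t t, int is_write)
{
    if ( !p2m_is_valid(t) )
        return 0;   /* nothing mapped: inject the fault to the guest */
    if ( p2m_is_mmio(t) )
        return 1;   /* device-model or direct MMIO access */
    if ( is_write && p2m_is_readonly(t) )
        return 2;   /* write to r/o RAM: log-dirty or emulated write */
    return 3;       /* ordinary RAM: fix up and retry */
}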
struct p2m_domain {
    /* Lock that protects updates to the p2m */
    spinlock_t         lock;
    int                locker;          /* processor which holds the lock */
    const char        *locker_function; /* Func that took it */

    /* Pages used to construct the p2m */
    struct list_head   pages;

    /* Functions to call to get or free pages for the p2m */
    struct page_info * (*alloc_page  )(struct domain *d);
    void               (*free_page   )(struct domain *d,
                                       struct page_info *pg);
    int                (*set_entry   )(struct domain *d, unsigned long gfn,
                                       mfn_t mfn, unsigned int page_order,
                                       p2m_type_t p2mt);
    mfn_t              (*get_entry   )(struct domain *d, unsigned long gfn,
                                       p2m_type_t *p2mt);
    mfn_t              (*get_entry_current)(unsigned long gfn,
                                            p2m_type_t *p2mt);
    void               (*change_entry_type_global)(struct domain *d,
                                                   p2m_type_t ot,
                                                   p2m_type_t nt);

    /* Highest guest frame that's ever been mapped in the p2m */
    unsigned long max_mapped_pfn;
};
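/*
 * Sketch (illustration only): callers never walk the table themselves;
 * they go through the hooks so that different table implementations can
 * sit behind the same interface.  A write wrapper looks essentially like
 * this, mirroring the read accessors defined later in this header.
 */
static inline int p2m_set_entry_sketch(struct domain *d, unsigned long gfn,
                                       mfn_t mfn, unsigned int page_order,
                                       p2m_type_t p2mt)
{
    return d->arch.p2m->set_entry(d, gfn, mfn, page_order, p2mt);
}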
/* Extract the type from the PTE flags that store it */
static inline p2m_type_t p2m_flags_to_type(unsigned long flags)
{
    /* Type is stored in the "available" bits, 9, 10 and 11 */
    return (flags >> 9) & 0x7;
}

/* Read the current domain's p2m table. */
static inline mfn_t gfn_to_mfn_current(unsigned long gfn, p2m_type_t *t)
{
    return current->domain->arch.p2m->get_entry_current(gfn, t);
}
/* Read another domain's P2M table, mapping pages as we go */
static inline
mfn_t gfn_to_mfn_foreign(struct domain *d, unsigned long gfn, p2m_type_t *t)
{
    return d->arch.p2m->get_entry(d, gfn, t);
}

/* General conversion function from gfn to mfn */
#define gfn_to_mfn(d, g, t) _gfn_to_mfn((d), (g), (t))
static inline mfn_t _gfn_to_mfn(struct domain *d,
                                unsigned long gfn, p2m_type_t *t)
{
    if ( !paging_mode_translate(d) )
    {
        /* Not necessarily true, but for non-translated guests, we claim
         * it's the most generic kind of memory */
        *t = p2m_ram_rw;
        return _mfn(gfn);
    }
    if ( likely(current->domain == d) )
        return gfn_to_mfn_current(gfn, t);
    else
        return gfn_to_mfn_foreign(d, gfn, t);
}
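/*
 * Sketch (illustration only): the typical caller pattern for the typed
 * lookup above - capture the type and check it before trusting the
 * returned mfn.  mfn_valid() is assumed to come from the mm headers.
 */
static inline int gfn_is_ram_sketch(struct domain *d, unsigned long gfn)
{
    p2m_type_t t;
    mfn_t mfn = gfn_to_mfn(d, gfn, &t);
    return p2m_is_ram(t) && mfn_valid(mfn);
}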
/* Compatibility function exporting the old untyped interface */
static inline unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
{
    mfn_t mfn;
    p2m_type_t t;
    mfn = gfn_to_mfn(d, gpfn, &t);
    if ( p2m_is_valid(t) )
        return mfn_x(mfn);
    return INVALID_MFN;
}
/* General conversion function from mfn to gfn */
static inline unsigned long mfn_to_gfn(struct domain *d, mfn_t mfn)
{
    if ( paging_mode_translate(d) )
        return get_gpfn_from_mfn(mfn_x(mfn));
    else
        return mfn_x(mfn);
}

/* Translate the frame number held in an l1e from guest to machine */
static inline l1_pgentry_t
gl1e_to_ml1e(struct domain *d, l1_pgentry_t l1e)
{
    if ( unlikely(paging_mode_translate(d)) )
        l1e = l1e_from_pfn(gmfn_to_mfn(d, l1e_get_pfn(l1e)),
                           l1e_get_flags(l1e));
    return l1e;
}
/* Init the datastructures for later use by the p2m code */
int p2m_init(struct domain *d);

/* Allocate a new p2m table for a domain.
 *
 * The alloc_page and free_page functions will be used to get memory to
 * build the p2m, and to release it again at the end of day.
 *
 * Returns 0 for success or -errno. */
int p2m_alloc_table(struct domain *d,
                    struct page_info * (*alloc_page)(struct domain *d),
                    void (*free_page)(struct domain *d, struct page_info *pg));

/* Return all the p2m resources to Xen. */
void p2m_teardown(struct domain *d);
void p2m_final_teardown(struct domain *d);
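/*
 * Sketch (illustration only): the intended lifecycle.  The alloc/free
 * callbacks are hypothetical stand-ins for whatever pool the real caller
 * draws p2m pages from; the ordering is what this sketch shows.
 */
static inline int p2m_lifecycle_sketch(
    struct domain *d,
    struct page_info * (*alloc)(struct domain *d),
    void (*free)(struct domain *d, struct page_info *pg))
{
    int rc = p2m_init(d);                 /* set up the bookkeeping */
    if ( rc == 0 )
        rc = p2m_alloc_table(d, alloc, free);  /* build the table */
    /* ... domain runs ...
     * On destruction: p2m_teardown(d), then p2m_final_teardown(d). */
    return rc;
}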
/* Add a page to a domain's p2m table */
int guest_physmap_add_entry(struct domain *d, unsigned long gfn,
                            unsigned long mfn, unsigned int page_order,
                            p2m_type_t t);

/* Untyped version for RAM only, for compatibility
 *
 * Return 0 for success
 */
static inline int guest_physmap_add_page(struct domain *d, unsigned long gfn,
                                         unsigned long mfn,
                                         unsigned int page_order)
{
    return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
}

/* Remove a page from a domain's p2m table */
void guest_physmap_remove_page(struct domain *d, unsigned long gfn,
                               unsigned long mfn, unsigned int page_order);
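/*
 * Sketch (illustration only): populating a single 4k guest frame uses the
 * untyped compatibility wrapper with order 0; ballooning the page back
 * out is the matching remove call.
 */
static inline int p2m_populate_one_sketch(struct domain *d,
                                          unsigned long gfn,
                                          unsigned long mfn)
{
    /* Matching teardown: guest_physmap_remove_page(d, gfn, mfn, 0); */
    return guest_physmap_add_page(d, gfn, mfn, 0);
}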
/* Change types across all p2m entries in a domain */
void p2m_change_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt);
void p2m_change_entry_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt);

/* Compare-exchange the type of a single p2m entry */
p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
                           p2m_type_t ot, p2m_type_t nt);
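/*
 * Sketch (illustration only): how log-dirty tracking can use these calls.
 * Enabling demotes all normal RAM to the read-only logdirty type in one
 * sweep; a later write fault promotes just the faulting frame back to
 * read/write (the dirty bitmap update happens elsewhere).
 */
static inline void p2m_logdirty_sketch(struct domain *d, unsigned long gfn)
{
    /* Enable: every p2m_ram_rw entry becomes p2m_ram_logdirty. */
    p2m_change_type_global(d, p2m_ram_rw, p2m_ram_logdirty);

    /* On a write fault to gfn: flip just that entry back to r/w. */
    p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw);
}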
/* Set mmio addresses in the p2m table (for pass-through) */
int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
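/*
 * Sketch (illustration only): device pass-through maps a real MMIO frame
 * straight into the guest's physmap as p2m_mmio_direct.  The gfn/mfn
 * values are placeholders, and the return value is simply whatever
 * set_mmio_p2m_entry() reports.
 */
static inline int p2m_map_device_frame_sketch(struct domain *d,
                                              unsigned long gfn,
                                              unsigned long mfn)
{
    /* Later, on device unplug: clear_mmio_p2m_entry(d, gfn); */
    return set_mmio_p2m_entry(d, gfn, _mfn(mfn));
}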
#endif /* _XEN_P2M_H */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */