
view xen/drivers/passthrough/vtd/x86/vtd.c @ 20974:9a9ea52c3680

VT-d: get rid of duplicated definition

free_pgtable_maddr was implemented identically for x86 and IA64, so there
is no need to define it separately for each architecture. This patch moves
the free_pgtable_maddr definition into iommu.c to avoid the duplication.

Signed-off-by: Weidong Han <weidong.han@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 11 19:51:15 2010 +0000 (2010-02-11)
parents 07f95839e431
children 220fa418eaae
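
For context, the duplicated helper referred to above releases a VT-d page-table
page identified by its machine address. A minimal sketch of the consolidated
definition, assuming page-table pages are allocated from the domain heap (as
alloc_pgtable_maddr does); the exact body in iommu.c may differ slightly:

/* Sketch only: free a VT-d page-table page given its machine address;
 * a maddr of zero means no page was allocated. */
void free_pgtable_maddr(u64 maddr)
{
    if ( maddr != 0 )
        free_domheap_page(maddr_to_page(maddr));
}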
line source
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 * Copyright (C) Weidong Han <weidong.han@intel.com>
 */

#include <xen/sched.h>
#include <xen/domain_page.h>
#include <asm/paging.h>
#include <xen/iommu.h>
#include <xen/numa.h>
#include "../iommu.h"
#include "../dmar.h"
#include "../vtd.h"

/*
 * iommu_inclusive_mapping: when set, all memory below 4GB is included in dom0
 * 1:1 iommu mappings except xen and unusable regions.
 */
static int iommu_inclusive_mapping = 1;
boolean_param("iommu_inclusive_mapping", iommu_inclusive_mapping);

void *map_vtd_domain_page(u64 maddr)
{
    return map_domain_page(maddr >> PAGE_SHIFT_4K);
}

void unmap_vtd_domain_page(void *va)
{
    unmap_domain_page(va);
}

unsigned int get_cache_line_size(void)
{
    return ((cpuid_ebx(1) >> 8) & 0xff) * 8;
}

void cacheline_flush(char * addr)
{
    clflush(addr);
}

void flush_all_cache()
{
    wbinvd();
}

void *map_to_nocache_virt(int nr_iommus, u64 maddr)
{
    set_fixmap_nocache(FIX_IOMMU_REGS_BASE_0 + nr_iommus, maddr);
    return (void *)fix_to_virt(FIX_IOMMU_REGS_BASE_0 + nr_iommus);
}

struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
{
    if ( !domain )
        return NULL;

    return domain->arch.hvm_domain.irq.dpci;
}

int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
{
    if ( !domain || !dpci )
        return 0;

    domain->arch.hvm_domain.irq.dpci = dpci;
    return 1;
}

void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
{
    struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
    struct hvm_irq_dpci *dpci = NULL;
    struct dev_intx_gsi_link *digl, *tmp;
    int i;

    ASSERT(isairq < NR_ISAIRQS);
    if ( !iommu_enabled )
        return;

    spin_lock(&d->event_lock);

    dpci = domain_get_irq_dpci(d);

    if ( !dpci || !test_bit(isairq, dpci->isairq_map) )
    {
        spin_unlock(&d->event_lock);
        return;
    }
    /* Multiple mirq may be mapped to one isa irq */
    for ( i = find_first_bit(dpci->mapping, d->nr_pirqs);
          i < d->nr_pirqs;
          i = find_next_bit(dpci->mapping, d->nr_pirqs, i + 1) )
    {
        list_for_each_entry_safe ( digl, tmp,
                                   &dpci->mirq[i].digl_list, list )
        {
            if ( hvm_irq->pci_link.route[digl->link] == isairq )
            {
                hvm_pci_intx_deassert(d, digl->device, digl->intx);
                if ( --dpci->mirq[i].pending == 0 )
                {
                    stop_timer(&dpci->hvm_timer[domain_pirq_to_irq(d, i)]);
                    pirq_guest_eoi(d, i);
                }
            }
        }
    }
    spin_unlock(&d->event_lock);
}

void iommu_set_dom0_mapping(struct domain *d)
{
    u64 i, j, tmp, max_pfn;
    extern int xen_in_range(unsigned long mfn);

    BUG_ON(d->domain_id != 0);

    max_pfn = max_t(u64, max_page, 0x100000000ull >> PAGE_SHIFT);

    for ( i = 0; i < max_pfn; i++ )
    {
        /*
         * Set up 1:1 mapping for dom0. Default to use only conventional RAM
         * areas and let RMRRs include needed reserved regions. When set, the
         * inclusive mapping maps in everything below 4GB except unusable
         * ranges.
         */
        if ( !page_is_ram_type(i, RAM_TYPE_CONVENTIONAL) &&
             (!iommu_inclusive_mapping ||
              page_is_ram_type(i, RAM_TYPE_UNUSABLE)) )
            continue;

        /* Exclude Xen bits */
        if ( xen_in_range(i) )
            continue;

        tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
        for ( j = 0; j < tmp; j++ )
            iommu_map_page(d, (i*tmp+j), (i*tmp+j));
    }
}
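
A usage note on iommu_inclusive_mapping: the boolean_param() registration above
exposes it as a Xen boot-time command-line option, so the inclusive dom0 1:1
mapping can be disabled by appending iommu_inclusive_mapping=0 (or an equivalent
boolean form such as =no) to the hypervisor line of the bootloader entry; it
defaults to enabled, as the initializer shows.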