debuggers.hg

view xen/drivers/passthrough/vtd/ia64/vtd.c @ 20974:9a9ea52c3680

VT-d: get rid of duplicated definition

free_pgtable_maddr was implemented the same for x86 and IA64, so it's
not necessary to define it separately for x86 and IA64. This patch
moves free_pgtable_maddr definition to iommu.c to avoid duplicated
definition.

Signed-off-by: Weidong Han <weidong.han@intel.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Feb 11 19:51:15 2010 +0000 (2010-02-11)
parents aa472909b39c
children 3ffdb094c2c0 220fa418eaae
line source
1 /*
2 * Copyright (c) 2008, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
18 * Copyright (C) Weidong Han <weidong.han@intel.com>
19 */
21 #include <xen/sched.h>
22 #include <xen/domain_page.h>
23 #include <xen/iommu.h>
24 #include <xen/numa.h>
25 #include <asm/xensystem.h>
26 #include <asm/sal.h>
27 #include "../iommu.h"
28 #include "../dmar.h"
29 #include "../vtd.h"
/*
 * Per-vector IRQ binding table: vector_irq[v] is the IRQ bound to vector v,
 * or FREE_TO_ASSIGN_IRQ when the vector is unallocated (all entries start
 * free via the GNU range-designator initializer).
 */
int vector_irq[NR_VECTORS] __read_mostly = {
    [0 ... NR_VECTORS - 1] = FREE_TO_ASSIGN_IRQ
};

/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
u8 irq_vector[NR_IRQS] __read_mostly;
38 void *map_vtd_domain_page(u64 maddr)
39 {
40 return (void *)((u64)map_domain_page(maddr >> PAGE_SHIFT) |
41 (maddr & (PAGE_SIZE - PAGE_SIZE_4K)));
42 }
/* Release a mapping obtained from map_vtd_domain_page(). */
void unmap_vtd_domain_page(void *va)
{
    unmap_domain_page(va);
}
49 unsigned int get_cache_line_size(void)
50 {
51 return L1_CACHE_BYTES;
52 }
/*
 * Flush the cache line containing *addr to memory and serialize so the
 * flush is visible before subsequent instructions.  The three ia64
 * operations below (fc / sync.i / srlz.i) must remain in this order.
 */
void cacheline_flush(char * addr)
{
    ia64_fc(addr);   /* fc: flush the cache line holding addr */
    ia64_sync_i();   /* sync.i: wait for the flush to complete */
    ia64_srlz_i();   /* srlz.i: serialize the instruction stream */
}
/*
 * Flush the entire cache hierarchy through the SAL firmware call.
 *
 * Fix: declare the function with a proper "(void)" prototype — the
 * original empty parameter list "()" declares an unprototyped function
 * (obsolescent in C11, removed in C23) and disables argument checking.
 */
void flush_all_cache(void)
{
    /* SAL_CACHE_FLUSH argument 3 = flush both instruction and data caches
     * (per the Itanium SAL specification) — TODO confirm against asm/sal.h. */
    ia64_sal_cache_flush(3);
}
66 void * map_to_nocache_virt(int nr_iommus, u64 maddr)
67 {
68 return (void *) ( maddr + __IA64_UNCACHED_OFFSET);
69 }
71 struct hvm_irq_dpci *domain_get_irq_dpci(struct domain *domain)
72 {
73 if ( !domain )
74 return NULL;
76 return domain->arch.hvm_domain.irq.dpci;
77 }
79 int domain_set_irq_dpci(struct domain *domain, struct hvm_irq_dpci *dpci)
80 {
81 if ( !domain || !dpci )
82 return 0;
84 domain->arch.hvm_domain.irq.dpci = dpci;
85 return 1;
86 }
/* ISA IRQ EOI notification hook: intentionally a no-op on ia64. */
void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
{
    /* Nothing to do on this architecture. */
}
93 static int do_dom0_iommu_mapping(unsigned long start, unsigned long end,
94 void *arg)
95 {
96 unsigned long tmp, pfn, j, page_addr = start;
97 struct domain *d = (struct domain *)arg;
99 extern int xen_in_range(paddr_t start, paddr_t end);
100 /* Set up 1:1 page table for dom0 for all Ram except Xen bits.*/
102 while (page_addr < end)
103 {
104 if (xen_in_range(page_addr, page_addr + PAGE_SIZE))
105 continue;
107 pfn = page_addr >> PAGE_SHIFT;
108 tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
109 for ( j = 0; j < tmp; j++ )
110 iommu_map_page(d, (pfn*tmp+j), (pfn*tmp+j));
112 page_addr += PAGE_SIZE;
113 }
114 return 0;
115 }
117 void iommu_set_dom0_mapping(struct domain *d)
118 {
119 if (dom0)
120 BUG_ON(d != dom0);
121 efi_memmap_walk(do_dom0_iommu_mapping, d);
122 }