debuggers.hg
changeset 9830:70467f5491d8
[IA64] dom0 vp model linux part: import machvec.h from linux-2.6.16-rc3
[note: verified same as linux-2.6.16]
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
author   | awilliam@xenbuild.aw
date     | Fri Apr 07 14:05:40 2006 -0600 (2006-04-07)
parents  | ebec4edfa8e4
children | bfc00c83f083
files    | linux-2.6-xen-sparse/include/asm-ia64/machvec.h
line diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/linux-2.6-xen-sparse/include/asm-ia64/machvec.h	Fri Apr 07 14:05:40 2006 -0600
@@ -0,0 +1,390 @@
+/*
+ * Machine vector for IA-64.
+ *
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
+ * Copyright (C) 1999-2001, 2003-2004 Hewlett-Packard Co.
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#ifndef _ASM_IA64_MACHVEC_H
+#define _ASM_IA64_MACHVEC_H
+
+#include <linux/config.h>
+#include <linux/types.h>
+
+/* forward declarations: */
+struct device;
+struct pt_regs;
+struct scatterlist;
+struct page;
+struct mm_struct;
+struct pci_bus;
+
+typedef void ia64_mv_setup_t (char **);
+typedef void ia64_mv_cpu_init_t (void);
+typedef void ia64_mv_irq_init_t (void);
+typedef void ia64_mv_send_ipi_t (int, int, int, int);
+typedef void ia64_mv_timer_interrupt_t (int, void *, struct pt_regs *);
+typedef void ia64_mv_global_tlb_purge_t (struct mm_struct *, unsigned long, unsigned long, unsigned long);
+typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
+typedef unsigned int ia64_mv_local_vector_to_irq (u8);
+typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
+typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
+				       u8 size);
+typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
+					u8 size);
+
+/* DMA-mapping interface: */
+typedef void ia64_mv_dma_init (void);
+typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, gfp_t);
+typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t);
+typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int);
+typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int);
+typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int);
+typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int);
+typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int);
+typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
+typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
+typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
+typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
+typedef int ia64_mv_dma_supported (struct device *, u64);
+
+/*
+ * WARNING: The legacy I/O space is _architected_.  Platforms are
+ * expected to follow this architected model (see Section 10.7 in the
+ * IA-64 Architecture Software Developer's Manual).  Unfortunately,
+ * some broken machines do not follow that model, which is why we have
+ * to make the inX/outX operations part of the machine vector.
+ * Platform designers should follow the architected model whenever
+ * possible.
+ */
+typedef unsigned int ia64_mv_inb_t (unsigned long);
+typedef unsigned int ia64_mv_inw_t (unsigned long);
+typedef unsigned int ia64_mv_inl_t (unsigned long);
+typedef void ia64_mv_outb_t (unsigned char, unsigned long);
+typedef void ia64_mv_outw_t (unsigned short, unsigned long);
+typedef void ia64_mv_outl_t (unsigned int, unsigned long);
+typedef void ia64_mv_mmiowb_t (void);
+typedef unsigned char ia64_mv_readb_t (const volatile void __iomem *);
+typedef unsigned short ia64_mv_readw_t (const volatile void __iomem *);
+typedef unsigned int ia64_mv_readl_t (const volatile void __iomem *);
+typedef unsigned long ia64_mv_readq_t (const volatile void __iomem *);
+typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
+typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
+typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
+typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);
+
+static inline void
+machvec_noop (void)
+{
+}
+
+static inline void
+machvec_noop_mm (struct mm_struct *mm)
+{
+}
+
+extern void machvec_setup (char **);
+extern void machvec_timer_interrupt (int, void *, struct pt_regs *);
+extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int);
+extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int);
+extern void machvec_tlb_migrate_finish (struct mm_struct *);
+
+# if defined (CONFIG_IA64_HP_SIM)
+#  include <asm/machvec_hpsim.h>
+# elif defined (CONFIG_IA64_DIG)
+#  include <asm/machvec_dig.h>
+# elif defined (CONFIG_IA64_HP_ZX1)
+#  include <asm/machvec_hpzx1.h>
+# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
+#  include <asm/machvec_hpzx1_swiotlb.h>
+# elif defined (CONFIG_IA64_SGI_SN2)
+#  include <asm/machvec_sn2.h>
+# elif defined (CONFIG_IA64_GENERIC)
+
+# ifdef MACHVEC_PLATFORM_HEADER
+#  include MACHVEC_PLATFORM_HEADER
+# else
+#  define platform_name			ia64_mv.name
+#  define platform_setup		ia64_mv.setup
+#  define platform_cpu_init		ia64_mv.cpu_init
+#  define platform_irq_init		ia64_mv.irq_init
+#  define platform_send_ipi		ia64_mv.send_ipi
+#  define platform_timer_interrupt	ia64_mv.timer_interrupt
+#  define platform_global_tlb_purge	ia64_mv.global_tlb_purge
+#  define platform_tlb_migrate_finish	ia64_mv.tlb_migrate_finish
+#  define platform_dma_init		ia64_mv.dma_init
+#  define platform_dma_alloc_coherent	ia64_mv.dma_alloc_coherent
+#  define platform_dma_free_coherent	ia64_mv.dma_free_coherent
+#  define platform_dma_map_single	ia64_mv.dma_map_single
+#  define platform_dma_unmap_single	ia64_mv.dma_unmap_single
+#  define platform_dma_map_sg		ia64_mv.dma_map_sg
+#  define platform_dma_unmap_sg		ia64_mv.dma_unmap_sg
+#  define platform_dma_sync_single_for_cpu	ia64_mv.dma_sync_single_for_cpu
+#  define platform_dma_sync_sg_for_cpu		ia64_mv.dma_sync_sg_for_cpu
+#  define platform_dma_sync_single_for_device	ia64_mv.dma_sync_single_for_device
+#  define platform_dma_sync_sg_for_device	ia64_mv.dma_sync_sg_for_device
+#  define platform_dma_mapping_error		ia64_mv.dma_mapping_error
+#  define platform_dma_supported		ia64_mv.dma_supported
+#  define platform_local_vector_to_irq	ia64_mv.local_vector_to_irq
+#  define platform_pci_get_legacy_mem	ia64_mv.pci_get_legacy_mem
+#  define platform_pci_legacy_read	ia64_mv.pci_legacy_read
+#  define platform_pci_legacy_write	ia64_mv.pci_legacy_write
+#  define platform_inb		ia64_mv.inb
+#  define platform_inw		ia64_mv.inw
+#  define platform_inl		ia64_mv.inl
+#  define platform_outb		ia64_mv.outb
+#  define platform_outw		ia64_mv.outw
+#  define platform_outl		ia64_mv.outl
+#  define platform_mmiowb	ia64_mv.mmiowb
+#  define platform_readb	ia64_mv.readb
+#  define platform_readw	ia64_mv.readw
+#  define platform_readl	ia64_mv.readl
+#  define platform_readq	ia64_mv.readq
+#  define platform_readb_relaxed	ia64_mv.readb_relaxed
+#  define platform_readw_relaxed	ia64_mv.readw_relaxed
+#  define platform_readl_relaxed	ia64_mv.readl_relaxed
+#  define platform_readq_relaxed	ia64_mv.readq_relaxed
+# endif
+
+/* __attribute__((__aligned__(16))) is required to make size of the
+ * structure multiple of 16 bytes.
+ * This will fillup the holes created because of section 3.3.1 in
+ * Software Conventions guide.
+ */
+struct ia64_machine_vector {
+	const char *name;
+	ia64_mv_setup_t *setup;
+	ia64_mv_cpu_init_t *cpu_init;
+	ia64_mv_irq_init_t *irq_init;
+	ia64_mv_send_ipi_t *send_ipi;
+	ia64_mv_timer_interrupt_t *timer_interrupt;
+	ia64_mv_global_tlb_purge_t *global_tlb_purge;
+	ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
+	ia64_mv_dma_init *dma_init;
+	ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
+	ia64_mv_dma_free_coherent *dma_free_coherent;
+	ia64_mv_dma_map_single *dma_map_single;
+	ia64_mv_dma_unmap_single *dma_unmap_single;
+	ia64_mv_dma_map_sg *dma_map_sg;
+	ia64_mv_dma_unmap_sg *dma_unmap_sg;
+	ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
+	ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
+	ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
+	ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
+	ia64_mv_dma_mapping_error *dma_mapping_error;
+	ia64_mv_dma_supported *dma_supported;
+	ia64_mv_local_vector_to_irq *local_vector_to_irq;
+	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
+	ia64_mv_pci_legacy_read_t *pci_legacy_read;
+	ia64_mv_pci_legacy_write_t *pci_legacy_write;
+	ia64_mv_inb_t *inb;
+	ia64_mv_inw_t *inw;
+	ia64_mv_inl_t *inl;
+	ia64_mv_outb_t *outb;
+	ia64_mv_outw_t *outw;
+	ia64_mv_outl_t *outl;
+	ia64_mv_mmiowb_t *mmiowb;
+	ia64_mv_readb_t *readb;
+	ia64_mv_readw_t *readw;
+	ia64_mv_readl_t *readl;
+	ia64_mv_readq_t *readq;
+	ia64_mv_readb_relaxed_t *readb_relaxed;
+	ia64_mv_readw_relaxed_t *readw_relaxed;
+	ia64_mv_readl_relaxed_t *readl_relaxed;
+	ia64_mv_readq_relaxed_t *readq_relaxed;
+} __attribute__((__aligned__(16))); /* align attrib? see above comment */
+
+#define MACHVEC_INIT(name)			\
+{						\
+	#name,					\
+	platform_setup,				\
+	platform_cpu_init,			\
+	platform_irq_init,			\
+	platform_send_ipi,			\
+	platform_timer_interrupt,		\
+	platform_global_tlb_purge,		\
+	platform_tlb_migrate_finish,		\
+	platform_dma_init,			\
+	platform_dma_alloc_coherent,		\
+	platform_dma_free_coherent,		\
+	platform_dma_map_single,		\
+	platform_dma_unmap_single,		\
+	platform_dma_map_sg,			\
+	platform_dma_unmap_sg,			\
+	platform_dma_sync_single_for_cpu,	\
+	platform_dma_sync_sg_for_cpu,		\
+	platform_dma_sync_single_for_device,	\
+	platform_dma_sync_sg_for_device,	\
+	platform_dma_mapping_error,		\
+	platform_dma_supported,			\
+	platform_local_vector_to_irq,		\
+	platform_pci_get_legacy_mem,		\
+	platform_pci_legacy_read,		\
+	platform_pci_legacy_write,		\
+	platform_inb,				\
+	platform_inw,				\
+	platform_inl,				\
+	platform_outb,				\
+	platform_outw,				\
+	platform_outl,				\
+	platform_mmiowb,			\
+	platform_readb,				\
+	platform_readw,				\
+	platform_readl,				\
+	platform_readq,				\
+	platform_readb_relaxed,			\
+	platform_readw_relaxed,			\
+	platform_readl_relaxed,			\
+	platform_readq_relaxed,			\
+}
+
+extern struct ia64_machine_vector ia64_mv;
+extern void machvec_init (const char *name);
+
+# else
+#  error Unknown configuration.  Update asm-ia64/machvec.h.
+# endif /* CONFIG_IA64_GENERIC */
+
+/*
+ * Declare default routines which aren't declared anywhere else:
+ */
+extern ia64_mv_dma_init			swiotlb_init;
+extern ia64_mv_dma_alloc_coherent	swiotlb_alloc_coherent;
+extern ia64_mv_dma_free_coherent	swiotlb_free_coherent;
+extern ia64_mv_dma_map_single		swiotlb_map_single;
+extern ia64_mv_dma_unmap_single		swiotlb_unmap_single;
+extern ia64_mv_dma_map_sg		swiotlb_map_sg;
+extern ia64_mv_dma_unmap_sg		swiotlb_unmap_sg;
+extern ia64_mv_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu;
+extern ia64_mv_dma_sync_sg_for_cpu	swiotlb_sync_sg_for_cpu;
+extern ia64_mv_dma_sync_single_for_device	swiotlb_sync_single_for_device;
+extern ia64_mv_dma_sync_sg_for_device	swiotlb_sync_sg_for_device;
+extern ia64_mv_dma_mapping_error	swiotlb_dma_mapping_error;
+extern ia64_mv_dma_supported		swiotlb_dma_supported;
+
+/*
+ * Define default versions so we can extend machvec for new platforms without having
+ * to update the machvec files for all existing platforms.
+ */
+#ifndef platform_setup
+# define platform_setup			machvec_setup
+#endif
+#ifndef platform_cpu_init
+# define platform_cpu_init		machvec_noop
+#endif
+#ifndef platform_irq_init
+# define platform_irq_init		machvec_noop
+#endif
+
+#ifndef platform_send_ipi
+# define platform_send_ipi		ia64_send_ipi	/* default to architected version */
+#endif
+#ifndef platform_timer_interrupt
+# define platform_timer_interrupt	machvec_timer_interrupt
+#endif
+#ifndef platform_global_tlb_purge
+# define platform_global_tlb_purge	ia64_global_tlb_purge	/* default to architected version */
+#endif
+#ifndef platform_tlb_migrate_finish
+# define platform_tlb_migrate_finish	machvec_noop_mm
+#endif
+#ifndef platform_dma_init
+# define platform_dma_init		swiotlb_init
+#endif
+#ifndef platform_dma_alloc_coherent
+# define platform_dma_alloc_coherent	swiotlb_alloc_coherent
+#endif
+#ifndef platform_dma_free_coherent
+# define platform_dma_free_coherent	swiotlb_free_coherent
+#endif
+#ifndef platform_dma_map_single
+# define platform_dma_map_single	swiotlb_map_single
+#endif
+#ifndef platform_dma_unmap_single
+# define platform_dma_unmap_single	swiotlb_unmap_single
+#endif
+#ifndef platform_dma_map_sg
+# define platform_dma_map_sg		swiotlb_map_sg
+#endif
+#ifndef platform_dma_unmap_sg
+# define platform_dma_unmap_sg		swiotlb_unmap_sg
+#endif
+#ifndef platform_dma_sync_single_for_cpu
+# define platform_dma_sync_single_for_cpu	swiotlb_sync_single_for_cpu
+#endif
+#ifndef platform_dma_sync_sg_for_cpu
+# define platform_dma_sync_sg_for_cpu		swiotlb_sync_sg_for_cpu
+#endif
+#ifndef platform_dma_sync_single_for_device
+# define platform_dma_sync_single_for_device	swiotlb_sync_single_for_device
+#endif
+#ifndef platform_dma_sync_sg_for_device
+# define platform_dma_sync_sg_for_device	swiotlb_sync_sg_for_device
+#endif
+#ifndef platform_dma_mapping_error
+# define platform_dma_mapping_error		swiotlb_dma_mapping_error
+#endif
+#ifndef platform_dma_supported
+# define platform_dma_supported			swiotlb_dma_supported
+#endif
+#ifndef platform_local_vector_to_irq
+# define platform_local_vector_to_irq	__ia64_local_vector_to_irq
+#endif
+#ifndef platform_pci_get_legacy_mem
+# define platform_pci_get_legacy_mem	ia64_pci_get_legacy_mem
+#endif
+#ifndef platform_pci_legacy_read
+# define platform_pci_legacy_read	ia64_pci_legacy_read
+#endif
+#ifndef platform_pci_legacy_write
+# define platform_pci_legacy_write	ia64_pci_legacy_write
+#endif
+#ifndef platform_inb
+# define platform_inb		__ia64_inb
+#endif
+#ifndef platform_inw
+# define platform_inw		__ia64_inw
+#endif
+#ifndef platform_inl
+# define platform_inl		__ia64_inl
+#endif
+#ifndef platform_outb
+# define platform_outb		__ia64_outb
+#endif
+#ifndef platform_outw
+# define platform_outw		__ia64_outw
+#endif
+#ifndef platform_outl
+# define platform_outl		__ia64_outl
+#endif
+#ifndef platform_mmiowb
+# define platform_mmiowb	__ia64_mmiowb
+#endif
+#ifndef platform_readb
+# define platform_readb		__ia64_readb
+#endif
+#ifndef platform_readw
+# define platform_readw		__ia64_readw
+#endif
+#ifndef platform_readl
+# define platform_readl		__ia64_readl
+#endif
+#ifndef platform_readq
+# define platform_readq		__ia64_readq
+#endif
+#ifndef platform_readb_relaxed
+# define platform_readb_relaxed	__ia64_readb_relaxed
+#endif
+#ifndef platform_readw_relaxed
+# define platform_readw_relaxed	__ia64_readw_relaxed
+#endif
+#ifndef platform_readl_relaxed
+# define platform_readl_relaxed	__ia64_readl_relaxed
+#endif
+#ifndef platform_readq_relaxed
+# define platform_readq_relaxed	__ia64_readq_relaxed
+#endif
+
+#endif /* _ASM_IA64_MACHVEC_H */
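The interesting part of this imported header is the dispatch pattern rather than any single hook: a platform defines only the platform_* macros it wants to override (directly in its machvec_*.h header, or indirectly through the ia64_mv structure that machvec_init() fills in for CONFIG_IA64_GENERIC kernels), every hook left undefined falls back to a default (__ia64_*, swiotlb_*, or a machvec_noop* stub) via the #ifndef block at the end, and MACHVEC_INIT(name) assembles the resulting function-pointer table. The program below is a minimal standalone sketch of that pattern only; it is not kernel code, and every name in it (the "foo" platform, the demo_* functions, DEMO_MACHVEC_INIT) is hypothetical.

/*
 * Toy illustration of the machvec override-with-defaults pattern.
 * Not kernel code: "foo", demo_* and DEMO_MACHVEC_INIT are made up.
 */
#include <stdio.h>

struct demo_machine_vector {
	const char *name;
	void (*setup)(void);
	unsigned int (*inb)(unsigned long);
};

/* default implementations (stand-ins for machvec_setup, __ia64_inb, ...) */
void demo_default_setup(void) { puts("default setup"); }
unsigned int demo_default_inb(unsigned long port) { (void)port; return 0; }

/* platform override: the "foo" platform replaces only the setup hook */
void demo_foo_setup(void) { puts("foo platform setup"); }
#define platform_setup	demo_foo_setup

/* anything still undefined falls back to the default, exactly like the
 * #ifndef block at the end of machvec.h */
#ifndef platform_setup
# define platform_setup	demo_default_setup
#endif
#ifndef platform_inb
# define platform_inb	demo_default_inb
#endif

/* analogue of MACHVEC_INIT(name): stringize the platform name and list the
 * platform_* hooks in struct field order */
#define DEMO_MACHVEC_INIT(name)	\
{				\
	#name,			\
	platform_setup,		\
	platform_inb,		\
}

struct demo_machine_vector demo_mv = DEMO_MACHVEC_INIT(foo);

int main(void)
{
	printf("machine vector: %s\n", demo_mv.name);
	demo_mv.setup();				/* overridden hook */
	printf("inb(0x60) -> %u\n", demo_mv.inb(0x60));	/* default hook */
	return 0;
}

Built with any C compiler, the sketch prints the overridden setup message and the default inb result, mirroring how a platform-specific IA-64 build resolves platform_* at compile time while a CONFIG_IA64_GENERIC build routes the same names through the ia64_mv table at run time.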