debuggers.hg
changeset 17575:ad55c06c9bbc
MSI 5/6: add MSI support to passthrough HVM domain
Currently it only intercepts accesses to the MSI config space; MSI-X is not yet supported.
Signed-off-by: Jiang Yunhong <yunhong.jiang@intel.com>
Signed-off-by: Shan Haitao <haitao.shan@intel.com>
author | Keir Fraser <keir.fraser@citrix.com>
--- | ---
date | Thu May 01 10:33:03 2008 +0100 (2008-05-01)
parents | a0ebceaf41ff
children | 5bb9093eb0e9
files | tools/ioemu/Makefile.target tools/ioemu/hw/pass-through.c tools/ioemu/hw/pass-through.h tools/ioemu/hw/pt-msi.c tools/ioemu/hw/pt-msi.h tools/libxc/xc_domain.c tools/libxc/xc_physdev.c tools/libxc/xenctrl.h xen/arch/x86/hvm/Makefile xen/arch/x86/hvm/vlapic.c xen/arch/x86/hvm/vmsi.c xen/arch/x86/hvm/vmx/intr.c xen/drivers/passthrough/io.c xen/drivers/passthrough/iommu.c xen/drivers/passthrough/vtd/x86/vtd.c xen/include/asm-x86/hvm/io.h xen/include/asm-x86/hvm/irq.h xen/include/public/domctl.h
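The pieces below implement one control flow: qemu traps guest accesses to the device's MSI capability in config space; when the guest flips the enable bit, qemu first asks Xen to allocate a physical MSI and pirq (`xc_physdev_map_pirq_msi`), then binds the guest vector and flags to that pirq (`xc_domain_update_msi_irq`), and only then enables MSI on the real device. On delivery, Xen's vmsi.c injects the guest vector into the matching vLAPIC(s), and the vLAPIC EOI is folded back into a `pirq_guest_eoi()`. The toy program below sketches the enable ordering only; all helper names are hypothetical stand-ins and the hypercalls are stubbed with printfs.

```c
/* Toy model of the enable ordering in pt_msi_control_update() below.
 * Everything here is a hypothetical stand-in: the real code talks to
 * libxc and the device's config space instead of calling printf(). */
#include <stdio.h>

#define MSI_FLAG_UNINIT 0x1000          /* same value as in pt-msi.h */

static int map_pirq(void)
{
    printf("1. map physical MSI to a pirq (PHYSDEVOP_map_pirq)\n");
    return 42;                          /* pretend Xen handed us pirq 42 */
}

static void bind_gvec(int pirq)
{
    printf("2. bind guest vector to pirq %d (XEN_DOMCTL_bind_pt_irq)\n", pirq);
}

static void enable_phys_msi(void)
{
    printf("3. set PCI_MSI_FLAGS_ENABLE on the real device\n");
}

int main(void)
{
    unsigned int flags = MSI_FLAG_UNINIT;   /* state after pt_msi_init() */

    /* Guest writes the MSI enable bit for the first time. */
    if ( flags & MSI_FLAG_UNINIT )
    {
        int pirq = map_pirq();          /* pt_msi_setup() */
        bind_gvec(pirq);                /* pt_msi_update() */
        flags &= ~MSI_FLAG_UNINIT;
        enable_phys_msi();              /* physical enable strictly last */
    }
    return 0;
}
```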
line diff
```diff
--- a/tools/ioemu/Makefile.target	Thu May 01 10:32:10 2008 +0100
+++ b/tools/ioemu/Makefile.target	Thu May 01 10:33:03 2008 +0100
@@ -370,7 +370,7 @@ endif
 
 ifdef CONFIG_PASSTHROUGH
 LIBS+=-lpci
-VL_OBJS+= pass-through.o
+VL_OBJS+= pass-through.o pt-msi.o
 CFLAGS += -DCONFIG_PASSTHROUGH
 $(info *** PCI passthrough capability has been enabled ***)
 endif
```
```diff
--- a/tools/ioemu/hw/pass-through.c	Thu May 01 10:32:10 2008 +0100
+++ b/tools/ioemu/hw/pass-through.c	Thu May 01 10:33:03 2008 +0100
@@ -26,6 +26,7 @@
 #include "pass-through.h"
 #include "pci/header.h"
 #include "pci/pci.h"
+#include "pt-msi.h"
 
 extern FILE *logfile;
 
@@ -287,6 +288,9 @@ static void pt_pci_write_config(PCIDevic
         return;
     }
 
+    if ( pt_msi_write(assigned_device, address, val, len) )
+        return;
+
     /* PCI config pass-through */
     if (address == 0x4) {
         switch (len){
@@ -333,6 +337,7 @@ static uint32_t pt_pci_read_config(PCIDe
         break;
     }
 
+    pt_msi_read(assigned_device, address, len, &val);
 exit:
 
 #ifdef PT_DEBUG_PCI_CONFIG_ACCESS
@@ -445,11 +450,41 @@ static int pt_unregister_regions(struct 
 
 }
 
+uint8_t find_cap_offset(struct pci_dev *pci_dev, uint8_t cap)
+{
+    int id;
+    int max_cap = 48;
+    int pos = PCI_CAPABILITY_LIST;
+    int status;
+
+    status = pci_read_byte(pci_dev, PCI_STATUS);
+    if ( (status & PCI_STATUS_CAP_LIST) == 0 )
+        return 0;
+
+    while ( max_cap-- )
+    {
+        pos = pci_read_byte(pci_dev, pos);
+        if ( pos < 0x40 )
+            break;
+
+        pos &= ~3;
+        id = pci_read_byte(pci_dev, pos + PCI_CAP_LIST_ID);
+
+        if ( id == 0xff )
+            break;
+        if ( id == cap )
+            return pos;
+
+        pos += PCI_CAP_LIST_NEXT;
+    }
+    return 0;
+}
+
 struct pt_dev * register_real_device(PCIBus *e_bus,
     const char *e_dev_name, int e_devfn, uint8_t r_bus, uint8_t r_dev,
     uint8_t r_func, uint32_t machine_irq, struct pci_access *pci_access)
 {
-    int rc = -1, i;
+    int rc = -1, i, pos;
     struct pt_dev *assigned_device = NULL;
     struct pci_dev *pci_dev;
     uint8_t e_device, e_intx;
@@ -511,6 +546,9 @@ struct pt_dev * register_real_device(PCI
     for ( i = 0; i < PCI_CONFIG_SIZE; i++ )
         assigned_device->dev.config[i] = pci_read_byte(pci_dev, i);
 
+    if ( (pos = find_cap_offset(pci_dev, PCI_CAP_ID_MSI)) )
+        pt_msi_init(assigned_device, pos);
+
     /* Handle real device's MMIO/PIO BARs */
     pt_register_regions(assigned_device);
 
```
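`find_cap_offset()` above is the standard PCI capability-list walk: check the capability-list bit in the status register, start at the capabilities pointer (offset 0x34), and chase `next` pointers with a 48-hop loop guard. A self-contained re-run of the same algorithm against an in-memory config space (so it compiles and runs without libpci; the layout built in `main()` is made up for the test):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PCI_STATUS           0x06
#define PCI_STATUS_CAP_LIST  0x10
#define PCI_CAPABILITY_LIST  0x34
#define PCI_CAP_LIST_ID      0
#define PCI_CAP_LIST_NEXT    1
#define PCI_CAP_ID_MSI       0x05

/* Same walk as find_cap_offset(), but over a plain byte array. */
static uint8_t find_cap(const uint8_t *cfg, uint8_t cap)
{
    int max_cap = 48;                  /* guard against looped lists */
    uint8_t pos = PCI_CAPABILITY_LIST;

    if ( !(cfg[PCI_STATUS] & PCI_STATUS_CAP_LIST) )
        return 0;                      /* device has no capability list */

    while ( max_cap-- )
    {
        pos = cfg[pos];
        if ( pos < 0x40 )              /* pointers below 0x40 are invalid */
            break;
        pos &= ~3;                     /* capabilities are dword aligned */
        if ( cfg[pos + PCI_CAP_LIST_ID] == 0xff )
            break;
        if ( cfg[pos + PCI_CAP_LIST_ID] == cap )
            return pos;
        pos += PCI_CAP_LIST_NEXT;      /* move to the 'next' pointer */
    }
    return 0;
}

int main(void)
{
    uint8_t cfg[256];
    memset(cfg, 0, sizeof(cfg));
    cfg[PCI_STATUS] = PCI_STATUS_CAP_LIST;
    cfg[PCI_CAPABILITY_LIST] = 0x50;   /* first capability at 0x50 */
    cfg[0x50 + PCI_CAP_LIST_ID] = PCI_CAP_ID_MSI;
    cfg[0x50 + PCI_CAP_LIST_NEXT] = 0; /* end of list */

    printf("MSI capability at 0x%02x\n", find_cap(cfg, PCI_CAP_ID_MSI));
    return 0;
}
```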
```diff
--- a/tools/ioemu/hw/pass-through.h	Thu May 01 10:32:10 2008 +0100
+++ b/tools/ioemu/hw/pass-through.h	Thu May 01 10:33:03 2008 +0100
@@ -57,6 +57,14 @@ struct pt_region {
     } access;
 };
 
+struct pt_msi_info {
+    uint32_t flags;
+    int offset;
+    int size;
+    int pvec;   /* physical vector used */
+    int pirq;   /* guest pirq corresponding */
+};
+
 /*
     This structure holds the context of the mapping functions
     and data that is relevant for qemu device management.
@@ -65,6 +73,7 @@ struct pt_dev {
     PCIDevice dev;
     struct pci_dev *pci_dev;                     /* libpci struct */
    struct pt_region bases[PCI_NUM_REGIONS];     /* Access regions */
+    struct pt_msi_info *msi;                     /* MSI virtualization */
 };
 
 /* Used for formatting PCI BDF into cf8 format */
```
```diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/ioemu/hw/pt-msi.c	Thu May 01 10:33:03 2008 +0100
@@ -0,0 +1,488 @@
+/*
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Jiang Yunhong <yunhong.jiang@intel.com>
+ *
+ * This file implements direct PCI assignment to a HVM guest
+ */
+
+#include "pt-msi.h"
+
+#define PT_MSI_CTRL_WR_MASK_HI      (0x1)
+#define PT_MSI_CTRL_WR_MASK_LO      (0x8E)
+#define PT_MSI_DATA_WR_MASK         (0x38)
+int pt_msi_init(struct pt_dev *dev, int pos)
+{
+    uint8_t id;
+    uint16_t flags;
+    struct pci_dev *pd = dev->pci_dev;
+    PCIDevice *d = (struct PCIDevice *)dev;
+
+    id = pci_read_byte(pd, pos + PCI_CAP_LIST_ID);
+
+    if ( id != PCI_CAP_ID_MSI )
+    {
+        PT_LOG("pt_msi_init: error id %x pos %x\n", id, pos);
+        return -1;
+    }
+
+    dev->msi = malloc(sizeof(struct pt_msi_info));
+    if ( !dev->msi )
+    {
+        PT_LOG("pt_msi_init: error allocation pt_msi_info\n");
+        return -1;
+    }
+    memset(dev->msi, 0, sizeof(struct pt_msi_info));
+
+    dev->msi->offset = pos;
+    dev->msi->size = 0xa;
+
+    flags = pci_read_byte(pd, pos + PCI_MSI_FLAGS);
+    if ( flags & PCI_MSI_FLAGS_ENABLE )
+    {
+        PT_LOG("pt_msi_init: MSI enabled already, disable first\n");
+        pci_write_byte(pd, pos + PCI_MSI_FLAGS, flags & ~PCI_MSI_FLAGS_ENABLE);
+    }
+    dev->msi->flags |= (flags | MSI_FLAG_UNINIT);
+
+    if ( flags & PCI_MSI_FLAGS_64BIT )
+        dev->msi->size += 4;
+    if ( flags & PCI_MSI_FLAGS_PVMASK )
+        dev->msi->size += 10;
+
+    /* All register is 0 after reset, except first 4 byte */
+    *(uint32_t *)(&d->config[pos]) = pci_read_long(pd, pos);
+    d->config[pos + 2] &= PT_MSI_CTRL_WR_MASK_LO;
+    d->config[pos + 3] &= PT_MSI_CTRL_WR_MASK_HI;
+
+    return 0;
+}
+
+/*
+ * setup physical msi, but didn't enable it
+ */
+static int pt_msi_setup(struct pt_dev *dev)
+{
+    int vector = -1, pirq = -1;
+
+    if ( !(dev->msi->flags & MSI_FLAG_UNINIT) )
+    {
+        PT_LOG("setup physical after initialized?? \n");
+        return -1;
+    }
+
+    if ( xc_physdev_map_pirq_msi(xc_handle, domid, MAP_PIRQ_TYPE_MSI,
+                                 vector, &pirq,
+                                 dev->pci_dev->dev << 3 | dev->pci_dev->func,
+                                 dev->pci_dev->bus, 1) )
+    {
+        PT_LOG("error map vector %x\n", vector);
+        return -1;
+    }
+    dev->msi->pirq = pirq;
+    PT_LOG("vector %x pirq %x\n", vector, pirq);
+
+    return 0;
+}
+
+/*
+ * caller should make sure mask is supported
+ */
+static uint32_t get_msi_gmask(struct pt_dev *d)
+{
+    struct PCIDevice *pd = (struct PCIDevice *)d;
+
+    if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+        return *(uint32_t *)(pd->config + d->msi->offset + 0xc);
+    else
+        return *(uint32_t *)(pd->config + d->msi->offset + 0x10);
+
+}
+
+static uint16_t get_msi_gdata(struct pt_dev *d)
+{
+    struct PCIDevice *pd = (struct PCIDevice *)d;
+
+    if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+        return *(uint16_t *)(pd->config + d->msi->offset + PCI_MSI_DATA_64);
+    else
+        return *(uint16_t *)(pd->config + d->msi->offset + PCI_MSI_DATA_32);
+}
+
+static uint64_t get_msi_gaddr(struct pt_dev *d)
+{
+    struct PCIDevice *pd = (struct PCIDevice *)d;
+    uint32_t addr_hi;
+    uint64_t addr = 0;
+
+    addr = (uint64_t)(*(uint32_t *)(pd->config +
+                      d->msi->offset + PCI_MSI_ADDRESS_LO));
+
+    if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+    {
+        addr_hi = *(uint32_t *)(pd->config + d->msi->offset
+                                + PCI_MSI_ADDRESS_HI);
+        addr |= (uint64_t)addr_hi << 32;
+    }
+    return addr;
+}
+
+static uint8_t get_msi_gctrl(struct pt_dev *d)
+{
+    struct PCIDevice *pd = (struct PCIDevice *)d;
+
+    return *(uint8_t *)(pd->config + d->msi->offset + PCI_MSI_FLAGS);
+}
+
+static uint32_t get_msi_gflags(struct pt_dev *d)
+{
+    uint32_t result = 0;
+    int rh, dm, dest_id, deliv_mode, trig_mode;
+    uint16_t data;
+    uint64_t addr;
+
+    data = get_msi_gdata(d);
+    addr = get_msi_gaddr(d);
+
+    rh = (addr >> MSI_ADDR_REDIRECTION_SHIFT) & 0x1;
+    dm = (addr >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
+    dest_id = (addr >> MSI_TARGET_CPU_SHIFT) & 0xff;
+    deliv_mode = (data >> MSI_DATA_DELIVERY_SHIFT) & 0x7;
+    trig_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
+
+    result |= dest_id | (rh << GFLAGS_SHIFT_RH) | (dm << GFLAGS_SHIFT_DM) | \
+              (deliv_mode << GLFAGS_SHIFT_DELIV_MODE) |
+              (trig_mode << GLFAGS_SHIFT_TRG_MODE);
+
+    return result;
+}
+
+/*
+ * This may be arch different
+ */
+static inline uint8_t get_msi_gvec(struct pt_dev *d)
+{
+    return get_msi_gdata(d) & 0xff;
+}
+
+static inline uint8_t get_msi_hvec(struct pt_dev *d)
+{
+    struct pci_dev *pd = d->pci_dev;
+    uint16_t data;
+
+    if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+        data = pci_read_word(pd, PCI_MSI_DATA_64);
+    else
+        data = pci_read_word(pd, PCI_MSI_DATA_32);
+
+    return data & 0xff;
+}
+
+/*
+ * Update msi mapping, usually called when MSI enabled,
+ * except the first time
+ */
+static int pt_msi_update(struct pt_dev *d)
+{
+    PT_LOG("now update msi with pirq %x gvec %x\n",
+           get_msi_gvec(d), d->msi->pirq);
+    return xc_domain_update_msi_irq(xc_handle, domid, get_msi_gvec(d),
+                                    d->msi->pirq, get_msi_gflags(d));
+}
+
+static int pt_msi_enable(struct pt_dev *d, int enable)
+{
+    uint16_t ctrl;
+    struct pci_dev *pd = d->pci_dev;
+
+    if ( !pd )
+        return -1;
+
+    ctrl = pci_read_word(pd, d->msi->offset + PCI_MSI_FLAGS);
+
+    if ( enable )
+        ctrl |= PCI_MSI_FLAGS_ENABLE;
+    else
+        ctrl &= ~PCI_MSI_FLAGS_ENABLE;
+
+    pci_write_word(pd, d->msi->offset + PCI_MSI_FLAGS, ctrl);
+    return 0;
+}
+
+static int pt_msi_control_update(struct pt_dev *d, uint16_t old_ctrl)
+{
+    uint16_t new_ctrl;
+    PCIDevice *pd = (PCIDevice *)d;
+
+    new_ctrl = get_msi_gctrl(d);
+
+    PT_LOG("old_ctrl %x new_Ctrl %x\n", old_ctrl, new_ctrl);
+
+    if ( new_ctrl & PCI_MSI_FLAGS_ENABLE )
+    {
+        if ( d->msi->flags & MSI_FLAG_UNINIT )
+        {
+            /* Init physical one */
+            PT_LOG("setup msi for dev %x\n", pd->devfn);
+            if ( pt_msi_setup(d) )
+            {
+                PT_LOG("pt_msi_setup error!!!\n");
+                return -1;
+            }
+            pt_msi_update(d);
+
+            d->msi->flags &= ~MSI_FLAG_UNINIT;
+            d->msi->flags |= PT_MSI_MAPPED;
+
+            /* Enable physical MSI only after bind */
+            pt_msi_enable(d, 1);
+        }
+        else if ( !(old_ctrl & PCI_MSI_FLAGS_ENABLE) )
+            pt_msi_enable(d, 1);
+    }
+    else if ( old_ctrl & PCI_MSI_FLAGS_ENABLE )
+        pt_msi_enable(d, 0);
+
+    /* Currently no support for multi-vector */
+    if ( (new_ctrl & PCI_MSI_FLAGS_QSIZE) != 0x0 )
+        PT_LOG("try to set more than 1 vector ctrl %x\n", new_ctrl);
+
+    return 0;
+}
+
+static int
+pt_msi_map_update(struct pt_dev *d, uint32_t old_data, uint64_t old_addr)
+{
+    uint16_t pctrl;
+    uint32_t data;
+    uint64_t addr;
+
+    data = get_msi_gdata(d);
+    addr = get_msi_gaddr(d);
+
+    PT_LOG("old_data %x old_addr %lx data %x addr %lx\n",
+           old_data, old_addr, data, addr);
+
+    if ( data != old_data || addr != old_addr )
+        if ( get_msi_gctrl(d) & PCI_MSI_FLAGS_ENABLE )
+            pt_msi_update(d);
+
+    return 0;
+}
+
+static int pt_msi_mask_update(struct pt_dev *d, uint32_t old_mask)
+{
+    struct pci_dev *pd = d->pci_dev;
+    uint32_t mask;
+    int offset;
+
+    if ( !(d->msi->flags & PCI_MSI_FLAGS_PVMASK) )
+        return -1;
+
+    mask = get_msi_gmask(d);
+
+    if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+        offset = d->msi->offset + 0xc;
+    else
+        offset = d->msi->offset + 0x10;
+
+    if ( old_mask != mask )
+        pci_write_long(pd, offset, mask);
+}
+
+#define ACCESSED_DATA 0x2
+#define ACCESSED_MASK 0x4
+#define ACCESSED_ADDR 0x8
+#define ACCESSED_CTRL 0x10
+
+int pt_msi_write(struct pt_dev *d, uint32_t addr, uint32_t val, uint32_t len)
+{
+    struct pci_dev *pd;
+    int i, cur = addr;
+    uint8_t value, flags = 0;
+    uint16_t old_ctrl = 0, old_data = 0;
+    uint32_t old_mask = 0;
+    uint64_t old_addr = 0;
+    PCIDevice *dev = (PCIDevice *)d;
+    int can_write = 1;
+
+    if ( !d || !d->msi )
+        return 0;
+
+    if ( (addr >= (d->msi->offset + d->msi->size) ) ||
+         (addr + len) < d->msi->offset)
+        return 0;
+
+    PT_LOG("addr %x val %x len %x offset %x size %x\n",
+           addr, val, len, d->msi->offset, d->msi->size);
+
+    pd = d->pci_dev;
+    old_ctrl = get_msi_gctrl(d);
+    old_addr = get_msi_gaddr(d);
+    old_data = get_msi_gdata(d);
+
+    if ( d->msi->flags & PCI_MSI_FLAGS_PVMASK )
+        old_mask = get_msi_gmask(d);
+
+    for ( i = 0; i < len; i++, cur++ )
+    {
+        int off;
+        uint8_t orig_value;
+
+        if ( cur < d->msi->offset )
+            continue;
+        else if ( cur >= (d->msi->offset + d->msi->size) )
+            break;
+
+        off = cur - d->msi->offset;
+        value = (val >> (i * 8)) & 0xff;
+
+        switch ( off )
+        {
+        case 0x0 ... 0x1:
+            can_write = 0;
+            break;
+        case 0x2:
+        case 0x3:
+            flags |= ACCESSED_CTRL;
+
+            orig_value = pci_read_byte(pd, d->msi->offset + off);
+
+            orig_value &= (off == 2) ? PT_MSI_CTRL_WR_MASK_LO:
+                          PT_MSI_CTRL_WR_MASK_HI;
+
+            orig_value |= value & ( (off == 2) ? ~PT_MSI_CTRL_WR_MASK_LO:
+                                    ~PT_MSI_CTRL_WR_MASK_HI);
+            value = orig_value;
+            break;
+        case 0x4 ... 0x7:
+            flags |= ACCESSED_ADDR;
+            /* bit 4 ~ 11 is reserved for MSI in x86 */
+            if ( off == 0x4 )
+                value &= 0x0f;
+            if ( off == 0x5 )
+                value &= 0xf0;
+            break;
+        case 0x8 ... 0xb:
+            if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+            {
+                /* Up 32bit is reserved in x86 */
+                flags |= ACCESSED_ADDR;
+                if ( value )
+                    PT_LOG("Write up32 addr with %x \n", value);
+            }
+            else
+            {
+                if ( off == 0xa || off == 0xb )
+                    can_write = 0;
+                else
+                    flags |= ACCESSED_DATA;
+                if ( off == 0x9 )
+                    value &= ~PT_MSI_DATA_WR_MASK;
+            }
+            break;
+        case 0xc ... 0xf:
+            if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+            {
+                if ( off == 0xe || off == 0xf )
+                    can_write = 0;
+                else
+                {
+                    flags |= ACCESSED_DATA;
+                    if (off == 0xd)
+                        value &= ~PT_MSI_DATA_WR_MASK;
+                }
+            }
+            else
+            {
+                if ( d->msi->flags & PCI_MSI_FLAGS_PVMASK )
+                    flags |= ACCESSED_MASK;
+                else
+                    PT_LOG("why comes to MASK without mask support??\n");
+            }
+            break;
+        case 0x10 ... 0x13:
+            if ( d->msi->flags & PCI_MSI_FLAGS_64BIT )
+            {
+                if ( d->msi->flags & PCI_MSI_FLAGS_PVMASK )
+                    flags |= ACCESSED_MASK;
+                else
+                    PT_LOG("why comes to MASK without mask support??\n");
+            }
+            else
+                can_write = 0;
+            break;
+        case 0x14 ... 0x18:
+            can_write = 0;
+            break;
+        default:
+            PT_LOG("Non MSI register!!!\n");
+            break;
+        }
+
+        if ( can_write )
+            dev->config[cur] = value;
+    }
+
+    if ( flags & ACCESSED_DATA || flags & ACCESSED_ADDR )
+        pt_msi_map_update(d, old_data, old_addr);
+
+    if ( flags & ACCESSED_MASK )
+        pt_msi_mask_update(d, old_mask);
+
+    /* This will enable physical one, do it in last step */
+    if ( flags & ACCESSED_CTRL )
+        pt_msi_control_update(d, old_ctrl);
+
+    return 1;
+}
+
+int pt_msi_read(struct pt_dev *d, int addr, int len, uint32_t *val)
+{
+    int e_addr = addr, e_len = len, offset = 0, i;
+    uint8_t e_val = 0;
+    PCIDevice *pd = (PCIDevice *)d;
+
+    if ( !d || !d->msi )
+        return 0;
+
+    if ( (addr > (d->msi->offset + d->msi->size) ) ||
+         (addr + len) <= d->msi->offset )
+        return 0;
+
+    PT_LOG("pt_msi_read addr %x len %x val %x offset %x size %x\n",
+           addr, len, *val, d->msi->offset, d->msi->size);
+
+    if ( (addr + len ) > (d->msi->offset + d->msi->size) )
+        e_len -= addr + len - d->msi->offset - d->msi->size;
+
+    if ( addr < d->msi->offset )
+    {
+        e_addr = d->msi->offset;
+        offset = d->msi->offset - addr;
+        e_len -= offset;
+    }
+
+    for ( i = 0; i < e_len; i++ )
+    {
+        e_val = *(uint8_t *)(&pd->config[e_addr] + i);
+        *val &= ~(0xff << ( (offset + i) * 8));
+        *val |= (e_val << ( (offset + i) * 8));
+    }
+
+    return e_len;
+}
+
```
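The subtlest part of `pt_msi_write()` above is the message-control case (offsets 2-3): a guest write is not applied verbatim but merged so that hardware-owned bits survive. `PT_MSI_CTRL_WR_MASK_LO` (0x8E) marks the read-only bits of the low control byte (the multiple-message-capable field and the 64-bit flag); only the remaining bits (enable, multiple-message enable) come from the guest. A compilable illustration of that merge, reusing the mask value from the file above:

```c
#include <stdint.h>
#include <stdio.h>

/* Same value as in pt-msi.c: bits set here are read-only to the guest;
 * the clear bits (enable, multiple-message enable) pass through. */
#define PT_MSI_CTRL_WR_MASK_LO 0x8E

static uint8_t merge_ctrl(uint8_t hw, uint8_t guest)
{
    return (hw & PT_MSI_CTRL_WR_MASK_LO) |             /* keep hardware bits */
           (guest & (uint8_t)~PT_MSI_CTRL_WR_MASK_LO); /* accept guest bits  */
}

int main(void)
{
    uint8_t hw = 0x80;      /* 64-bit capable, MSI disabled */
    uint8_t guest = 0xFF;   /* guest tries to flip every bit */

    /* Only the writable bits (0x71) take the guest's value;
     * prints 0xf1 (0x80 kept from hardware, 0x71 from the guest). */
    printf("merged control byte: 0x%02x\n", merge_ctrl(hw, guest));
    return 0;
}
```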
```diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/ioemu/hw/pt-msi.h	Thu May 01 10:33:03 2008 +0100
@@ -0,0 +1,65 @@
+#ifndef _PT_MSI_H
+#define _PT_MSI_H
+
+#include "vl.h"
+#include "pci/header.h"
+#include "pci/pci.h"
+#include "pass-through.h"
+
+#define MSI_FLAG_UNINIT 0x1000
+#define PT_MSI_MAPPED   0x2000
+
+#define MSI_DATA_VECTOR_SHIFT          0
+#define MSI_DATA_VECTOR(v)             (((u8)v) << MSI_DATA_VECTOR_SHIFT)
+
+#define MSI_DATA_DELIVERY_SHIFT        8
+#define MSI_DATA_DELIVERY_FIXED        (0 << MSI_DATA_DELIVERY_SHIFT)
+#define MSI_DATA_DELIVERY_LOWPRI       (1 << MSI_DATA_DELIVERY_SHIFT)
+
+#define MSI_DATA_LEVEL_SHIFT           14
+#define MSI_DATA_LEVEL_DEASSERT        (0 << MSI_DATA_LEVEL_SHIFT)
+#define MSI_DATA_LEVEL_ASSERT          (1 << MSI_DATA_LEVEL_SHIFT)
+
+#define MSI_DATA_TRIGGER_SHIFT         15
+#define MSI_DATA_TRIGGER_EDGE          (0 << MSI_DATA_TRIGGER_SHIFT)
+#define MSI_DATA_TRIGGER_LEVEL         (1 << MSI_DATA_TRIGGER_SHIFT)
+
+/*
+ + * Shift/mask fields for APIC-based bus address
+ + */
+
+#define MSI_ADDR_HEADER                0xfee00000
+#define MSI_TARGET_CPU_SHIFT           12
+
+#define MSI_ADDR_DESTID_MASK           0xfff0000f
+#define MSI_ADDR_DESTID_CPU(cpu)       ((cpu) << MSI_TARGET_CPU_SHIFT)
+
+#define MSI_ADDR_DESTMODE_SHIFT        2
+#define MSI_ADDR_DESTMODE_PHYS         (0 << MSI_ADDR_DESTMODE_SHIFT)
+#define MSI_ADDR_DESTMODE_LOGIC        (1 << MSI_ADDR_DESTMODE_SHIFT)
+
+#define MSI_ADDR_REDIRECTION_SHIFT     3
+#define MSI_ADDR_REDIRECTION_CPU       (0 << MSI_ADDR_REDIRECTION_SHIFT)
+#define MSI_ADDR_REDIRECTION_LOWPRI    (1 << MSI_ADDR_REDIRECTION_SHIFT)
+
+#define PCI_MSI_FLAGS_PVMASK           0x100
+
+#define AUTO_ASSIGN -1
+
+/* shift count for gflags */
+#define GFLAGS_SHIFT_DEST_ID    0
+#define GFLAGS_SHIFT_RH         8
+#define GFLAGS_SHIFT_DM         9
+#define GLFAGS_SHIFT_DELIV_MODE 12
+#define GLFAGS_SHIFT_TRG_MODE   15
+
+int
+pt_msi_init(struct pt_dev *dev, int pos);
+
+int
+pt_msi_write(struct pt_dev *d, uint32_t addr, uint32_t val, uint32_t len);
+
+int
+pt_msi_read(struct pt_dev *d, int addr, int len, uint32_t *val);
+
+#endif
```
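The `GFLAGS_SHIFT_*` constants define the packed `gflags` word that qemu hands to Xen: destination ID in bits 0-7, RH at bit 8, DM at bit 9, delivery mode at bits 12-14, trigger mode at bit 15. Xen's vmsi.c unpacks the same layout with the `VMSI_*` masks. A round-trip sketch with values copied from the two files (the `GLFAGS_` spelling is the original header's typo):

```c
#include <stdint.h>
#include <stdio.h>

/* qemu side: pack fields (pt-msi.c, get_msi_gflags) */
#define GFLAGS_SHIFT_RH          8
#define GFLAGS_SHIFT_DM          9
#define GLFAGS_SHIFT_DELIV_MODE 12   /* (sic) spelling from pt-msi.h */
#define GLFAGS_SHIFT_TRG_MODE   15

/* Xen side: unpack fields (vmsi.c) */
#define VMSI_DEST_ID_MASK 0xff
#define VMSI_DM_MASK      0x200
#define VMSI_DELIV_MASK   0x7000
#define VMSI_TRIG_MODE    0x8000

int main(void)
{
    uint32_t dest_id = 1, rh = 0, dm = 1, deliv = 0, trig = 0;

    uint32_t gflags = dest_id | (rh << GFLAGS_SHIFT_RH) | (dm << GFLAGS_SHIFT_DM)
                    | (deliv << GLFAGS_SHIFT_DELIV_MODE)
                    | (trig << GLFAGS_SHIFT_TRG_MODE);

    printf("gflags = %#x\n", gflags);
    printf("dest=%u dm=%d deliv=%u trig=%d\n",
           gflags & VMSI_DEST_ID_MASK,
           !!(gflags & VMSI_DM_MASK),
           (gflags & VMSI_DELIV_MASK) >> GLFAGS_SHIFT_DELIV_MODE,
           !!(gflags & VMSI_TRIG_MODE));
    return 0;
}
```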
```diff
--- a/tools/libxc/xc_domain.c	Thu May 01 10:32:10 2008 +0100
+++ b/tools/libxc/xc_domain.c	Thu May 01 10:33:03 2008 +0100
@@ -795,6 +795,32 @@ int xc_deassign_device(
     return do_domctl(xc_handle, &domctl);
 }
 
+int xc_domain_update_msi_irq(
+    int xc_handle,
+    uint32_t domid,
+    uint32_t gvec,
+    uint32_t pirq,
+    uint32_t gflags)
+{
+    int rc;
+    xen_domctl_bind_pt_irq_t *bind;
+
+    DECLARE_DOMCTL;
+
+    domctl.cmd = XEN_DOMCTL_bind_pt_irq;
+    domctl.domain = (domid_t)domid;
+
+    bind = &(domctl.u.bind_pt_irq);
+    bind->hvm_domid = domid;
+    bind->irq_type = PT_IRQ_TYPE_MSI;
+    bind->machine_irq = pirq;
+    bind->u.msi.gvec = gvec;
+    bind->u.msi.gflags = gflags;
+
+    rc = do_domctl(xc_handle, &domctl);
+    return rc;
+}
+
 /* Pass-through: binds machine irq to guests irq */
 int xc_domain_bind_pt_irq(
     int xc_handle,
```
```diff
--- a/tools/libxc/xc_physdev.c	Thu May 01 10:32:10 2008 +0100
+++ b/tools/libxc/xc_physdev.c	Thu May 01 10:33:03 2008 +0100
@@ -45,6 +45,37 @@ int xc_physdev_map_pirq(int xc_handle,
     return rc;
 }
 
+int xc_physdev_map_pirq_msi(int xc_handle,
+                            int domid,
+                            int type,
+                            int index,
+                            int *pirq,
+                            int devfn,
+                            int bus,
+                            int msi_type)
+{
+    int rc;
+    struct physdev_map_pirq map;
+
+    if ( !pirq )
+        return -EINVAL;
+
+    map.domid = domid;
+    map.type = type;
+    map.index = index;
+    map.pirq = *pirq;
+    map.msi_info.devfn = devfn;
+    map.msi_info.bus = bus;
+    map.msi_info.msi = msi_type;
+
+    rc = do_physdev_op(xc_handle, PHYSDEVOP_map_pirq, &map);
+
+    if ( !rc )
+        *pirq = map.pirq;
+
+    return rc;
+}
+
 int xc_physdev_unmap_pirq(int xc_handle,
                           int domid,
                           int pirq)
@@ -59,3 +90,4 @@ int xc_physdev_unmap_pirq(int xc_handle,
 
     return rc;
 }
+
```
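For orientation, `pt_msi_setup()`/`pt_msi_update()` in pt-msi.c call these two libxc functions back to back: first map a physical MSI onto a pirq, then bind the guest vector and flags to it. A hedged usage fragment (assumes a Xen build environment of this era with xenctrl.h on the include path; the `domid`, BDF, and vector values are placeholders, and error handling is trimmed):

```c
#include <xenctrl.h>
#include <stdio.h>

/* Sketch of the qemu-side call sequence; all parameter values are
 * illustrative, not taken from a real device. */
int setup_guest_msi(int xc_handle, uint32_t domid,
                    int bus, int devfn, uint32_t gvec, uint32_t gflags)
{
    int pirq = -1;

    /* Ask Xen to allocate a pirq backed by a physical MSI; the vector
     * is auto-assigned, hence index -1, as in pt_msi_setup(). */
    if ( xc_physdev_map_pirq_msi(xc_handle, domid, MAP_PIRQ_TYPE_MSI,
                                 -1, &pirq, devfn, bus, 1) )
        return -1;

    /* Route that pirq to the guest vector with the packed gflags,
     * as in pt_msi_update(). */
    if ( xc_domain_update_msi_irq(xc_handle, domid, gvec, pirq, gflags) )
        return -1;

    printf("guest vector %u bound to pirq %d\n", gvec, pirq);
    return 0;
}
```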
```diff
--- a/tools/libxc/xenctrl.h	Thu May 01 10:32:10 2008 +0100
+++ b/tools/libxc/xenctrl.h	Thu May 01 10:33:03 2008 +0100
@@ -856,6 +856,15 @@ int xc_physdev_map_pirq(int xc_handle,
                         int index,
                         int *pirq);
 
+int xc_physdev_map_pirq_msi(int xc_handle,
+                            int domid,
+                            int type,
+                            int index,
+                            int *pirq,
+                            int devfn,
+                            int bus,
+                            int msi_type);
+
 int xc_physdev_unmap_pirq(int xc_handle,
                           int domid,
                           int pirq);
@@ -960,6 +969,13 @@ int xc_domain_ioport_mapping(int xc_hand
                              uint32_t nr_ports,
                              uint32_t add_mapping);
 
+int xc_domain_update_msi_irq(
+    int xc_handle,
+    uint32_t domid,
+    uint32_t gvec,
+    uint32_t pirq,
+    uint32_t gflags);
+
 int xc_domain_bind_pt_irq(int xc_handle,
                           uint32_t domid,
                           uint8_t machine_irq,
```
```diff
--- a/xen/arch/x86/hvm/Makefile	Thu May 01 10:32:10 2008 +0100
+++ b/xen/arch/x86/hvm/Makefile	Thu May 01 10:33:03 2008 +0100
@@ -16,4 +16,5 @@ obj-y += vioapic.o
 obj-y += vlapic.o
 obj-y += vpic.o
 obj-y += save.o
+obj-y += vmsi.o
 obj-y += stdvga.o
```
```diff
--- a/xen/arch/x86/hvm/vlapic.c	Thu May 01 10:32:10 2008 +0100
+++ b/xen/arch/x86/hvm/vlapic.c	Thu May 01 10:33:03 2008 +0100
@@ -476,6 +476,9 @@ void vlapic_EOI_set(struct vlapic *vlapi
 
     if ( vlapic_test_and_clear_vector(vector, &vlapic->regs->data[APIC_TMR]) )
         vioapic_update_EOI(vlapic_domain(vlapic), vector);
+
+    if ( vtd_enabled )
+        hvm_dpci_msi_eoi(current->domain, vector);
 }
 
 static int vlapic_ipi(
```
```diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/x86/hvm/vmsi.c	Thu May 01 10:33:03 2008 +0100
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2001  MandrakeSoft S.A.
+ *
+ * MandrakeSoft S.A.
+ * 43, rue d'Aboukir
+ * 75002 Paris - France
+ * http://www.linux-mandrake.com/
+ * http://www.mandrakesoft.com/
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * Support for virtual MSI logic
+ * Will be merged it with virtual IOAPIC logic, since most is the same
+*/
+
+#include <xen/config.h>
+#include <xen/types.h>
+#include <xen/mm.h>
+#include <xen/xmalloc.h>
+#include <xen/lib.h>
+#include <xen/errno.h>
+#include <xen/sched.h>
+#include <public/hvm/ioreq.h>
+#include <asm/hvm/io.h>
+#include <asm/hvm/vpic.h>
+#include <asm/hvm/vlapic.h>
+#include <asm/hvm/support.h>
+#include <asm/current.h>
+#include <asm/event.h>
+
+static uint32_t vmsi_get_delivery_bitmask(
+    struct domain *d, uint16_t dest, uint8_t dest_mode)
+{
+    uint32_t mask = 0;
+    struct vcpu *v;
+
+    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_get_delivery_bitmask "
+                "dest %d dest_mode %d\n", dest, dest_mode);
+
+    if ( dest_mode == 0 ) /* Physical mode. */
+    {
+        if ( dest == 0xFF ) /* Broadcast. */
+        {
+            for_each_vcpu ( d, v )
+                mask |= 1 << v->vcpu_id;
+            goto out;
+        }
+
+        for_each_vcpu ( d, v )
+        {
+            if ( VLAPIC_ID(vcpu_vlapic(v)) == dest )
+            {
+                mask = 1 << v->vcpu_id;
+                break;
+            }
+        }
+    }
+    else if ( dest != 0 ) /* Logical mode, MDA non-zero. */
+    {
+        for_each_vcpu ( d, v )
+            if ( vlapic_match_logical_addr(vcpu_vlapic(v), dest) )
+                mask |= 1 << v->vcpu_id;
+    }
+
+ out:
+    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_get_delivery_bitmask mask %x\n",
+                mask);
+    return mask;
+}
+
+static void vmsi_inj_irq(
+    struct domain *d,
+    struct vlapic *target,
+    uint8_t vector,
+    uint8_t trig_mode,
+    uint8_t delivery_mode)
+{
+    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic_inj_irq "
+                "irq %d trig %d delive mode %d\n",
+                vector, trig_mode, delivery_mode);
+
+    switch ( delivery_mode )
+    {
+    case dest_Fixed:
+    case dest_LowestPrio:
+        if ( vlapic_set_irq(target, vector, trig_mode) )
+            vcpu_kick(vlapic_vcpu(target));
+        break;
+    default:
+        gdprintk(XENLOG_WARNING, "error delivery mode %d\n", delivery_mode);
+        break;
+    }
+}
+
+#define VMSI_DEST_ID_MASK 0xff
+#define VMSI_RH_MASK      0x100
+#define VMSI_DM_MASK      0x200
+#define VMSI_DELIV_MASK   0x7000
+#define VMSI_TRIG_MODE    0x8000
+
+int vmsi_deliver(struct domain *d, int pirq)
+{
+    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+    uint32_t flags = hvm_irq_dpci->mirq[pirq].gmsi.gflags;
+    int vector = hvm_irq_dpci->mirq[pirq].gmsi.gvec;
+    uint16_t dest = flags & VMSI_DEST_ID_MASK;
+    uint8_t dest_mode = flags & VMSI_DM_MASK;
+    uint8_t delivery_mode = flags & VMSI_DELIV_MASK;
+    uint8_t trig_mode = flags & VMSI_TRIG_MODE;
+    uint32_t deliver_bitmask;
+    struct vlapic *target;
+    struct vcpu *v;
+
+    HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
+                "msi: dest=%x dest_mode=%x delivery_mode=%x "
+                "vector=%x trig_mode=%x\n",
+                dest, dest_mode, delivery_mode, vector, trig_mode);
+
+    if ( !(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MSI) )
+    {
+        gdprintk(XENLOG_WARNING, "pirq %x not msi \n", pirq);
+        return 0;
+    }
+
+    deliver_bitmask = vmsi_get_delivery_bitmask(d, dest, dest_mode);
+    if ( !deliver_bitmask )
+    {
+        HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "ioapic deliver "
+                    "no target on destination\n");
+        return 0;
+    }
+
+    switch ( delivery_mode )
+    {
+    case dest_LowestPrio:
+    {
+        target = apic_round_robin(d, vector, deliver_bitmask);
+        if ( target != NULL )
+            vmsi_inj_irq(d, target, vector, trig_mode, delivery_mode);
+        else
+            HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "null round robin: "
+                        "mask=%x vector=%x delivery_mode=%x\n",
+                        deliver_bitmask, vector, dest_LowestPrio);
+        break;
+    }
+
+    case dest_Fixed:
+    case dest_ExtINT:
+    {
+        uint8_t bit;
+        for ( bit = 0; deliver_bitmask != 0; bit++ )
+        {
+            if ( !(deliver_bitmask & (1 << bit)) )
+                continue;
+            deliver_bitmask &= ~(1 << bit);
+            v = d->vcpu[bit];
+            if ( v != NULL )
+            {
+                target = vcpu_vlapic(v);
+                vmsi_inj_irq(d, target, vector, trig_mode, delivery_mode);
+            }
+        }
+        break;
+    }
+
+    case dest_SMI:
+    case dest_NMI:
+    case dest_INIT:
+    case dest__reserved_2:
+    default:
+        gdprintk(XENLOG_WARNING, "Unsupported delivery mode %d\n",
+                 delivery_mode);
+        break;
+    }
+    return 1;
+}
+
```
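`vmsi_get_delivery_bitmask()` follows the virtual IOAPIC's fan-out rules: in physical mode the destination names one local APIC ID (with 0xFF broadcasting to all vCPUs); in logical mode every vCPU whose logical address matches contributes a bit. The standalone toy below models this with APIC ID equal to vcpu_id, and a flat logical map where `dest` is itself a vCPU bitmap; that flat-mode assumption is a simplification of the real `vlapic_match_logical_addr()` check:

```c
#include <stdint.h>
#include <stdio.h>

#define NR_VCPUS 4

/* Toy version of vmsi_get_delivery_bitmask(): APIC ID == vcpu_id,
 * flat logical addressing assumed for illustration. */
static uint32_t delivery_bitmask(uint16_t dest, uint8_t dest_mode)
{
    uint32_t mask = 0;
    int id;

    if ( dest_mode == 0 )               /* physical */
    {
        if ( dest == 0xFF )             /* broadcast */
            return (1u << NR_VCPUS) - 1;
        if ( dest < NR_VCPUS )
            mask = 1u << dest;
    }
    else if ( dest != 0 )               /* logical, flat: dest is a bitmap */
    {
        for ( id = 0; id < NR_VCPUS; id++ )
            if ( dest & (1u << id) )
                mask |= 1u << id;
    }
    return mask;
}

int main(void)
{
    printf("phys dest 2     -> mask %#x\n", delivery_bitmask(2, 0));
    printf("phys dest 0xFF  -> mask %#x\n", delivery_bitmask(0xFF, 0));
    printf("logical 0b0101  -> mask %#x\n", delivery_bitmask(0x5, 1));
    return 0;
}
```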
```diff
--- a/xen/arch/x86/hvm/vmx/intr.c	Thu May 01 10:32:10 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/intr.c	Thu May 01 10:33:03 2008 +0100
@@ -103,6 +103,12 @@ static void enable_intr_window(struct vc
     }
 }
 
+extern int vmsi_deliver(struct domain *d, int pirq);
+static int hvm_pci_msi_assert(struct domain *d, int pirq)
+{
+    return vmsi_deliver(d, pirq);
+}
+
 static void vmx_dirq_assist(struct vcpu *v)
 {
     unsigned int irq;
@@ -121,6 +127,12 @@ static void vmx_dirq_assist(struct vcpu 
         if ( !test_and_clear_bit(irq, &hvm_irq_dpci->dirq_mask) )
            continue;
 
+        if ( test_bit(_HVM_IRQ_DPCI_MSI, &hvm_irq_dpci->mirq[irq].flags) )
+        {
+            hvm_pci_msi_assert(d, irq);
+            continue;
+        }
+
         stop_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, irq)]);
 
         list_for_each_entry ( digl, &hvm_irq_dpci->mirq[irq].digl_list, list )
```
```diff
--- a/xen/drivers/passthrough/io.c	Thu May 01 10:32:10 2008 +0100
+++ b/xen/drivers/passthrough/io.c	Thu May 01 10:33:03 2008 +0100
@@ -71,44 +71,59 @@ int pt_irq_create_bind_vtd(
         xfree(hvm_irq_dpci);
     }
 
-    machine_gsi = pt_irq_bind->machine_irq;
-    device = pt_irq_bind->u.pci.device;
-    intx = pt_irq_bind->u.pci.intx;
-    guest_gsi = hvm_pci_intx_gsi(device, intx);
-    link = hvm_pci_intx_link(device, intx);
-    hvm_irq_dpci->link_cnt[link]++;
+    if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI )
+    {
+        int pirq = pt_irq_bind->machine_irq;
+
+        hvm_irq_dpci->mirq[pirq].flags |= HVM_IRQ_DPCI_VALID |HVM_IRQ_DPCI_MSI ;
+        hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
+        hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
+
+        hvm_irq_dpci->msi_gvec_pirq[pt_irq_bind->u.msi.gvec] = pirq;
 
-    digl = xmalloc(struct dev_intx_gsi_link);
-    if ( !digl )
-        return -ENOMEM;
+        pirq_guest_bind(d->vcpu[0], pirq, BIND_PIRQ__WILL_SHARE);
+    }
+    else
+    {
+        machine_gsi = pt_irq_bind->machine_irq;
+        device = pt_irq_bind->u.pci.device;
+        intx = pt_irq_bind->u.pci.intx;
+        guest_gsi = hvm_pci_intx_gsi(device, intx);
+        link = hvm_pci_intx_link(device, intx);
+        hvm_irq_dpci->link_cnt[link]++;
 
-    digl->device = device;
-    digl->intx = intx;
-    digl->gsi = guest_gsi;
-    digl->link = link;
-    list_add_tail(&digl->list,
-                  &hvm_irq_dpci->mirq[machine_gsi].digl_list);
+        digl = xmalloc(struct dev_intx_gsi_link);
+        if ( !digl )
+            return -ENOMEM;
 
-    hvm_irq_dpci->girq[guest_gsi].valid = 1;
-    hvm_irq_dpci->girq[guest_gsi].device = device;
-    hvm_irq_dpci->girq[guest_gsi].intx = intx;
-    hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;
+        digl->device = device;
+        digl->intx = intx;
+        digl->gsi = guest_gsi;
+        digl->link = link;
+        list_add_tail(&digl->list,
+                      &hvm_irq_dpci->mirq[machine_gsi].digl_list);
+
+        hvm_irq_dpci->girq[guest_gsi].valid = 1;
+        hvm_irq_dpci->girq[guest_gsi].device = device;
+        hvm_irq_dpci->girq[guest_gsi].intx = intx;
+        hvm_irq_dpci->girq[guest_gsi].machine_gsi = machine_gsi;
 
-    /* Bind the same mirq once in the same domain */
-    if ( !hvm_irq_dpci->mirq[machine_gsi].valid )
-    {
-        hvm_irq_dpci->mirq[machine_gsi].valid = 1;
-        hvm_irq_dpci->mirq[machine_gsi].dom = d;
+        /* Bind the same mirq once in the same domain */
+        if ( !(hvm_irq_dpci->mirq[machine_gsi].flags & HVM_IRQ_DPCI_VALID) )
+        {
+            hvm_irq_dpci->mirq[machine_gsi].flags |= HVM_IRQ_DPCI_VALID;
+            hvm_irq_dpci->mirq[machine_gsi].dom = d;
 
-        init_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)],
-                   pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
-        /* Deal with gsi for legacy devices */
-        pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
+            init_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)],
+                       pt_irq_time_out, &hvm_irq_dpci->mirq[machine_gsi], 0);
+            /* Deal with gsi for legacy devices */
+            pirq_guest_bind(d->vcpu[0], machine_gsi, BIND_PIRQ__WILL_SHARE);
+        }
+
+        gdprintk(XENLOG_INFO VTDPREFIX,
+                 "VT-d irq bind: m_irq = %x device = %x intx = %x\n",
+                 machine_gsi, device, intx);
     }
-
-    gdprintk(XENLOG_INFO VTDPREFIX,
-             "VT-d irq bind: m_irq = %x device = %x intx = %x\n",
-             machine_gsi, device, intx);
     return 0;
 }
 
@@ -139,7 +154,7 @@ int pt_irq_destroy_bind_vtd(
            sizeof(struct hvm_girq_dpci_mapping));
 
     /* clear the mirq info */
-    if ( hvm_irq_dpci->mirq[machine_gsi].valid )
+    if ( (hvm_irq_dpci->mirq[machine_gsi].flags & HVM_IRQ_DPCI_VALID) )
     {
         list_for_each_safe ( digl_list, tmp,
                              &hvm_irq_dpci->mirq[machine_gsi].digl_list )
@@ -161,7 +176,7 @@ int pt_irq_destroy_bind_vtd(
         pirq_guest_unbind(d, machine_gsi);
         kill_timer(&hvm_irq_dpci->hvm_timer[domain_irq_to_vector(d, machine_gsi)]);
         hvm_irq_dpci->mirq[machine_gsi].dom = NULL;
-        hvm_irq_dpci->mirq[machine_gsi].valid = 0;
+        hvm_irq_dpci->mirq[machine_gsi].flags = 0;
     }
 }
 
@@ -177,7 +192,7 @@ int hvm_do_IRQ_dpci(struct domain *d, un
     struct hvm_irq_dpci *dpci = domain_get_irq_dpci(d);
 
     if ( !iommu_enabled || (d == dom0) || !dpci ||
-         !dpci->mirq[mirq].valid )
+         !dpci->mirq[mirq].flags & HVM_IRQ_DPCI_VALID )
         return 0;
 
     /*
@@ -187,13 +202,30 @@ int hvm_do_IRQ_dpci(struct domain *d, un
      * PIC) and we need to detect that.
      */
     set_bit(mirq, dpci->dirq_mask);
-    set_timer(&dpci->hvm_timer[domain_irq_to_vector(d, mirq)],
-              NOW() + PT_IRQ_TIME_OUT);
+    if ( !test_bit(_HVM_IRQ_DPCI_MSI, &dpci->mirq[mirq].flags) )
+        set_timer(&dpci->hvm_timer[domain_irq_to_vector(d, mirq)],
+                  NOW() + PT_IRQ_TIME_OUT);
     vcpu_kick(d->vcpu[0]);
 
     return 1;
 }
 
+
+void hvm_dpci_msi_eoi(struct domain *d, int vector)
+{
+    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
+    int pirq;
+
+    if ( !vtd_enabled || (hvm_irq_dpci == NULL) )
+        return;
+
+    pirq = hvm_irq_dpci->msi_gvec_pirq[vector];
+    if ( ( pirq >= 0 ) && (pirq < NR_PIRQS) &&
+         (hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_VALID) &&
+         (hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MSI) )
+        pirq_guest_eoi(d, pirq);
+}
+
 void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
                   union vioapic_redir_entry *ent)
 {
```
```diff
--- a/xen/drivers/passthrough/iommu.c	Thu May 01 10:32:10 2008 +0100
+++ b/xen/drivers/passthrough/iommu.c	Thu May 01 10:33:03 2008 +0100
@@ -77,7 +77,7 @@ void iommu_domain_destroy(struct domain 
     {
         for ( i = 0; i < NR_IRQS; i++ )
         {
-            if ( !hvm_irq_dpci->mirq[i].valid )
+            if ( !(hvm_irq_dpci->mirq[i].flags & HVM_IRQ_DPCI_VALID) )
                 continue;
 
             pirq_guest_unbind(d, i);
```
```diff
--- a/xen/drivers/passthrough/vtd/x86/vtd.c	Thu May 01 10:32:10 2008 +0100
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c	Thu May 01 10:33:03 2008 +0100
@@ -101,7 +101,7 @@ void hvm_dpci_isairq_eoi(struct domain *
     /* Multiple mirq may be mapped to one isa irq */
     for ( i = 0; i < NR_IRQS; i++ )
     {
-        if ( !dpci->mirq[i].valid )
+        if ( !dpci->mirq[i].flags & HVM_IRQ_DPCI_VALID )
             continue;
 
         list_for_each_entry_safe ( digl, tmp,
```
```diff
--- a/xen/include/asm-x86/hvm/io.h	Thu May 01 10:32:10 2008 +0100
+++ b/xen/include/asm-x86/hvm/io.h	Thu May 01 10:33:03 2008 +0100
@@ -120,5 +120,6 @@ struct hvm_hw_stdvga {
 void stdvga_init(struct domain *d);
 void stdvga_deinit(struct domain *d);
 
+extern void hvm_dpci_msi_eoi(struct domain *d, int vector);
 #endif /* __ASM_X86_HVM_IO_H__ */
 
```
```diff
--- a/xen/include/asm-x86/hvm/irq.h	Thu May 01 10:32:10 2008 +0100
+++ b/xen/include/asm-x86/hvm/irq.h	Thu May 01 10:33:03 2008 +0100
@@ -38,11 +38,21 @@ struct dev_intx_gsi_link {
     uint8_t link;
 };
 
+#define HVM_IRQ_DPCI_VALID 0x1
+#define HVM_IRQ_DPCI_MSI   0x2
+#define _HVM_IRQ_DPCI_MSI  0x1
+
+struct hvm_gmsi_info {
+    uint32_t gvec;
+    uint32_t gflags;
+};
+
 struct hvm_mirq_dpci_mapping {
-    uint8_t valid;
+    uint32_t flags;
     int pending;
     struct list_head digl_list;
     struct domain *dom;
+    struct hvm_gmsi_info gmsi;
 };
 
 struct hvm_girq_dpci_mapping {
@@ -60,6 +70,7 @@ struct hvm_irq_dpci {
     struct hvm_mirq_dpci_mapping mirq[NR_IRQS];
     /* Guest IRQ to guest device/intx mapping. */
     struct hvm_girq_dpci_mapping girq[NR_IRQS];
+    uint8_t msi_gvec_pirq[NR_VECTORS];
     DECLARE_BITMAP(dirq_mask, NR_IRQS);
     /* Record of mapped ISA IRQs */
     DECLARE_BITMAP(isairq_map, NR_ISAIRQS);
```
```diff
--- a/xen/include/public/domctl.h	Thu May 01 10:32:10 2008 +0100
+++ b/xen/include/public/domctl.h	Thu May 01 10:33:03 2008 +0100
@@ -454,7 +454,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_assig
 #define XEN_DOMCTL_unbind_pt_irq     48
 typedef enum pt_irq_type_e {
     PT_IRQ_TYPE_PCI,
-    PT_IRQ_TYPE_ISA
+    PT_IRQ_TYPE_ISA,
+    PT_IRQ_TYPE_MSI,
 } pt_irq_type_t;
 struct xen_domctl_bind_pt_irq {
     uint32_t machine_irq;
@@ -470,6 +471,10 @@ struct xen_domctl_bind_pt_irq {
             uint8_t device;
             uint8_t intx;
         } pci;
+        struct {
+            uint8_t gvec;
+            uint32_t gflags;
+        } msi;
     } u;
 };
 typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t;
```