/root/src/xen/xen/include/asm/msi.h
Line | Count | Source (jump to first uncovered line) |
1 | | #ifndef __ASM_MSI_H |
2 | | #define __ASM_MSI_H |
3 | | |
4 | | #include <xen/cpumask.h> |
5 | | #include <xen/pci.h> |
6 | | #include <asm/byteorder.h> |
7 | | #include <asm/hvm/vmx/vmcs.h> |
8 | | |
9 | | /* |
10 | | * Constants for Intel APIC based MSI messages. |
11 | | */ |
12 | | |
13 | | /* |
14 | | * Shifts for MSI data |
15 | | */ |
16 | | |
17 | 170 | #define MSI_DATA_VECTOR_SHIFT 0 |
18 | 212 | #define MSI_DATA_VECTOR_MASK 0x000000ff |
19 | 86 | #define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK) |
20 | | |
21 | 170 | #define MSI_DATA_DELIVERY_MODE_SHIFT 8 |
22 | 0 | #define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT) |
23 | 86 | #define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT) |
24 | 0 | #define MSI_DATA_DELIVERY_MODE_MASK 0x00000700 |
25 | | |
26 | 86 | #define MSI_DATA_LEVEL_SHIFT 14 |
27 | | #define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT) |
28 | 86 | #define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT) |
29 | | |
30 | 170 | #define MSI_DATA_TRIGGER_SHIFT 15 |
31 | 86 | #define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT) |
32 | 0 | #define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT) |
33 | 0 | #define MSI_DATA_TRIGGER_MASK 0x00008000 |
34 | | |
35 | | /* |
36 | | * Shift/mask fields for msi address |
37 | | */ |
38 | | |
39 | 86 | #define MSI_ADDR_BASE_HI 0 |
40 | 86 | #define MSI_ADDR_BASE_LO 0xfee00000 |
41 | | #define MSI_ADDR_HEADER MSI_ADDR_BASE_LO |
42 | | |
43 | 170 | #define MSI_ADDR_DESTMODE_SHIFT 2 |
44 | 0 | #define MSI_ADDR_DESTMODE_PHYS (0 << MSI_ADDR_DESTMODE_SHIFT) |
45 | 86 | #define MSI_ADDR_DESTMODE_LOGIC (1 << MSI_ADDR_DESTMODE_SHIFT) |
46 | 0 | #define MSI_ADDR_DESTMODE_MASK 0x4 |
47 | | |
48 | 86 | #define MSI_ADDR_REDIRECTION_SHIFT 3 |
49 | 0 | #define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT) |
50 | 86 | #define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT) |
51 | | #define MSI_ADDR_REDIRECTION_MASK (1 << MSI_ADDR_REDIRECTION_SHIFT) |
52 | | |
53 | 130 | #define MSI_ADDR_DEST_ID_SHIFT 12 |
54 | 172 | #define MSI_ADDR_DEST_ID_MASK 0x00ff000 |
55 | 130 | #define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & MSI_ADDR_DEST_ID_MASK) |
56 | | |
57 | | /* MAX fixed pages reserved for mapping MSIX tables. */ |
58 | 14 | #define FIX_MSIX_MAX_PAGES 512 |
59 | | |
60 | | struct msi_info { |
61 | | u16 seg; |
62 | | u8 bus; |
63 | | u8 devfn; |
64 | | int irq; |
65 | | int entry_nr; |
66 | | uint64_t table_base; |
67 | | }; |
68 | | |
69 | | struct msi_msg { |
70 | | union { |
71 | | u64 address; /* message address */ |
72 | | struct { |
73 | | u32 address_lo; /* message address low 32 bits */ |
74 | | u32 address_hi; /* message address high 32 bits */ |
75 | | }; |
76 | | }; |
77 | | u32 data; /* 16 bits of msi message data */ |
78 | | u32 dest32; /* used when Interrupt Remapping with EIM is enabled */ |
79 | | }; |
80 | | |
81 | | struct irq_desc; |
82 | | struct hw_interrupt_type; |
83 | | struct msi_desc; |
84 | | /* Helper functions */ |
85 | | extern int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc); |
86 | | extern void pci_disable_msi(struct msi_desc *desc); |
87 | | extern int pci_prepare_msix(u16 seg, u8 bus, u8 devfn, bool off); |
88 | | extern void pci_cleanup_msi(struct pci_dev *pdev); |
89 | | extern int setup_msi_irq(struct irq_desc *, struct msi_desc *); |
90 | | extern int __setup_msi_irq(struct irq_desc *, struct msi_desc *, |
91 | | const struct hw_interrupt_type *); |
92 | | extern void teardown_msi_irq(int irq); |
93 | | extern int msi_free_vector(struct msi_desc *entry); |
94 | | extern int pci_restore_msi_state(struct pci_dev *pdev); |
95 | | |
96 | | struct msi_desc { |
97 | | struct msi_attrib { |
98 | | __u8 type; /* {0: unused, 5h:MSI, 11h:MSI-X} */ |
99 | | __u8 pos; /* Location of the MSI capability */ |
100 | | __u8 maskbit : 1; /* mask/pending bit supported ? */ |
101 | | __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ |
102 | | __u8 host_masked : 1; |
103 | | __u8 guest_masked : 1; |
104 | | __u16 entry_nr; /* specific enabled entry */ |
105 | | } msi_attrib; |
106 | | |
107 | | bool irte_initialized; |
108 | | uint8_t gvec; /* guest vector. valid when pi_desc isn't NULL */ |
109 | | const struct pi_desc *pi_desc; /* pointer to posted descriptor */ |
110 | | |
111 | | struct list_head list; |
112 | | |
113 | | union { |
114 | | void __iomem *mask_base;/* va for the entry in mask table */ |
115 | | struct { |
116 | | unsigned int nvec;/* number of vectors */ |
117 | | unsigned int mpos;/* location of mask register */ |
118 | | } msi; |
119 | | unsigned int hpet_id; /* HPET (dev is NULL) */ |
120 | | }; |
121 | | struct pci_dev *dev; |
122 | | int irq; |
123 | | int remap_index; /* index in interrupt remapping table */ |
124 | | |
125 | | struct msi_msg msg; /* Last set MSI message */ |
126 | | }; |
127 | | |
128 | | /* |
129 | | * Values stored into msi_desc.msi_attrib.pos for non-PCI devices |
130 | | * (msi_desc.msi_attrib.type is zero): |
131 | | */ |
132 | | #define MSI_TYPE_UNKNOWN 0 |
133 | 0 | #define MSI_TYPE_HPET 1 |
134 | 1 | #define MSI_TYPE_IOMMU 2 |
135 | | |
136 | | int msi_maskable_irq(const struct msi_desc *); |
137 | | int msi_free_irq(struct msi_desc *entry); |
138 | | |
139 | | /* |
140 | | * Assume the maximum number of hot plug slots supported by the system is about |
141 | | * ten. The worst case is that each of these slots is hot-added with a device, |
142 | | * which has two MSI/MSI-X capable functions. To avoid any MSI-X driver, which |
143 | | * attempts to request all available vectors, NR_HP_RESERVED_VECTORS is defined |
144 | | * as below to ensure at least one message is assigned to each detected MSI/ |
145 | | * MSI-X device function. |
146 | | */ |
147 | | #define NR_HP_RESERVED_VECTORS 20 |
148 | | |
149 | 54 | #define msi_control_reg(base) (base + PCI_MSI_FLAGS) |
150 | 39 | #define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO) |
151 | 29 | #define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI) |
152 | | #define msi_data_reg(base, is64bit) \ |
153 | 39 | ( (is64bit == 1) ? base+PCI_MSI_DATA_64 : base+PCI_MSI_DATA_32 ) |
154 | | #define msi_mask_bits_reg(base, is64bit) \ |
155 | 13 | ( (is64bit == 1) ? base+PCI_MSI_MASK_BIT : base+PCI_MSI_MASK_BIT-4) |
156 | | #define msi_pending_bits_reg(base, is64bit) \ |
157 | 7 | ( (is64bit == 1) ? base+PCI_MSI_MASK_BIT+4 : base+PCI_MSI_MASK_BIT) |
158 | | #define msi_disable(control) control &= ~PCI_MSI_FLAGS_ENABLE |
159 | | #define multi_msi_capable(control) \ |
160 | 27 | (1 << ((control & PCI_MSI_FLAGS_QMASK) >> 1)) |
161 | | #define multi_msi_enable(control, num) \ |
162 | 6 | control |= (((fls(num) - 1) << 4) & PCI_MSI_FLAGS_QSIZE); |
163 | 27 | #define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT)) |
164 | 27 | #define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT)) |
165 | | #define msi_enable(control, num) multi_msi_enable(control, num); \ |
166 | | control |= PCI_MSI_FLAGS_ENABLE |
167 | | |
168 | 380 | #define msix_control_reg(base) (base + PCI_MSIX_FLAGS) |
169 | 41 | #define msix_table_offset_reg(base) (base + PCI_MSIX_TABLE) |
170 | 13 | #define msix_pba_offset_reg(base) (base + PCI_MSIX_PBA) |
171 | | #define msix_enable(control) control |= PCI_MSIX_FLAGS_ENABLE |
172 | | #define msix_disable(control) control &= ~PCI_MSIX_FLAGS_ENABLE |
173 | 41 | #define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1) |
174 | 36 | #define multi_msix_capable msix_table_size |
175 | | #define msix_unmask(address) (address & ~PCI_MSIX_VECTOR_BITMASK) |
176 | | #define msix_mask(address) (address | PCI_MSIX_VECTOR_BITMASK) |
177 | | |
178 | | /* |
179 | | * MSI Defined Data Structures |
180 | | */ |
181 | | |
182 | | struct __packed msg_data { |
183 | | #if defined(__LITTLE_ENDIAN_BITFIELD) |
184 | | __u32 vector : 8; |
185 | | __u32 delivery_mode : 3; /* 000b: FIXED | 001b: lowest prior */ |
186 | | __u32 reserved_1 : 3; |
187 | | __u32 level : 1; /* 0: deassert | 1: assert */ |
188 | | __u32 trigger : 1; /* 0: edge | 1: level */ |
189 | | __u32 reserved_2 : 16; |
190 | | #elif defined(__BIG_ENDIAN_BITFIELD) |
191 | | __u32 reserved_2 : 16; |
192 | | __u32 trigger : 1; /* 0: edge | 1: level */ |
193 | | __u32 level : 1; /* 0: deassert | 1: assert */ |
194 | | __u32 reserved_1 : 3; |
195 | | __u32 delivery_mode : 3; /* 000b: FIXED | 001b: lowest prior */ |
196 | | __u32 vector : 8; |
197 | | #else |
198 | | #error "Bitfield endianness not defined! Check your byteorder.h" |
199 | | #endif |
200 | | }; |
201 | | |
202 | | struct __packed msg_address { |
203 | | union { |
204 | | struct { |
205 | | #if defined(__LITTLE_ENDIAN_BITFIELD) |
206 | | __u32 reserved_1 : 2; |
207 | | __u32 dest_mode : 1; /* 0: physical | 1: logical */ |
208 | | __u32 redirection_hint: 1; /*0: dedicated CPU |
209 | | 1: lowest priority */ |
210 | | __u32 reserved_2 : 4; |
211 | | __u32 dest_id : 24; /* Destination ID */ |
212 | | #elif defined(__BIG_ENDIAN_BITFIELD) |
213 | | __u32 dest_id : 24; /* Destination ID */ |
214 | | __u32 reserved_2 : 4; |
215 | | __u32 redirection_hint: 1; /*0: dedicated CPU |
216 | | 1: lowest priority */ |
217 | | __u32 dest_mode : 1; /* 0: physical | 1: logical */ |
218 | | __u32 reserved_1 : 2; |
219 | | #else |
220 | | #error "Bitfield endianness not defined! Check your byteorder.h" |
221 | | #endif |
222 | | }u; |
223 | | __u32 value; |
224 | | }lo_address; |
225 | | __u32 hi_address; |
226 | | }; |
227 | | |
228 | | #define MAX_MSIX_TABLE_ENTRIES (PCI_MSIX_FLAGS_QSIZE + 1) |
229 | 36 | #define MAX_MSIX_TABLE_PAGES PFN_UP(MAX_MSIX_TABLE_ENTRIES * \ |
230 | | PCI_MSIX_ENTRY_SIZE + \ |
231 | | (~PCI_MSIX_BIRMASK & (PAGE_SIZE - 1))) |
232 | | |
233 | | struct arch_msix { |
234 | | unsigned int nr_entries, used_entries; |
235 | | struct { |
236 | | unsigned long first, last; |
237 | | } table, pba; |
238 | | int table_refcnt[MAX_MSIX_TABLE_PAGES]; |
239 | | int table_idx[MAX_MSIX_TABLE_PAGES]; |
240 | | spinlock_t table_lock; |
241 | | bool host_maskall, guest_maskall; |
242 | | domid_t warned; |
243 | | }; |
244 | | |
245 | | void early_msi_init(void); |
246 | | void msi_compose_msg(unsigned vector, const cpumask_t *mask, |
247 | | struct msi_msg *msg); |
248 | | void __msi_set_enable(u16 seg, u8 bus, u8 slot, u8 func, int pos, int enable); |
249 | | void mask_msi_irq(struct irq_desc *); |
250 | | void unmask_msi_irq(struct irq_desc *); |
251 | | void guest_mask_msi_irq(struct irq_desc *, bool mask); |
252 | | void ack_nonmaskable_msi_irq(struct irq_desc *); |
253 | | void end_nonmaskable_msi_irq(struct irq_desc *, u8 vector); |
254 | | void set_msi_affinity(struct irq_desc *, const cpumask_t *); |
255 | | |
256 | | #endif /* __ASM_MSI_H */ |