VirtualBox

source: vbox/trunk/src/VBox/Devices/Bus/MsiCommon.cpp@71768

Last change on this file since 71768 was 71768, checked in by vboxsync, 7 years ago

Msi: Some R3 prefixes and some cleanup.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 10.5 KB
/* $Id: MsiCommon.cpp 71768 2018-04-09 14:10:52Z vboxsync $ */
/** @file
 * MSI support routines
 *
 * @todo Straighten up this file!!
 */

/*
 * Copyright (C) 2010-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

#define LOG_GROUP LOG_GROUP_DEV_PCI
#define PDMPCIDEV_INCLUDE_PRIVATE /* Hack to get pdmpcidevint.h included at the right point. */
#include <VBox/pci.h>
#include <VBox/msi.h>
#include <VBox/vmm/pdmdev.h>
#include <VBox/log.h>

#include "MsiCommon.h"
#include "PciInline.h"

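/** Reads the 16-bit Message Control register of the MSI capability.  In
 *  ring-3, pass-through devices are read via the config-read callback;
 *  otherwise the cached config space is used. */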
DECLINLINE(uint16_t) msiGetMessageControl(PPDMPCIDEV pDev)
{
    uint32_t idxMessageControl = pDev->Int.s.u8MsiCapOffset + VBOX_MSI_CAP_MESSAGE_CONTROL;
#ifdef IN_RING3
    if (pciDevIsPassthrough(pDev))
        return pDev->Int.s.pfnConfigRead(pDev->Int.s.CTX_SUFF(pDevIns), pDev, idxMessageControl, 2);
#endif
    return PCIDevGetWord(pDev, idxMessageControl);
}

DECLINLINE(bool) msiIs64Bit(PPDMPCIDEV pDev)
{
    return pciDevIsMsi64Capable(pDev);
}

/** @todo r=klaus This design assumes that the config space cache is always
 * up to date, which is a wrong assumption for the "emulate passthrough" case
 * where only the callbacks give the correct data. */
DECLINLINE(uint32_t *) msiGetMaskBits(PPDMPCIDEV pDev)
{
    uint8_t iOff = msiIs64Bit(pDev) ? VBOX_MSI_CAP_MASK_BITS_64 : VBOX_MSI_CAP_MASK_BITS_32;
    /* devices may have no masked/pending support */
    if (iOff >= pDev->Int.s.u8MsiCapSize)
        return NULL;
    iOff += pDev->Int.s.u8MsiCapOffset;
    return (uint32_t*)(pDev->abConfig + iOff);
}

/** @todo r=klaus This design assumes that the config space cache is always
 * up to date, which is a wrong assumption for the "emulate passthrough" case
 * where only the callbacks give the correct data. */
DECLINLINE(uint32_t*) msiGetPendingBits(PPDMPCIDEV pDev)
{
    uint8_t iOff = msiIs64Bit(pDev) ? VBOX_MSI_CAP_PENDING_BITS_64 : VBOX_MSI_CAP_PENDING_BITS_32;
    /* devices may have no masked/pending support */
    if (iOff >= pDev->Int.s.u8MsiCapSize)
        return NULL;
    iOff += pDev->Int.s.u8MsiCapOffset;
    return (uint32_t*)(pDev->abConfig + iOff);
}

DECLINLINE(bool) msiIsEnabled(PPDMPCIDEV pDev)
{
    return (msiGetMessageControl(pDev) & VBOX_PCI_MSI_FLAGS_ENABLE) != 0;
}

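/** Returns the Multiple Message Enable field (Message Control bits 6:4),
 *  i.e. the log2 of the number of vectors the guest has enabled. */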
DECLINLINE(uint8_t) msiGetMme(PPDMPCIDEV pDev)
{
    return (msiGetMessageControl(pDev) & VBOX_PCI_MSI_FLAGS_QSIZE) >> 4;
}

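/** Returns the MSI message address, combining the low and high dwords for
 *  64-bit capable devices. */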
DECLINLINE(RTGCPHYS) msiGetMsiAddress(PPDMPCIDEV pDev)
{
    if (msiIs64Bit(pDev))
    {
        uint32_t lo = PCIDevGetDWord(pDev, pDev->Int.s.u8MsiCapOffset + VBOX_MSI_CAP_MESSAGE_ADDRESS_LO);
        uint32_t hi = PCIDevGetDWord(pDev, pDev->Int.s.u8MsiCapOffset + VBOX_MSI_CAP_MESSAGE_ADDRESS_HI);
        return RT_MAKE_U64(lo, hi);
    }
    return PCIDevGetDWord(pDev, pDev->Int.s.u8MsiCapOffset + VBOX_MSI_CAP_MESSAGE_ADDRESS_32);
}

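/** Returns the MSI message data for the given vector: the vector index is
 *  encoded into the low bits of the data register, where the number of
 *  replaceable bits is given by the Multiple Message Enable field. */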
DECLINLINE(uint32_t) msiGetMsiData(PPDMPCIDEV pDev, int32_t iVector)
{
    int16_t iOff = msiIs64Bit(pDev) ? VBOX_MSI_CAP_MESSAGE_DATA_64 : VBOX_MSI_CAP_MESSAGE_DATA_32;
    uint16_t lo = PCIDevGetWord(pDev, pDev->Int.s.u8MsiCapOffset + iOff);

    // vector encoding into lower bits of message data
    uint8_t bits = msiGetMme(pDev);
    uint16_t uMask = ((1 << bits) - 1);
    lo &= ~uMask;
    lo |= iVector & uMask;

    return RT_MAKE_U32(lo, 0);
}

#ifdef IN_RING3

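/* Helpers for detecting 1->0 and 0->1 transitions of a single bit between the
   old and new value of a config byte. */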
DECLINLINE(bool) msiR3BitJustCleared(uint32_t uOldValue, uint32_t uNewValue, uint32_t uMask)
{
    return !!(uOldValue & uMask) && !(uNewValue & uMask);
}

DECLINLINE(bool) msiR3BitJustSet(uint32_t uOldValue, uint32_t uNewValue, uint32_t uMask)
{
    return !(uOldValue & uMask) && !!(uNewValue & uMask);
}

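/** Handles a guest write to the MSI capability region, byte by byte.  Read-only
 *  bits of Message Control are preserved; clearing a mask bit while MSI is
 *  enabled delivers any interrupt recorded as pending for that vector. */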
void MsiR3PciConfigWrite(PPDMDEVINS pDevIns, PCPDMPCIHLP pPciHlp, PPDMPCIDEV pDev,
                         uint32_t u32Address, uint32_t val, unsigned len)
{
    int32_t iOff = u32Address - pDev->Int.s.u8MsiCapOffset;
    Assert(iOff >= 0 && (pciDevIsMsiCapable(pDev) && iOff < pDev->Int.s.u8MsiCapSize));

    Log2(("MsiR3PciConfigWrite: %d <- %x (%d)\n", iOff, val, len));

    uint32_t uAddr = u32Address;
    bool f64Bit = msiIs64Bit(pDev);

    for (uint32_t i = 0; i < len; i++)
    {
        uint32_t reg = i + iOff;
        uint8_t u8Val = (uint8_t)val;
        switch (reg)
        {
            case 0: /* Capability ID, ro */
            case 1: /* Next pointer, ro */
                break;
            case VBOX_MSI_CAP_MESSAGE_CONTROL:
                /* don't change read-only bits: 1-3,7 */
                u8Val &= UINT8_C(~0x8e);
                pDev->abConfig[uAddr] = u8Val | (pDev->abConfig[uAddr] & UINT8_C(0x8e));
                break;
            case VBOX_MSI_CAP_MESSAGE_CONTROL + 1:
                /* don't change read-only bit 8, and reserved 9-15 */
                break;
            default:
                if (pDev->abConfig[uAddr] != u8Val)
                {
                    int32_t maskUpdated = -1;

                    /* If we're enabling masked vector, and have pending messages
                       for this vector, we have to send this message now */
                    if (   !f64Bit
                        && (reg >= VBOX_MSI_CAP_MASK_BITS_32)
                        && (reg < VBOX_MSI_CAP_MASK_BITS_32 + 4)
                       )
                    {
                        maskUpdated = reg - VBOX_MSI_CAP_MASK_BITS_32;
                    }
                    if (   f64Bit
                        && (reg >= VBOX_MSI_CAP_MASK_BITS_64)
                        && (reg < VBOX_MSI_CAP_MASK_BITS_64 + 4)
                       )
                    {
                        maskUpdated = reg - VBOX_MSI_CAP_MASK_BITS_64;
                    }

                    if (maskUpdated != -1 && msiIsEnabled(pDev))
                    {
                        uint32_t* puPending = msiGetPendingBits(pDev);
                        for (int iBitNum = 0; iBitNum < 8; iBitNum++)
                        {
                            int32_t iBit = 1 << iBitNum;
                            uint32_t uVector = maskUpdated*8 + iBitNum;

                            if (msiR3BitJustCleared(pDev->abConfig[uAddr], u8Val, iBit))
                            {
                                Log(("msi: mask updated bit %d@%x (%d)\n", iBitNum, uAddr, maskUpdated));

                                /* To ensure that we're no longer masked */
                                pDev->abConfig[uAddr] &= ~iBit;
                                if ((*puPending & (1 << uVector)) != 0)
                                {
                                    Log(("msi: notify earlier masked pending vector: %d\n", uVector));
                                    MsiNotify(pDevIns, pPciHlp, pDev, uVector, PDM_IRQ_LEVEL_HIGH, 0 /*uTagSrc*/);
                                }
                            }
                            if (msiR3BitJustSet(pDev->abConfig[uAddr], u8Val, iBit))
                            {
                                Log(("msi: mask vector: %d\n", uVector));
                            }
                        }
                    }

                    pDev->abConfig[uAddr] = u8Val;
                }
        }
        uAddr++;
        val >>= 8;
    }
}

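/** Sets up the MSI capability in the device's config space from the PDMMSIREG
 *  registration data: capability ID and next pointer, Message Control (multiple
 *  message capable count, optional per-vector masking, optional 64-bit
 *  addressing), and zeroed mask/pending registers when masking is supported. */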
int MsiR3Init(PPDMPCIDEV pDev, PPDMMSIREG pMsiReg)
{
    if (pMsiReg->cMsiVectors == 0)
        return VINF_SUCCESS;

    /* XXX: done in pcirawAnalyzePciCaps() */
    if (pciDevIsPassthrough(pDev))
        return VINF_SUCCESS;

    uint16_t cVectors = pMsiReg->cMsiVectors;
    uint8_t iCapOffset = pMsiReg->iMsiCapOffset;
    uint8_t iNextOffset = pMsiReg->iMsiNextOffset;
    bool f64bit = pMsiReg->fMsi64bit;
    bool fNoMasking = pMsiReg->fMsiNoMasking;
    uint16_t iFlags = 0;

    Assert(iCapOffset != 0 && iCapOffset < 0xff && iNextOffset < 0xff);

    if (!fNoMasking)
    {
        int iMmc;

        /* Compute multiple-message capable bitfield */
        for (iMmc = 0; iMmc < 6; iMmc++)
        {
            if ((1 << iMmc) >= cVectors)
                break;
        }

        if ((cVectors > VBOX_MSI_MAX_ENTRIES) || (1 << iMmc) < cVectors)
            return VERR_TOO_MUCH_DATA;

        /* We support per-vector masking */
        iFlags |= VBOX_PCI_MSI_FLAGS_MASKBIT;
        /* How many vectors we're capable of */
        iFlags |= iMmc;
    }

    if (f64bit)
        iFlags |= VBOX_PCI_MSI_FLAGS_64BIT;

    pDev->Int.s.u8MsiCapOffset = iCapOffset;
    pDev->Int.s.u8MsiCapSize = f64bit ? VBOX_MSI_CAP_SIZE_64 : VBOX_MSI_CAP_SIZE_32;

    PCIDevSetByte(pDev, iCapOffset + 0, VBOX_PCI_CAP_ID_MSI);
    PCIDevSetByte(pDev, iCapOffset + 1, iNextOffset); /* next */
    PCIDevSetWord(pDev, iCapOffset + VBOX_MSI_CAP_MESSAGE_CONTROL, iFlags);

    if (!fNoMasking)
    {
        *msiGetMaskBits(pDev) = 0;
        *msiGetPendingBits(pDev) = 0;
    }

    pciDevSetMsiCapable(pDev);
    if (f64bit)
        pciDevSetMsi64Capable(pDev);

    return VINF_SUCCESS;
}

#endif /* IN_RING3 */


bool MsiIsEnabled(PPDMPCIDEV pDev)
{
    return pciDevIsMsiCapable(pDev) && msiIsEnabled(pDev);
}

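/** Raises the given MSI vector.  Only level-up triggers are acted upon; if the
 *  vector is masked it is marked pending instead, otherwise the message
 *  address/data pair is handed to the I/O APIC via pfnIoApicSendMsi. */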
void MsiNotify(PPDMDEVINS pDevIns, PCPDMPCIHLP pPciHlp, PPDMPCIDEV pDev, int iVector, int iLevel, uint32_t uTagSrc)
{
    AssertMsg(msiIsEnabled(pDev), ("Must be enabled to use that"));

    uint32_t uMask;
    uint32_t *puPending = msiGetPendingBits(pDev);
    if (puPending)
    {
        uint32_t *puMask = msiGetMaskBits(pDev);
        AssertPtr(puMask);
        uMask = *puMask;
        LogFlow(("MsiNotify: %d pending=%x mask=%x\n", iVector, *puPending, uMask));
    }
    else
    {
        uMask = 0;
        LogFlow(("MsiNotify: %d\n", iVector));
    }

    /* We only trigger MSI on level up */
    if ((iLevel & PDM_IRQ_LEVEL_HIGH) == 0)
    {
        /** @todo maybe clear pending interrupts on level down? */
#if 0
        if (puPending)
        {
            *puPending &= ~(1<<iVector);
            LogFlow(("msi: clear pending %d, now %x\n", iVector, *puPending));
        }
#endif
        return;
    }

    if ((uMask & (1<<iVector)) != 0)
    {
        *puPending |= (1<<iVector);
        LogFlow(("msi: %d is masked, mark pending, now %x\n", iVector, *puPending));
        return;
    }

    RTGCPHYS GCAddr = msiGetMsiAddress(pDev);
    uint32_t u32Value = msiGetMsiData(pDev, iVector);

    if (puPending)
        *puPending &= ~(1<<iVector);

    Assert(pPciHlp->pfnIoApicSendMsi != NULL);
    pPciHlp->pfnIoApicSendMsi(pDevIns, GCAddr, u32Value, uTagSrc);
}