VirtualBox

source: vbox/trunk/src/VBox/Devices/VirtIO/VirtioCore.cpp@109188

Last change on this file since 109188 was 109188, checked in by vboxsync, 2 weeks ago

VirtioNet: Temporary code from 7.0 branch for debugging descriptor loop issue bugref:10572

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 147.5 KB
1/* $Id: VirtioCore.cpp 109188 2025-05-07 07:42:49Z vboxsync $ */
2
3/** @file
4 * VirtioCore - Virtio Core (PCI, feature & config mgt, queue mgt & proxy, notification mgt)
5 */
6
7/*
8 * Copyright (C) 2009-2024 Oracle and/or its affiliates.
9 *
10 * This file is part of VirtualBox base platform packages, as
11 * available from https://www.215389.xyz.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation, in version 3 of the
16 * License.
17 *
18 * This program is distributed in the hope that it will be useful, but
19 * WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, see <https://www.gnu.org/licenses>.
25 *
26 * SPDX-License-Identifier: GPL-3.0-only
27 */
28
29
30/*********************************************************************************************************************************
31* Header Files *
32*********************************************************************************************************************************/
33#define LOG_GROUP LOG_GROUP_DEV_VIRTIO
34
35#include <iprt/assert.h>
36#include <iprt/uuid.h>
37#include <iprt/mem.h>
38#include <iprt/sg.h>
40#include <iprt/string.h>
41#include <iprt/param.h>
42#include <iprt/types.h>
43#include <VBox/log.h>
44#include <VBox/msi.h>
46#include <VBox/AssertGuest.h>
47#include <VBox/vmm/pdmdev.h>
48#include "VirtioCore.h"
49
50#ifdef VIRTIO_REL_INFO_DUMP
51#include <iprt/trace.h>
52#endif /* VIRTIO_REL_INFO_DUMP */
53
54/*********************************************************************************************************************************
55* Defined Constants And Macros *
56*********************************************************************************************************************************/
57
58#define INSTANCE(a_pVirtio) ((a_pVirtio)->szInstance)
59#define VIRTQNAME(a_pVirtio, a_uVirtq) ((a_pVirtio)->aVirtqueues[(a_uVirtq)].szName)
60
61#define IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq) \
62 (virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq) == 0)
63
64#define IS_DRIVER_OK(a_pVirtio) ((a_pVirtio)->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
65#define WAS_DRIVER_OK(a_pVirtio) ((a_pVirtio)->fPrevDeviceStatus & VIRTIO_STATUS_DRIVER_OK)
66
67/**
68 * These defines are used to track the guest virtio-net driver writing the accepted driver-features
69 * flags in two 32-bit operations (in arbitrary order), with one bit dedicated to ensuring the
70 * 'features complete' logic is handled only once.
71 */
72#define DRIVER_FEATURES_0_WRITTEN 1 /**< fDriverFeatures[0] written by guest virtio-net */
73#define DRIVER_FEATURES_1_WRITTEN 2 /**< fDriverFeatures[1] written by guest virtio-net */
74#define DRIVER_FEATURES_0_AND_1_WRITTEN 3 /**< Both 32-bit parts of fDriverFeatures[] written */
75#define DRIVER_FEATURES_COMPLETE_HANDLED 4 /**< Features negotiation complete handler called */
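/*
 * Illustrative sketch (editorial, not part of the shipped code): how a common-config write
 * handler might use these flags. The tracking field name pVirtio->fDriverFeaturesWritten is
 * taken from VirtioCore.h but should be treated as an assumption here:
 *
 * @code
 *     pVirtio->fDriverFeaturesWritten |= DRIVER_FEATURES_0_WRITTEN;    // guest wrote low 32 bits
 *     // ... later, in arbitrary order ...
 *     pVirtio->fDriverFeaturesWritten |= DRIVER_FEATURES_1_WRITTEN;    // guest wrote high 32 bits
 *     if (   (pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_0_AND_1_WRITTEN) == DRIVER_FEATURES_0_AND_1_WRITTEN
 *         && !(pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_COMPLETE_HANDLED))
 *     {
 *         pVirtio->fDriverFeaturesWritten |= DRIVER_FEATURES_COMPLETE_HANDLED;
 *         // run the features-negotiation-complete handler exactly once
 *     }
 * @endcode
 */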
76
77/**
78 * This macro returns true if the @a a_offAccess and access length (@a
79 * a_cbAccess) are within the range of the mapped capability struct described by
80 * @a a_LocCapData.
81 *
82 * @param[in] a_offAccess Input: The offset into the MMIO bar of the access.
83 * @param[in] a_cbAccess Input: The access size.
84 * @param[out] a_offsetIntoCap Output: uint32_t variable to return the intra-capability offset into.
85 * @param[in] a_LocCapData Input: The capability location info.
86 */
87#define MATCHES_VIRTIO_CAP_STRUCT(a_offAccess, a_cbAccess, a_offsetIntoCap, a_LocCapData) \
88 ( ((a_offsetIntoCap) = (uint32_t)((a_offAccess) - (a_LocCapData).offMmio)) < (uint32_t)(a_LocCapData).cbMmio \
89 && (a_offsetIntoCap) + (uint32_t)(a_cbAccess) <= (uint32_t)(a_LocCapData).cbMmio )
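/*
 * Illustrative sketch (editorial): typical use in an MMIO access handler. The local name
 * offIntra is hypothetical; pVirtio->LocDeviceCap is assumed to be one of the capability
 * location records this macro is designed for:
 *
 * @code
 *     uint32_t offIntra;
 *     if (MATCHES_VIRTIO_CAP_STRUCT(offAccess, cbAccess, offIntra, pVirtio->LocDeviceCap))
 *     {
 *         // The access falls entirely within the device-specific config capability;
 *         // offIntra now holds the offset relative to the start of that region.
 *     }
 * @endcode
 */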
90
91
92/*********************************************************************************************************************************
93* Structures and Typedefs *
94*********************************************************************************************************************************/
95
96/** @name virtq related flags
97 * @{ */
98#define VIRTQ_DESC_F_NEXT 1 /**< Indicates this descriptor chains to next */
99#define VIRTQ_DESC_F_WRITE 2 /**< Marks buffer as write-only (default ro) */
100#define VIRTQ_DESC_F_INDIRECT 4 /**< Buffer is list of buffer descriptors */
101
102#define VIRTQ_USED_F_NO_NOTIFY 1 /**< Dev to Drv: Don't notify when buf added */
103#define VIRTQ_AVAIL_F_NO_INTERRUPT 1 /**< Drv to Dev: Don't notify when buf eaten */
104/** @} */
105
106/**
107 * virtq-related structs
108 * (struct names follow the VirtIO 1.0 spec; field names use VBox-style naming, with the respective spec name in comments)
109 */
110typedef struct virtq_desc
111{
112 uint64_t GCPhysBuf; /**< addr GC Phys. address of buffer */
113 uint32_t cb; /**< len Buffer length */
114 uint16_t fFlags; /**< flags Buffer specific flags */
115 uint16_t uDescIdxNext; /**< next Idx set if VIRTIO_DESC_F_NEXT */
116} VIRTQ_DESC_T, *PVIRTQ_DESC_T;
117
118typedef struct virtq_avail
119{
120 uint16_t fFlags; /**< flags avail ring guest-to-host flags */
121 uint16_t uIdx; /**< idx Index of next free ring slot */
122 RT_FLEXIBLE_ARRAY_EXTENSION
123 uint16_t auRing[RT_FLEXIBLE_ARRAY]; /**< ring Ring: avail drv to dev bufs */
124 //uint16_t uUsedEventIdx; /**< used_event (if VIRTQ_USED_F_EVENT_IDX) */
125} VIRTQ_AVAIL_T, *PVIRTQ_AVAIL_T;
126
127typedef struct virtq_used_elem
128{
129 uint32_t uDescIdx; /**< idx Start of used desc chain */
130 uint32_t cbElem; /**< len Total len of used desc chain */
131} VIRTQ_USED_ELEM_T;
132
133typedef struct virt_used
134{
135 uint16_t fFlags; /**< flags used ring host-to-guest flags */
136 uint16_t uIdx; /**< idx Index of next ring slot */
137 RT_FLEXIBLE_ARRAY_EXTENSION
138 VIRTQ_USED_ELEM_T aRing[RT_FLEXIBLE_ARRAY]; /**< ring Ring: used dev to drv bufs */
139 //uint16_t uAvailEventIdx; /**< avail_event if (VIRTQ_USED_F_EVENT_IDX) */
140} VIRTQ_USED_T, *PVIRTQ_USED_T;
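/*
 * Editorial note: per the VirtIO 1.0 spec (section 2.4), each virtqueue consists of three
 * guest-physical areas which the structs above mirror:
 *
 *     Descriptor table: VIRTQ_DESC_T[uQueueSize]
 *     Avail ring:       fFlags, uIdx, auRing[uQueueSize], then used_event  (if VIRTIO_F_EVENT_IDX)
 *     Used ring:        fFlags, uIdx, aRing[uQueueSize],  then avail_event (if VIRTIO_F_EVENT_IDX)
 *
 * The trailing used_event/avail_event words are why the accessors below address
 * RT_UOFFSETOF_DYN(..., ring[uQueueSize]): the event index sits immediately past the ring.
 */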
141
142DECLHIDDEN(const char *) virtioCoreGetStateChangeText(VIRTIOVMSTATECHANGED enmState)
143{
144 switch (enmState)
145 {
146 case kvirtIoVmStateChangedReset: return "VM RESET";
147 case kvirtIoVmStateChangedSuspend: return "VM SUSPEND";
148 case kvirtIoVmStateChangedPowerOff: return "VM POWER OFF";
149 case kvirtIoVmStateChangedResume: return "VM RESUME";
150 default: return "<BAD ENUM>";
151 }
152}
153
154/* Internal Functions */
155
156static void virtioCoreNotifyGuestDriver(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq);
157static int virtioNudgeGuest(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uVec);
158
159#ifdef IN_RING3
160# ifdef LOG_ENABLED
161DECLINLINE(uint16_t) virtioCoreR3CountPendingBufs(uint16_t uRingIdx, uint16_t uShadowIdx, uint16_t uQueueSize)
162{
163 if (uShadowIdx == uRingIdx)
164 return 0;
165 else if (uShadowIdx > uRingIdx)
167 return uShadowIdx - uRingIdx;
168 return uQueueSize - (uRingIdx - uShadowIdx);
169}
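/*
 * Editorial worked example: with uQueueSize = 256, uRingIdx = 250 and uShadowIdx = 4 the
 * shadow index has wrapped past the ring index, so the count of pending buffers is
 * 256 - (250 - 4) = 10.
 */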
170# endif
171#endif
172/** @name Internal queue operations
173 * @{ */
174
175/**
176 * Accessor for virtq descriptor
177 */
178#ifdef IN_RING3
179DECLINLINE(void) virtioReadDesc(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq,
180 uint32_t idxDesc, PVIRTQ_DESC_T pDesc)
181{
182 /*
183 * Shut up assertion for legacy virtio-net driver in FreeBSD up to 12.3 (see virtioCoreR3VirtqUsedBufPut()
184 * for more information).
185 */
186 AssertMsg( IS_DRIVER_OK(pVirtio)
187 || ( pVirtio->fLegacyDriver
188 && pVirtq->GCPhysVirtqDesc),
189 ("Called with guest driver not ready\n"));
190 uint16_t const cVirtqItems = RT_MAX(pVirtq->uQueueSize, 1); /* Make sure to avoid div-by-zero. */
191
192 virtioCoreGCPhysRead(pVirtio, pDevIns,
193 pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * (idxDesc % cVirtqItems),
194 pDesc, sizeof(VIRTQ_DESC_T));
195}
196#endif
197
198/**
199 * Accessors for virtq avail ring
200 */
201#ifdef IN_RING3
202DECLINLINE(uint16_t) virtioReadAvailDescIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint32_t availIdx)
203{
204 uint16_t uDescIdx;
205
206 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
207 uint16_t const cVirtqItems = RT_MAX(pVirtq->uQueueSize, 1); /* Make sure to avoid div-by-zero. */
208 virtioCoreGCPhysRead(pVirtio, pDevIns,
209 pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[availIdx % cVirtqItems]),
210 &uDescIdx, sizeof(uDescIdx));
211 return uDescIdx;
212}
213
214DECLINLINE(uint16_t) virtioReadAvailUsedEvent(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
215{
216 uint16_t uUsedEventIdx;
217 /* VirtIO 1.0 uUsedEventIdx (used_event) immediately follows ring */
218 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
219 virtioCoreGCPhysRead(pVirtio, pDevIns,
220 pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtq->uQueueSize]),
221 &uUsedEventIdx, sizeof(uUsedEventIdx));
222 return uUsedEventIdx;
223}
224#endif
225
226DECLINLINE(uint16_t) virtioReadAvailRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
227{
228 uint16_t uIdx = 0;
229 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
230 virtioCoreGCPhysRead(pVirtio, pDevIns,
231 pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_AVAIL_T, uIdx),
232 &uIdx, sizeof(uIdx));
233 return uIdx;
234}
235
236DECLINLINE(uint16_t) virtioReadAvailRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
237{
238 uint16_t fFlags = 0;
239 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
240 virtioCoreGCPhysRead(pVirtio, pDevIns,
241 pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF(VIRTQ_AVAIL_T, fFlags),
242 &fFlags, sizeof(fFlags));
243 return fFlags;
244}
245
246/** @} */
247
248/** @name Accessors for virtq used ring
249 * @{
250 */
251
252#ifdef IN_RING3
253DECLINLINE(void) virtioWriteUsedElem(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq,
254 uint32_t usedIdx, uint32_t uDescIdx, uint32_t uLen)
255{
256 VIRTQ_USED_ELEM_T elem = { uDescIdx, uLen };
257 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
258 uint16_t const cVirtqItems = RT_MAX(pVirtq->uQueueSize, 1); /* Make sure to avoid div-by-zero. */
259 virtioCoreGCPhysWrite(pVirtio, pDevIns,
260 pVirtq->GCPhysVirtqUsed
261 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[usedIdx % cVirtqItems]),
262 &elem, sizeof(elem));
263}
264
265DECLINLINE(void) virtioWriteUsedRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint16_t fFlags)
266{
267 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
268 RT_UNTRUSTED_VALIDATED_FENCE(); /* VirtIO 1.0, Section 3.2.1.4.1 */
269 virtioCoreGCPhysWrite(pVirtio, pDevIns,
270 pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
271 &fFlags, sizeof(fFlags));
272}
273#endif
274
275DECLINLINE(void) virtioWriteUsedRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint16_t uIdx)
276{
277 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
278 RT_UNTRUSTED_VALIDATED_FENCE(); /* VirtIO 1.0, Section 3.2.1.4.1 */
279 virtioCoreGCPhysWrite(pVirtio, pDevIns,
280 pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
281 &uIdx, sizeof(uIdx));
282}
283
284#ifdef IN_RING3
285DECLINLINE(uint16_t) virtioReadUsedRingIdx(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
286{
287 uint16_t uIdx = 0;
288 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
289 virtioCoreGCPhysRead(pVirtio, pDevIns,
290 pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, uIdx),
291 &uIdx, sizeof(uIdx));
292 return uIdx;
293}
294
295DECLINLINE(uint16_t) virtioReadUsedRingFlags(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
296{
297 uint16_t fFlags = 0;
298 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
299 virtioCoreGCPhysRead(pVirtio, pDevIns,
300 pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF(VIRTQ_USED_T, fFlags),
301 &fFlags, sizeof(fFlags));
302 return fFlags;
303}
304
305DECLINLINE(void) virtioWriteUsedAvailEvent(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, uint32_t uAvailEventIdx)
306{
307 /* VirtIO 1.0 uAvailEventIdx (avail_event) immediately follows ring */
308 AssertMsg(pVirtio->fLegacyDriver || IS_DRIVER_OK(pVirtio), ("Called with guest driver not ready\n"));
309 virtioCoreGCPhysWrite(pVirtio, pDevIns,
310 pVirtq->GCPhysVirtqUsed
311 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[pVirtq->uQueueSize]),
312 &uAvailEventIdx, sizeof(uAvailEventIdx));
313}
314#endif
315/** @} */
316
317
318DECLINLINE(uint16_t) virtioCoreVirtqAvailCnt(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
319{
320 uint16_t uIdxActual = virtioReadAvailRingIdx(pDevIns, pVirtio, pVirtq);
321 uint16_t uIdxShadow = pVirtq->uAvailIdxShadow;
322 uint16_t uIdxDelta;
323
324 if (uIdxActual < uIdxShadow)
325 uIdxDelta = (uIdxActual + pVirtq->uQueueSize) - uIdxShadow;
326 else
327 uIdxDelta = uIdxActual - uIdxShadow;
328
329 return uIdxDelta;
330}
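/*
 * Editorial worked example for the common (non-wrapped) branch above: if the guest has
 * advanced the avail index to 7 while the device's shadow index is still 4, then
 * uIdxDelta = 7 - 4 = 3 buffers are pending. The first branch compensates for the case
 * where the actual index has wrapped around behind the shadow index.
 */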
331/**
332 * Get the count of new (i.e. pending) elements in the available ring.
333 *
334 * @param pDevIns The device instance.
335 * @param pVirtio Pointer to the shared virtio state.
336 * @param uVirtq Virtq number
337 *
338 * @returns The number of entries added to the ring, i.e. the delta between the guest
339 * driver's current avail index and the device's shadow (already consumed) index.
340 */
341DECLHIDDEN(uint16_t) virtioCoreVirtqAvailBufCount(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
342{
343 AssertMsgReturn(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues), ("uVirtq out of range"), 0);
344 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
345
346 if (!IS_DRIVER_OK(pVirtio))
347 {
348 LogRelFunc(("Driver not ready\n"));
349 return 0;
350 }
351 if (!pVirtio->fLegacyDriver && !pVirtq->uEnable)
352 {
353 LogRelFunc(("virtq: %s not enabled\n", VIRTQNAME(pVirtio, uVirtq)));
354 return 0;
355 }
356 return virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq);
357}
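/*
 * Illustrative sketch (editorial): a device worker typically polls this count before
 * fetching buffers; uMyVirtq is a hypothetical queue number:
 *
 * @code
 *     while (virtioCoreVirtqAvailBufCount(pDevIns, pVirtio, uMyVirtq) > 0)
 *     {
 *         // fetch via virtioCoreR3VirtqAvailBufGet(), process the chain, then
 *         // post it back with virtioCoreR3VirtqUsedBufPut() + virtioCoreVirtqUsedRingSync()
 *     }
 * @endcode
 */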
358
359#ifdef IN_RING3
360
361static void virtioCoreR3FeatureDump(VIRTIOCORE *pVirtio, PCDBGFINFOHLP pHlp, const VIRTIO_FEATURES_LIST *s_aFeatures, int cFeatures, int fBanner)
362{
363#define MAXLINE 80
364 /* Display as a single buf to prevent interleaved log messages */
365 uint16_t cbBuf = cFeatures * 132;
366 char *pszBuf = (char *)RTMemAllocZ(cbBuf);
367 Assert(pszBuf);
368 char *cp = pszBuf;
369 for (int i = 0; i < cFeatures; ++i)
370 {
371 bool isOffered = RT_BOOL(pVirtio->uDeviceFeatures & s_aFeatures[i].fFeatureBit);
372 bool isNegotiated = RT_BOOL(pVirtio->uDriverFeatures & s_aFeatures[i].fFeatureBit);
373 cp += RTStrPrintf(cp, cbBuf - (cp - pszBuf), " %s %s %s",
374 isOffered ? "+" : "-", isNegotiated ? "x" : " ", s_aFeatures[i].pcszDesc);
375 }
376 if (pHlp) {
377 if (fBanner)
378 pHlp->pfnPrintf(pHlp, "VirtIO Features Configuration\n\n"
379 " Offered Accepted Feature Description\n"
380 " ------- -------- ------- -----------\n");
381 pHlp->pfnPrintf(pHlp, "%s\n", pszBuf);
382 }
383#ifdef LOG_ENABLED
384 else
385 {
386 if (fBanner)
387 Log(("VirtIO Features Configuration\n\n"
388 " Offered Accepted Feature Description\n"
389 " ------- -------- ------- -----------\n"));
390 Log(("%s\n", pszBuf));
391 }
392#endif
393 RTMemFree(pszBuf);
394}
395
396/** API Function: See header file */
397DECLHIDDEN(void) virtioCorePrintDeviceFeatures(VIRTIOCORE *pVirtio, PCDBGFINFOHLP pHlp,
398 const VIRTIO_FEATURES_LIST *s_aDevSpecificFeatures, int cFeatures) {
399 virtioCoreR3FeatureDump(pVirtio, pHlp, s_aCoreFeatures, RT_ELEMENTS(s_aCoreFeatures), 1 /*fBanner */);
400 virtioCoreR3FeatureDump(pVirtio, pHlp, s_aDevSpecificFeatures, cFeatures, 0 /*fBanner */);
401}
402
403#endif
404
405#ifdef LOG_ENABLED
406
407/** API Function: See header file */
408DECLHIDDEN(void) virtioCoreHexDump(uint8_t *pv, uint32_t cb, uint32_t uBase, const char *pszTitle)
409{
410#define ADJCURSOR(cb) pszOut += cb; cbRemain -= cb;
411 size_t cbPrint = 0, cbRemain = ((cb / 16) + 1) * 80;
412 char *pszBuf = (char *)RTMemAllocZ(cbRemain), *pszOut = pszBuf;
413 AssertMsgReturnVoid(pszBuf, ("Out of Memory"));
414 if (pszTitle)
415 {
416 cbPrint = RTStrPrintf(pszOut, cbRemain, "%s [%d bytes]:\n", pszTitle, cb);
417 ADJCURSOR(cbPrint);
418 }
419 for (uint32_t row = 0; row < RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
420 {
421 cbPrint = RTStrPrintf(pszOut, cbRemain, "%04x: ", row * 16 + uBase); /* line address */
422 ADJCURSOR(cbPrint);
423 for (uint8_t col = 0; col < 16; col++)
424 {
425 uint32_t idx = row * 16 + col;
426 if (idx >= cb)
427 cbPrint = RTStrPrintf(pszOut, cbRemain, "-- %s", (col + 1) % 8 ? "" : " ");
428 else
429 cbPrint = RTStrPrintf(pszOut, cbRemain, "%02x %s", pv[idx], (col + 1) % 8 ? "" : " ");
430 ADJCURSOR(cbPrint);
431 }
432 for (uint32_t idx = row * 16; idx < row * 16 + 16; idx++)
433 {
434 cbPrint = RTStrPrintf(pszOut, cbRemain, "%c", (idx >= cb) ? ' ' : (pv[idx] >= 0x20 && pv[idx] <= 0x7e ? pv[idx] : '.'));
435 ADJCURSOR(cbPrint);
436 }
437 *pszOut++ = '\n';
438 --cbRemain;
439 }
440 Log(("%s\n", pszBuf));
441 RTMemFree(pszBuf);
442 RT_NOREF2(uBase, pv);
443#undef ADJCURSOR
444}
445
446/** API Function: See header file */
447DECLHIDDEN(void) virtioCoreGCPhysHexDump(PPDMDEVINS pDevIns, RTGCPHYS GCPhys, uint16_t cb, uint32_t uBase, const char *pszTitle)
448{
449 PVIRTIOCORE pVirtio = PDMDEVINS_2_DATA(pDevIns, PVIRTIOCORE);
450#define ADJCURSOR(cb) pszOut += cb; cbRemain -= cb;
451 size_t cbPrint = 0, cbRemain = ((cb / 16) + 1) * 80;
452 char *pszBuf = (char *)RTMemAllocZ(cbRemain), *pszOut = pszBuf;
453 AssertMsgReturnVoid(pszBuf, ("Out of Memory"));
454 if (pszTitle)
455 {
456 cbPrint = RTStrPrintf(pszOut, cbRemain, "%s [%d bytes]:\n", pszTitle, cb);
457 ADJCURSOR(cbPrint);
458 }
459 for (uint16_t row = 0; row < (uint16_t)RT_MAX(1, (cb / 16) + 1) && row * 16 < cb; row++)
460 {
461 uint8_t c;
462 cbPrint = RTStrPrintf(pszOut, cbRemain, "%04x: ", row * 16 + uBase); /* line address */
463 ADJCURSOR(cbPrint);
464 for (uint8_t col = 0; col < 16; col++)
465 {
466 uint32_t idx = row * 16 + col;
467 virtioCoreGCPhysRead(pVirtio, pDevIns, GCPhys + idx, &c, 1);
468 if (idx >= cb)
469 cbPrint = RTStrPrintf(pszOut, cbRemain, "-- %s", (col + 1) % 8 ? "" : " ");
470 else
471 cbPrint = RTStrPrintf(pszOut, cbRemain, "%02x %s", c, (col + 1) % 8 ? "" : " ");
472 ADJCURSOR(cbPrint);
473 }
474 for (uint16_t idx = row * 16; idx < row * 16 + 16; idx++)
475 {
476 virtioCoreGCPhysRead(pVirtio, pDevIns, GCPhys + idx, &c, 1);
477 cbPrint = RTStrPrintf(pszOut, cbRemain, "%c", (idx >= cb) ? ' ' : (c >= 0x20 && c <= 0x7e ? c : '.'));
478 ADJCURSOR(cbPrint);
479 }
480 *pszOut++ = '\n';
481 --cbRemain;
482 }
483 Log(("%s\n", pszBuf));
484 RTMemFree(pszBuf);
485 RT_NOREF(uBase);
486#undef ADJCURSOR
487}
488
489
490/** API function: See header file */
491DECLHIDDEN(void) virtioCoreLogMappedIoValue(const char *pszFunc, const char *pszMember, uint32_t uMemberSize,
492 const void *pv, uint32_t cb, uint32_t uOffset, int fWrite,
493 int fHasIndex, uint32_t idx)
494{
495 if (LogIs6Enabled())
496 {
497 char szIdx[16];
498 if (fHasIndex)
499 RTStrPrintf(szIdx, sizeof(szIdx), "[%d]", idx);
500 else
501 szIdx[0] = '\0';
502
503 if (cb == 1 || cb == 2 || cb == 4 || cb == 8)
504 {
505 char szDepiction[64];
506 size_t cchDepiction;
507 if (uOffset != 0 || cb != uMemberSize) /* display bounds if partial member access */
508 cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s[%d:%d]",
509 pszMember, szIdx, uOffset, uOffset + cb - 1);
510 else
511 cchDepiction = RTStrPrintf(szDepiction, sizeof(szDepiction), "%s%s", pszMember, szIdx);
512
513 /* padding */
514 if (cchDepiction < 30)
515 szDepiction[cchDepiction++] = ' ';
516 while (cchDepiction < 30)
517 szDepiction[cchDepiction++] = '.';
518 szDepiction[cchDepiction] = '\0';
519
520 RTUINT64U uValue;
521 uValue.u = 0;
522 memcpy(uValue.au8, pv, cb);
523 Log6(("%-23s: Guest %s %s %#0*RX64\n",
524 pszFunc, fWrite ? "wrote" : "read ", szDepiction, 2 + cb * 2, uValue.u));
525 }
526 else /* odd number or oversized access, ... log inline hex-dump style */
527 {
528 Log6(("%-23s: Guest %s %s%s[%d:%d]: %.*Rhxs\n",
529 pszFunc, fWrite ? "wrote" : "read ", pszMember,
530 szIdx, uOffset, uOffset + cb, cb, pv));
531 }
532 }
533 RT_NOREF2(fWrite, pszFunc);
534}
535
536/**
537 * Log the MMIO-mapped VirtIO fDeviceStatus register bitmask, naming the bits.
538 */
539DECLINLINE(void) virtioCoreFormatDeviceStatus(uint8_t bStatus, char *pszBuf, size_t uSize)
540{
541# define ADJCURSOR(len) { cp += len; uSize -= len; sep = (char *)" | "; }
542 memset(pszBuf, 0, uSize);
543 char *cp = pszBuf, *sep = (char *)"";
544 size_t len;
545 if (bStatus == 0)
546 RTStrPrintf(cp, uSize, "RESET");
547 else
548 {
549 if (bStatus & VIRTIO_STATUS_ACKNOWLEDGE)
550 {
551 len = RTStrPrintf(cp, uSize, "ACKNOWLEDGE");
552 ADJCURSOR(len);
553 }
554 if (bStatus & VIRTIO_STATUS_DRIVER)
555 {
556 len = RTStrPrintf(cp, uSize, "%sDRIVER", sep);
557 ADJCURSOR(len);
558 }
559 if (bStatus & VIRTIO_STATUS_FEATURES_OK)
560 {
561 len = RTStrPrintf(cp, uSize, "%sFEATURES_OK", sep);
562 ADJCURSOR(len);
563 }
564 if (bStatus & VIRTIO_STATUS_DRIVER_OK)
565 {
566 len = RTStrPrintf(cp, uSize, "%sDRIVER_OK", sep);
567 ADJCURSOR(len);
568 }
569 if (bStatus & VIRTIO_STATUS_FAILED)
570 {
571 len = RTStrPrintf(cp, uSize, "%sFAILED", sep);
572 ADJCURSOR(len);
573 }
574 if (bStatus & VIRTIO_STATUS_DEVICE_NEEDS_RESET)
575 RTStrPrintf(cp, uSize, "%sNEEDS_RESET", sep);
576 }
577# undef ADJCURSOR
578}
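/*
 * Illustrative sketch (editorial): typical call pattern, assuming a small local buffer:
 *
 * @code
 *     char szStatus[64];
 *     virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szStatus, sizeof(szStatus));
 *     Log6(("fDeviceStatus = %s\n", szStatus));   // e.g. "ACKNOWLEDGE | DRIVER | DRIVER_OK"
 * @endcode
 */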
579
580#endif /* LOG_ENABLED */
581
582/** API function: See header file */
583DECLHIDDEN(int) virtioCoreIsLegacyMode(PVIRTIOCORE pVirtio)
584{
585 return pVirtio->fLegacyDriver;
586}
587
588#ifdef IN_RING3
589
590DECLHIDDEN(int) virtioCoreR3VirtqAttach(PVIRTIOCORE pVirtio, uint16_t uVirtq, const char *pcszName)
591{
592 LogFunc(("Attaching %s to VirtIO core\n", pcszName));
593 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
594 pVirtq->uVirtq = uVirtq;
595 pVirtq->fUsedRingEvent = false;
596 pVirtq->fAttached = true;
597 RTStrCopy(pVirtq->szName, sizeof(pVirtq->szName), pcszName);
598 return VINF_SUCCESS;
599}
600
601DECLHIDDEN(int) virtioCoreR3VirtqDetach(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
602{
603 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtqNbr];
604 pVirtq->uVirtq = 0;
605 pVirtq->uAvailIdxShadow = 0;
606 pVirtq->uUsedIdxShadow = 0;
607 pVirtq->fUsedRingEvent = false;
608 pVirtq->fAttached = false;
609 memset(pVirtq->szName, 0, sizeof(pVirtq->szName));
610 return VINF_SUCCESS;
611}
612
613DECLHIDDEN(bool) virtioCoreR3VirtqIsAttached(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
614{
615 return pVirtio->aVirtqueues[uVirtqNbr].fAttached;
616}
617
618DECLHIDDEN(bool) virtioCoreR3VirtqIsEnabled(PVIRTIOCORE pVirtio, uint16_t uVirtqNbr)
619{
620 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtqNbr];
621 return (bool)pVirtq->uEnable && pVirtq->GCPhysVirtqDesc;
622}
623
624DECLINLINE(void) virtioCoreR3DescInfo(PCDBGFINFOHLP pHlp, PVIRTQ_DESC_T pDesc, uint16_t iDesc, const char *cszTail)
625{
626 if (pDesc->fFlags & VIRTQ_DESC_F_NEXT)
627 pHlp->pfnPrintf(pHlp, " [%4d]%c%c %5d bytes @ %p [%4d] %s\n",
628 iDesc, pDesc->fFlags & VIRTQ_DESC_F_INDIRECT ? 'I' : ' ',
629 pDesc->fFlags & VIRTQ_DESC_F_WRITE ? 'W' : 'R',
630 pDesc->cb, pDesc->GCPhysBuf, pDesc->uDescIdxNext, cszTail);
631 else
632 pHlp->pfnPrintf(pHlp, " [%4d]%c%c %5d bytes @ %p %s\n",
633 iDesc, pDesc->fFlags & VIRTQ_DESC_F_INDIRECT ? 'I' : ' ',
634 pDesc->fFlags & VIRTQ_DESC_F_WRITE ? 'W' : 'R',
635 pDesc->cb, pDesc->GCPhysBuf, cszTail);
636}
637
638#ifdef VIRTIO_REL_INFO_DUMP
639DECLHIDDEN(void) virtioCoreR3DumpAvailRing(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
640{
641 uint16_t auTmp[VIRTQ_SIZE];
642 virtioCoreGCPhysRead(pVirtio, pDevIns,
643 pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[0]),
644 auTmp, pVirtq->uQueueSize * sizeof(uint16_t));
645 pHlp->pfnPrintf(pHlp, " avail ring dump:\n%.*RhXd\n", pVirtq->uQueueSize * sizeof(uint16_t), auTmp,
646 pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[0]));
647}
648
649DECLHIDDEN(void) virtioCoreR3DumpUsedRing(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq)
650{
651 VIRTQ_USED_ELEM_T aTmp[VIRTQ_SIZE];
652 virtioCoreGCPhysRead(pVirtio, pDevIns,
653 pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[0]),
654 aTmp, pVirtq->uQueueSize * sizeof(VIRTQ_USED_ELEM_T));
655 pHlp->pfnPrintf(pHlp, " used ring dump:\n%.*RhXd\n", pVirtq->uQueueSize * sizeof(VIRTQ_USED_ELEM_T), aTmp,
656 pVirtq->GCPhysVirtqUsed + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[0]));
657}
658
659#define VIRTIO_CORE_EVENT_INVALID 0
660#define VIRTIO_CORE_EVENT_AVAIL_GET 1 // pVirtq->uAvailIdxShadow, pVirtqBuf->uHeadIdx
661#define VIRTIO_CORE_EVENT_AVAIL_PEEK 2 // pVirtq->uAvailIdxShadow, pVirtqBuf->uHeadIdx
662#define VIRTIO_CORE_EVENT_AVAIL_NEXT 3 // pVirtq->uAvailIdxShadow, N/A
663#define VIRTIO_CORE_EVENT_USED_PUT 4 // pVirtq->uUsedIdxShadow, pVirtqBuf->uHeadIdx
664#define VIRTIO_CORE_EVENT_USED_SYNC 5 // pVirtq->uUsedIdxShadow, N/A
665
666static const char *virtioCoreEventText[] = {
667 "none <n/a>",
668 " get avail",
669 "peek avail",
670 "next avail",
671 " put used",
672 "sync used"
673};
674
675static void virtioCoreTraceEvent(PVIRTIOCORE pVirtio, PVIRTQUEUE pVirtq, const uint8_t event, uint16_t ringIdx, uint16_t descIdx)
676{
677 if (pVirtq->uQueueSize)
678 {
679 if (event == VIRTIO_CORE_EVENT_AVAIL_NEXT || event == VIRTIO_CORE_EVENT_USED_SYNC)
680 RTTraceBufAddMsgF(pVirtio->hTraceBuf, "%s: %s[%u]\n", pVirtq->szName, virtioCoreEventText[event], ringIdx % pVirtq->uQueueSize);
681 else
682 RTTraceBufAddMsgF(pVirtio->hTraceBuf, "%s: %s[%u]=%u\n", pVirtq->szName, virtioCoreEventText[event], ringIdx % pVirtq->uQueueSize, descIdx);
683 }
684 else
685 {
686 if (event == VIRTIO_CORE_EVENT_AVAIL_NEXT || event == VIRTIO_CORE_EVENT_USED_SYNC)
687 RTTraceBufAddMsgF(pVirtio->hTraceBuf, "%s: %s[%u]\n", pVirtq->szName, virtioCoreEventText[event], ringIdx);
688 else
689 RTTraceBufAddMsgF(pVirtio->hTraceBuf, "%s: %s[%u]=%u\n", pVirtq->szName, virtioCoreEventText[event], ringIdx, descIdx);
690 }
691}
692
693static DECLCALLBACK(int) dumpOneEntryToRelLog(RTTRACEBUF hTraceBuf, uint32_t iEntry, uint64_t NanoTS, RTCPUID idCpu, const char *pszMsg, void *pvUser)
694{
695 RT_NOREF(hTraceBuf, pvUser);
696 LogRel(("%03u / %llu / %02u / %s", iEntry, NanoTS, idCpu, pszMsg));
697 return VINF_SUCCESS;
698}
699
700static inline void virtioCoreDumpTraceBufToRelLog(RTTRACEBUF hTraceBuf)
701{
702 RTTraceBufEnumEntries(hTraceBuf, dumpOneEntryToRelLog, NULL);
703}
704#endif /* VIRTIO_REL_INFO_DUMP */
705
706/** API Function: See header file */
707DECLHIDDEN(void) virtioCoreR3VirtqInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, PVIRTIOCORE pVirtio, const char *pszArgs, int uVirtq)
708{
709 RT_NOREF(pszArgs);
710 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
711
712 /** @todo add ability to dump physical contents described by any descriptor (using existing VirtIO core API function) */
713// bool fDump = pszArgs && (*pszArgs == 'd' || *pszArgs == 'D'); /* "dump" (avail phys descriptor) */
714
715 uint16_t uAvailIdx = virtioReadAvailRingIdx(pDevIns, pVirtio, pVirtq);
716 uint16_t uAvailIdxShadow = pVirtq->uAvailIdxShadow;
717
718 uint16_t uUsedIdx = virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq);
719 uint16_t uUsedIdxShadow = pVirtq->uUsedIdxShadow;
720
721 uint16_t uAvailEventIdx = 0;
722 uint16_t uUsedEventIdx = 0;
723 bool fNotify = !!(pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX);
724 if (fNotify)
725 {
726 uUsedEventIdx = virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq);
727 /* There is no helper for reading AvailEvent since the device is not supposed to read it. */
728 virtioCoreGCPhysRead(pVirtio, pDevIns,
729 pVirtq->GCPhysVirtqUsed
730 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[pVirtq->uQueueSize]),
731 &uAvailEventIdx, sizeof(uAvailEventIdx));
732 }
733
734#ifndef VIRTIO_REL_INFO_DUMP
735 VIRTQBUF_T VirtqBuf;
736 PVIRTQBUF pVirtqBuf = &VirtqBuf;
737 RT_ZERO(VirtqBuf); /* Make sure pSgPhysSend and pSgPhysReturn are initialized. */
738 bool fEmpty = IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq);
739
740 LogFunc(("%s, empty = %s\n", pVirtq->szName, fEmpty ? "true" : "false"));
741
742 int cSendSegs = 0, cReturnSegs = 0;
743 if (!fEmpty)
744 {
745 virtioCoreR3VirtqAvailBufPeek(pDevIns, pVirtio, uVirtq, pVirtqBuf);
746 cSendSegs = pVirtqBuf->pSgPhysSend ? pVirtqBuf->pSgPhysSend->cSegs : 0;
747 cReturnSegs = pVirtqBuf->pSgPhysReturn ? pVirtqBuf->pSgPhysReturn->cSegs : 0;
748 }
749#endif /* !VIRTIO_REL_INFO_DUMP */
750
751 bool fAvailNoInterrupt = virtioReadAvailRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_AVAIL_F_NO_INTERRUPT;
752 bool fUsedNoNotify = virtioReadUsedRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_USED_F_NO_NOTIFY;
753
754 pHlp->pfnPrintf(pHlp, " queue enabled: ........... %s\n", pVirtq->uEnable ? "true" : "false");
755 pHlp->pfnPrintf(pHlp, " size: .................... %d\n", pVirtq->uQueueSize);
756 pHlp->pfnPrintf(pHlp, " notify offset: ........... %d\n", pVirtq->uNotifyOffset);
757 if (pVirtio->fMsiSupport)
758 pHlp->pfnPrintf(pHlp, " MSIX vector: ....... %4.4x\n", pVirtq->uMsixVector);
759 pHlp->pfnPrintf(pHlp, "\n");
760 pHlp->pfnPrintf(pHlp, " avail ring (%d entries):\n", uAvailIdx - uAvailIdxShadow);
761 pHlp->pfnPrintf(pHlp, " index: ................ %d (%d)\n", pVirtq->uQueueSize ? uAvailIdx % pVirtq->uQueueSize : uAvailIdx, uAvailIdx);
762 pHlp->pfnPrintf(pHlp, " shadow: ............... %d (%d)\n", pVirtq->uQueueSize ? uAvailIdxShadow % pVirtq->uQueueSize : uAvailIdxShadow, uAvailIdxShadow);
763 pHlp->pfnPrintf(pHlp, " flags: ................ %s\n", fAvailNoInterrupt ? "NO_INTERRUPT" : "");
764 pHlp->pfnPrintf(pHlp, "\n");
765 pHlp->pfnPrintf(pHlp, " used ring (%d entries):\n", uUsedIdxShadow - uUsedIdx);
766 pHlp->pfnPrintf(pHlp, " index: ................ %d (%d)\n", pVirtq->uQueueSize ? uUsedIdx % pVirtq->uQueueSize : uUsedIdx, uUsedIdx);
767 pHlp->pfnPrintf(pHlp, " shadow: ............... %d (%d)\n", pVirtq->uQueueSize ? uUsedIdxShadow % pVirtq->uQueueSize : uUsedIdxShadow, uUsedIdxShadow);
768 pHlp->pfnPrintf(pHlp, " flags: ................ %s\n", fUsedNoNotify ? "NO_NOTIFY" : "");
769 pHlp->pfnPrintf(pHlp, "\n");
770#ifndef VIRTIO_REL_INFO_DUMP
771 if (!fEmpty)
772 {
773 pHlp->pfnPrintf(pHlp, " desc chain:\n");
774 pHlp->pfnPrintf(pHlp, " head idx: ............. %d (%d)\n", pVirtq->uQueueSize ? uUsedIdx % pVirtq->uQueueSize : uUsedIdx, uUsedIdx);
775 pHlp->pfnPrintf(pHlp, " segs: ................. %d\n", cSendSegs + cReturnSegs);
776 pHlp->pfnPrintf(pHlp, " refCnt ................ %d\n", pVirtqBuf->cRefs);
777 pHlp->pfnPrintf(pHlp, "\n");
778 pHlp->pfnPrintf(pHlp, " host-to-guest (%d bytes):\n", pVirtqBuf->cbPhysSend);
779 pHlp->pfnPrintf(pHlp, " segs: .............. %d\n", cSendSegs);
780 if (cSendSegs)
781 {
782 pHlp->pfnPrintf(pHlp, " index: ............. %d\n", pVirtqBuf->pSgPhysSend->idxSeg);
783 pHlp->pfnPrintf(pHlp, " unsent ............. %d\n", pVirtqBuf->pSgPhysSend->cbSegLeft);
784 }
785 pHlp->pfnPrintf(pHlp, "\n");
786 pHlp->pfnPrintf(pHlp, " guest-to-host (%d bytes):\n", pVirtqBuf->cbPhysReturn);
787 pHlp->pfnPrintf(pHlp, " segs: .............. %d\n", cReturnSegs);
788 if (cReturnSegs)
789 {
790 pHlp->pfnPrintf(pHlp, " index: ............. %d\n", pVirtqBuf->pSgPhysReturn->idxSeg);
791 pHlp->pfnPrintf(pHlp, " unsent ............. %d\n", pVirtqBuf->pSgPhysReturn->cbSegLeft);
792 }
793 } else
794 pHlp->pfnPrintf(pHlp, " no desc chains available\n");
795#endif /* !VIRTIO_REL_INFO_DUMP */
796 pHlp->pfnPrintf(pHlp, "\n");
797
798 /* Avoid handling zero-sized queues; there is nothing to show anyway. */
799 if (pVirtq->uQueueSize == 0)
800 return;
801
802 pHlp->pfnPrintf(pHlp, " desc table:\n");
803 /*
804 * Each line in the descriptor table output consists of two parts: a fixed part and a variable "tail".
805 * The fixed part shows the descriptor index, its writability, size, physical address, and optionally
806 * which descriptor is next in the chain. The tail shows which elements of the avail/used rings point to
807 * this descriptor.
808 */
809 VIRTQ_DESC_T descTable[VIRTQ_SIZE];
810 char aszTails[VIRTQ_SIZE][32];
811 virtioCoreGCPhysRead(pVirtio, pDevIns, pVirtq->GCPhysVirtqDesc,
812 &descTable, sizeof(VIRTQ_DESC_T) * pVirtq->uQueueSize);
813 RT_BZERO(aszTails, sizeof(aszTails)); /* No tails by default */
814
815 /* Fill avail tail fields. */
816
817 /* The first available descriptor gets outer reverse angle brackets. */
818 char chOuterLeft = '>', chOuterRight = '<';
819 char chLeft = '[', chRight = ']';
820 /* Use 'not-equal' instead of 'less' because of uint16_t wrapping! */
821 for (uint16_t i = uAvailIdxShadow; i != uAvailIdx; i++)
822 {
823 /* The last descriptor gets inner curly braces, inner square brackets for the rest. */
824 if (i + 1 == uAvailIdx) { chLeft = '{'; chRight = '}'; }
825 uint16_t uDescIdx = virtioReadAvailDescIdx(pDevIns, pVirtio, pVirtq, i);
826 /* Print an exclamation sign instead of outer right bracket if this descriptor triggers notification. */
827 RTStrPrintf(aszTails[uDescIdx], sizeof(aszTails[0]), "%c%c%4d%c%c ",
828 chOuterLeft, chLeft, i % pVirtq->uQueueSize, chRight,
829 fNotify ? ((i % pVirtq->uQueueSize) == (uAvailEventIdx % pVirtq->uQueueSize) ? '!' : chOuterRight) : chOuterRight);
830 chOuterLeft = chOuterRight = ' ';
831 }
832
833 /* Fill used tail fields, see comments in the similar loop above. */
834
835 chOuterLeft = '>'; chOuterRight = '<';
836 chLeft = '['; chRight = ']';
837 for (uint16_t i = uUsedIdx; i != uUsedIdxShadow; i++)
838 {
839 VIRTQ_USED_ELEM_T elem;
840 virtioCoreGCPhysRead(pVirtio, pDevIns,
841 pVirtq->GCPhysVirtqUsed
842 + RT_UOFFSETOF_DYN(VIRTQ_USED_T, aRing[i % pVirtq->uQueueSize]),
843 &elem, sizeof(elem));
844 if (i + 1 == uUsedIdxShadow) { chLeft = '{'; chRight = '}'; }
845 char *szTail = aszTails[elem.uDescIdx % pVirtq->uQueueSize];
846 /* Add empty avail field if none is present, 9 spaces + terminating zero. */
847 if (*szTail == '\0')
848 RTStrCopy(szTail, 10, " ");
849 RTStrPrintf(szTail + 9, sizeof(aszTails[0]) - 9, " %c%c%4d%c%c %d bytes",
850 chOuterLeft, chLeft, i % pVirtq->uQueueSize, chRight,
851 fNotify ? ((i % pVirtq->uQueueSize) == (uUsedEventIdx % pVirtq->uQueueSize) ? '!' : chOuterRight) : chOuterRight,
852 elem.cbElem);
853 chOuterLeft = chOuterRight = ' ';
854 }
855
856 pHlp->pfnPrintf(pHlp, " index w/r size phys addr next @avail @used\n");
857 pHlp->pfnPrintf(pHlp, " ------ - ----------- ---------------- ------- -------- ------------------\n");
858 for (uint16_t i = 0; i < pVirtq->uQueueSize; i++)
859 virtioCoreR3DescInfo(pHlp, &descTable[i], i, aszTails[i]);
860#ifdef VIRTIO_REL_INFO_DUMP
861 pHlp->pfnPrintf(pHlp, "\n");
862 virtioCoreR3DumpAvailRing(pDevIns, pHlp, pVirtio, pVirtq);
863 pHlp->pfnPrintf(pHlp, "\n");
864 virtioCoreR3DumpUsedRing(pDevIns, pHlp, pVirtio, pVirtq);
865#endif /* VIRTIO_REL_INFO_DUMP */
866}
867
868
869/** API Function: See header file */
870DECLHIDDEN(PVIRTQBUF) virtioCoreR3VirtqBufAlloc(void)
871{
872 PVIRTQBUF pVirtqBuf = (PVIRTQBUF)RTMemAllocZ(sizeof(VIRTQBUF_T));
873 AssertReturn(pVirtqBuf, NULL);
874 pVirtqBuf->u32Magic = VIRTQBUF_MAGIC;
875 pVirtqBuf->cRefs = 1;
876 return pVirtqBuf;
877}
878
879
880/** API Function: See header file */
881DECLHIDDEN(uint32_t) virtioCoreR3VirtqBufRetain(PVIRTQBUF pVirtqBuf)
882{
883 AssertReturn(pVirtqBuf, UINT32_MAX);
884 AssertReturn(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC, UINT32_MAX);
885 uint32_t cRefs = ASMAtomicIncU32(&pVirtqBuf->cRefs);
886 Assert(cRefs > 1);
887 Assert(cRefs < 16);
888 return cRefs;
889}
890
891/** API Function: See header file */
892DECLHIDDEN(uint32_t) virtioCoreR3VirtqBufRelease(PVIRTIOCORE pVirtio, PVIRTQBUF pVirtqBuf)
893{
894 if (!pVirtqBuf)
895 return 0;
896 AssertReturn(pVirtqBuf, 0);
897 AssertReturn(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC, 0);
898 uint32_t cRefs = ASMAtomicDecU32(&pVirtqBuf->cRefs);
899 Assert(cRefs < 16);
900 if (cRefs == 0)
901 {
902 pVirtqBuf->u32Magic = ~VIRTQBUF_MAGIC;
903 RTMemFree(pVirtqBuf);
904#ifdef VBOX_WITH_STATISTICS
905 STAM_REL_COUNTER_INC(&pVirtio->StatDescChainsFreed);
906#endif
907 }
908 RT_NOREF(pVirtio);
909 return cRefs;
910}
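/*
 * Illustrative sketch (editorial): the reference-counting contract of the three
 * functions above, as a device might exercise it (a sketch, not a prescribed pattern):
 *
 * @code
 *     PVIRTQBUF pVirtqBuf = virtioCoreR3VirtqBufAlloc();       // cRefs = 1
 *     if (pVirtqBuf)
 *     {
 *         virtioCoreR3VirtqBufRetain(pVirtqBuf);               // e.g. handed to a worker, cRefs = 2
 *         // ... each holder eventually releases its reference ...
 *         virtioCoreR3VirtqBufRelease(pVirtio, pVirtqBuf);     // cRefs = 1
 *         virtioCoreR3VirtqBufRelease(pVirtio, pVirtqBuf);     // cRefs = 0 -> freed
 *     }
 * @endcode
 */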
911
912/** API Function: See header file */
913DECLHIDDEN(void) virtioCoreNotifyConfigChanged(PVIRTIOCORE pVirtio)
914{
915 virtioNudgeGuest(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
916}
917
918
919/** API Function: See header file */
920DECLHIDDEN(void) virtioCoreVirtqEnableNotify(PVIRTIOCORE pVirtio, uint16_t uVirtq, bool fEnable)
921{
922 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
923 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
924
925 if (IS_DRIVER_OK(pVirtio))
926 {
927 uint16_t fFlags = virtioReadUsedRingFlags(pVirtio->pDevInsR3, pVirtio, pVirtq);
928
929 if (fEnable)
930 fFlags &= ~VIRTQ_USED_F_NO_NOTIFY;
931 else
932 fFlags |= VIRTQ_USED_F_NO_NOTIFY;
933
934 virtioWriteUsedRingFlags(pVirtio->pDevInsR3, pVirtio, pVirtq, fFlags);
935 }
936}
937
938/** API function: See Header file */
939DECLHIDDEN(void) virtioCoreResetAll(PVIRTIOCORE pVirtio)
940{
941 LogFunc(("\n"));
942 pVirtio->fDeviceStatus |= VIRTIO_STATUS_DEVICE_NEEDS_RESET;
943 if (IS_DRIVER_OK(pVirtio))
944 {
945 if (!pVirtio->fLegacyDriver)
946 pVirtio->fGenUpdatePending = true;
947 virtioNudgeGuest(pVirtio->pDevInsR3, pVirtio, VIRTIO_ISR_DEVICE_CONFIG, pVirtio->uMsixConfig);
948 }
949}
950
951/** API function: See Header file */
952DECLHIDDEN(int) virtioCoreR3VirtqAvailBufPeek(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, PVIRTQBUF pVirtqBuf)
953{
954 return virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtq, pVirtqBuf, false);
955}
956
957
958/** API function: See Header file */
959DECLHIDDEN(int) virtioCoreR3VirtqAvailBufNext(PVIRTIOCORE pVirtio, uint16_t uVirtq)
960{
961 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
962 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
963
964 if (!pVirtio->fLegacyDriver)
965 AssertMsgReturn((pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) && pVirtq->uEnable,
966 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
967
968 if (IS_VIRTQ_EMPTY(pVirtio->pDevInsR3, pVirtio, pVirtq))
969 return VERR_NOT_AVAILABLE;
970
971 Log6Func(("%s avail shadow idx: %u\n", pVirtq->szName, pVirtq->uAvailIdxShadow));
972 pVirtq->uAvailIdxShadow++;
973#ifdef VIRTIO_REL_INFO_DUMP
974 virtioCoreTraceEvent(pVirtio, pVirtq, VIRTIO_CORE_EVENT_AVAIL_NEXT, pVirtq->uAvailIdxShadow, 0);
975#endif /* VIRTIO_REL_INFO_DUMP */
976
977 return VINF_SUCCESS;
978}
979
980#ifdef VIRTIO_REL_INFO_DUMP
981DECLCALLBACK(void) virtioNetR3Info(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs);
982
983static DECLCALLBACK(void) dbgVirtio_Printf(PCDBGFINFOHLP pHlp, const char *pszFormat, ...)
984{
985 RT_NOREF(pHlp);
986 va_list va;
987 va_start(va, pszFormat);
988 RTLogRelPrintfV(pszFormat, va);
989 va_end(va);
990}
991
992
993static DECLCALLBACK(void) dbgVirtio_PrintfV(PCDBGFINFOHLP pHlp, const char *pszFormat, va_list args)
994{
995 RT_NOREF(pHlp);
996 RTLogRelPrintfV(pszFormat, args);
997}
998
999
1000/**
1001 * @interface_method_impl{DBGCCMDHLP,pfnGetDbgfOutputHlp}
1002 */
1003static void dbgVirtioDump(PPDMDEVINS pDevIns)
1004{
1005 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1006 LogRel(("dbgVirtioDump(%s)\n", pVirtio->szInstance));
1007 if (RTStrNCmp("virtio-net", pVirtio->szInstance, 10) == 0)
1008 {
1009 DBGFINFOHLP DbgHlp;
1010
1011 DbgHlp.pfnPrintf = dbgVirtio_Printf;
1012 DbgHlp.pfnPrintfV = dbgVirtio_PrintfV;
1013 DbgHlp.pfnGetOptError = NULL;
1014
1015 virtioNetR3Info(pDevIns, &DbgHlp, "a"); // Print everything!
1016 }
1017}
1018
1019/** Temporary API function: See Header file */
1020void virtioCorePutAllAvailBufsToUsedRing(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
1021{
1022 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1023 uint16_t uStartAvailIdx = pVirtq->uAvailIdxShadow;
1024 uint16_t uStartUsedIdx = pVirtq->uUsedIdxShadow;
1025
1026 /*
1027 * Copy chain head indices from avail to used, specifying zero processed length; this
1028 * is only acceptable for the TX queue of virtio-net!
1029 */
1030 while (pVirtq->uAvailIdxShadow != virtioReadAvailRingIdx(pDevIns, pVirtio, pVirtq))
1031 virtioWriteUsedElem(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow++,
1032 virtioReadAvailDescIdx(pDevIns, pVirtio, pVirtq, pVirtq->uAvailIdxShadow++), 0);
1033
1034 if (pVirtq->uAvailIdxShadow != uStartAvailIdx)
1035 {
1036 LogRel(("[%s] Copied indices for %s [avail(%u:%u) -> used(%u:%u)]\n", pVirtio->szInstance, pVirtq->szName,
1037 uStartAvailIdx, pVirtq->uAvailIdxShadow - 1, uStartUsedIdx, pVirtq->uUsedIdxShadow - 1));
1038 virtioWriteUsedRingIdx(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow);
1039 virtioCoreNotifyGuestDriver(pDevIns, pVirtio, uVirtq);
1040 }
1041 else
1042 LogRel(("[%s] Avail ring is empty for %s, nothing to copy.\n", pVirtio->szInstance, pVirtq->szName));
1043}
1044#endif /* VIRTIO_REL_INFO_DUMP */
1045/** API Function: See header file */
1046DECLHIDDEN(int) virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
1047 uint16_t uHeadIdx, PVIRTQBUF pVirtqBuf)
1048{
1049 AssertMsgReturn(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues),
1050 ("uVirtq out of range"), VERR_INVALID_PARAMETER);
1051
1052 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1053
1054 if (!pVirtio->fLegacyDriver)
1055 AssertMsgReturn((pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) && pVirtq->uEnable,
1056 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
1057
1058 uint16_t uDescIdx = uHeadIdx;
1059
1060 Log6Func(("%s DESC CHAIN: (head idx = %u)\n", pVirtio->aVirtqueues[uVirtq].szName, uHeadIdx));
1061
1062 /*
1063 * Allocate and initialize the descriptor chain structure.
1064 */
1065 pVirtqBuf->u32Magic = VIRTQBUF_MAGIC;
1066 pVirtqBuf->cRefs = 1;
1067 pVirtqBuf->uHeadIdx = uHeadIdx;
1068 pVirtqBuf->uVirtq = uVirtq;
1069
1070 /*
1071 * Gather segments.
1072 */
1073 VIRTQ_DESC_T desc;
1074
1075 uint32_t cbIn = 0;
1076 uint32_t cbOut = 0;
1077 uint32_t cSegsIn = 0;
1078 uint32_t cSegsOut = 0;
1079
1080 PVIRTIOSGSEG paSegsIn = pVirtqBuf->aSegsIn;
1081 PVIRTIOSGSEG paSegsOut = pVirtqBuf->aSegsOut;
1082
1083 do
1084 {
1085 PVIRTIOSGSEG pSeg;
1086 /*
1087 * Malicious guests may go beyond the paSegsIn or paSegsOut boundaries by linking
1088 * several descriptors into a loop. Since there is no legitimate way to get a sequence of
1089 * linked descriptors exceeding the total number of descriptors in the ring (see @bugref{8620}),
1090 * the following aborts I/O on a breach and employs a simple log-throttling algorithm to notify.
1091 */
1092#ifdef VIRTIO_REL_INFO_DUMP
1093 if (cSegsIn + cSegsOut >= pVirtq->uQueueSize || ASMAtomicCmpXchgBool(&pVirtio->fTestRecovery, false, true))
1094 {
1095 static volatile uint32_t s_cMessages = 0;
1096 if (ASMAtomicIncU32(&s_cMessages) <= 10)
1097 {
1098 LogRel(("Too many linked descriptors; check if the guest arranges descriptors in a loop "
1099 "(cSegsIn=%u cSegsOut=%u uQueueSize=%u uDescIdx=%u uHeadIdx=%u uAvailIdxShadow=%u queue=%s).\n",
1100 cSegsIn, cSegsOut, pVirtq->uQueueSize, uDescIdx, pVirtqBuf->uHeadIdx, pVirtq->uAvailIdxShadow, pVirtq->szName));
1101 virtioCoreDumpTraceBufToRelLog(pVirtio->hTraceBuf);
1102 dbgVirtioDump(pDevIns);
1103 }
1104 /* Disable the queue to prevent its operation until it is re-initialized. */
1105 pVirtq->uEnable = false;
1106 return VERR_INVALID_STATE;
1107 }
1108#else /* !VIRTIO_REL_INFO_DUMP */
1109 if (cSegsIn + cSegsOut >= pVirtq->uQueueSize)
1110 {
1111 static volatile uint32_t s_cMessages = 0;
1112 static volatile uint32_t s_cThreshold = 1;
1113 if (ASMAtomicIncU32(&s_cMessages) == ASMAtomicReadU32(&s_cThreshold))
1114 {
1115 LogRelMax(64, ("Too many linked descriptors; check if the guest arranges descriptors in a loop "
1116 "(cSegsIn=%u cSegsOut=%u uQueueSize=%u uDescIdx=%u uHeadIdx=%u uAvailIdxShadow=%u queue=%s).\n",
1117 cSegsIn, cSegsOut, pVirtq->uQueueSize, uDescIdx, pVirtqBuf->uHeadIdx, pVirtq->uAvailIdxShadow, pVirtq->szName));
1118 if (ASMAtomicReadU32(&s_cMessages) != 1)
1119 LogRelMax(64, ("(the above error has occurred %u times so far)\n", ASMAtomicReadU32(&s_cMessages)));
1120 ASMAtomicWriteU32(&s_cThreshold, ASMAtomicReadU32(&s_cThreshold) * 10);
1121 }
1122 break;
1123 }
1124#endif /* !VIRTIO_REL_INFO_DUMP */
1125 /* Check if the limit has been reached for input chain (see section 2.4.4.1 of virtio 1.0 spec). */
1126 if (cSegsIn >= RT_ELEMENTS(pVirtqBuf->aSegsIn))
1127 {
1128 LogRelMax(64, ("Too many input descriptors (cSegsIn=%u).\n", cSegsIn));
1129 break;
1130 }
1131 /* Check if the limit has been reached for output chain (see section 2.4.4.1 of virtio 1.0 spec). */
1132 if (cSegsOut >= RT_ELEMENTS(pVirtqBuf->aSegsOut))
1133 {
1134 LogRelMax(64, ("Too many output descriptors (cSegsOut=%u).\n", cSegsOut));
1135 break;
1136 }
1137 RT_UNTRUSTED_VALIDATED_FENCE();
1138
1139 virtioReadDesc(pDevIns, pVirtio, pVirtq, uDescIdx, &desc);
1140
1141 if (desc.fFlags & VIRTQ_DESC_F_WRITE)
1142 {
1143 Log6Func(("%s IN idx=%-4u seg=%-3u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsIn, desc.GCPhysBuf, desc.cb));
1144 cbIn += desc.cb;
1145 pSeg = &paSegsIn[cSegsIn++];
1146 }
1147 else
1148 {
1149 Log6Func(("%s OUT desc_idx=%-4u seg=%-3u addr=%RGp cb=%u\n", pVirtq->szName, uDescIdx, cSegsOut, desc.GCPhysBuf, desc.cb));
1150 cbOut += desc.cb;
1151 pSeg = &paSegsOut[cSegsOut++];
1152#ifdef DEEP_DEBUG
1153 if (LogIs11Enabled())
1154 {
1155 virtioCoreGCPhysHexDump(pDevIns, desc.GCPhysBuf, desc.cb, 0, NULL);
1156 Log(("\n"));
1157 }
1158#endif
1159 }
1160 pSeg->GCPhys = desc.GCPhysBuf;
1161 pSeg->cbSeg = desc.cb;
1162 uDescIdx = desc.uDescIdxNext;
1163 } while (desc.fFlags & VIRTQ_DESC_F_NEXT);
1164
1165 /*
1166 * Add segments to the descriptor chain structure.
1167 */
1168 if (cSegsIn)
1169 {
1170 virtioCoreGCPhysChainInit(&pVirtqBuf->SgBufIn, paSegsIn, cSegsIn);
1171 pVirtqBuf->pSgPhysReturn = &pVirtqBuf->SgBufIn;
1172 pVirtqBuf->cbPhysReturn = cbIn;
1173#ifdef VBOX_WITH_STATISTICS
1174 STAM_REL_COUNTER_ADD(&pVirtio->StatDescChainsSegsIn, cSegsIn);
1175#endif
1176 }
1177
1178 if (cSegsOut)
1179 {
1180 virtioCoreGCPhysChainInit(&pVirtqBuf->SgBufOut, paSegsOut, cSegsOut);
1181 pVirtqBuf->pSgPhysSend = &pVirtqBuf->SgBufOut;
1182 pVirtqBuf->cbPhysSend = cbOut;
1183#ifdef VBOX_WITH_STATISTICS
1184 STAM_REL_COUNTER_ADD(&pVirtio->StatDescChainsSegsOut, cSegsOut);
1185#endif
1186 }
1187
1188#ifdef VBOX_WITH_STATISTICS
1189 STAM_REL_COUNTER_INC(&pVirtio->StatDescChainsAllocated);
1190#endif
1191 Log6Func(("%s -- segs OUT: %u (%u bytes) IN: %u (%u bytes) --\n",
1192 pVirtq->szName, cSegsOut, cbOut, cSegsIn, cbIn));
1193
1194 return VINF_SUCCESS;
1195}
1196
1197/** API function: See Header file */
1198DECLHIDDEN(int) virtioCoreR3VirtqAvailBufGet(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
1199 PVIRTQBUF pVirtqBuf, bool fRemove)
1200{
1201 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
1202 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1203
1204 if (IS_VIRTQ_EMPTY(pDevIns, pVirtio, pVirtq))
1205 return VERR_NOT_AVAILABLE;
1206
1207 uint16_t uHeadIdx = virtioReadAvailDescIdx(pDevIns, pVirtio, pVirtq, pVirtq->uAvailIdxShadow);
1208
1209 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
1210 virtioWriteUsedAvailEvent(pDevIns, pVirtio, pVirtq, pVirtq->uAvailIdxShadow + 1);
1211
1212#ifdef VIRTIO_REL_INFO_DUMP
1213 virtioCoreTraceEvent(pVirtio, pVirtq, fRemove ? VIRTIO_CORE_EVENT_AVAIL_GET : VIRTIO_CORE_EVENT_AVAIL_PEEK, pVirtq->uAvailIdxShadow, uHeadIdx);
1214#endif /* VIRTIO_REL_INFO_DUMP */
1215 if (fRemove)
1216 pVirtq->uAvailIdxShadow++;
1217
1218 return virtioCoreR3VirtqAvailBufGet(pDevIns, pVirtio, uVirtq, uHeadIdx, pVirtqBuf);
1219}
1220
1221/** API function: See Header file */
1222DECLHIDDEN(int) virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, PRTSGBUF pSgVirtReturn,
1223 PVIRTQBUF pVirtqBuf, bool fFence)
1224{
1225 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
1226 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1227
1228 PVIRTIOSGBUF pSgPhysReturn = pVirtqBuf->pSgPhysReturn;
1229
1230 Assert(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC);
1231 Assert(pVirtqBuf->cRefs > 0);
1232
1233 /*
1234 * Workaround for a bug in FreeBSD's virtio-net driver up to 12.3, which supports only the legacy-style device.
1235 * When the device is re-initialized from the driver it violates the spec and posts commands to the control queue
1236 * before setting the DRIVER_OK flag, breaking the following check and rendering the device non-functional.
1237 * The queues are properly set up at this stage, however, so no real harm is done and we can safely continue here,
1238 * though for the legacy device only, of course, after making sure the queue is properly set up.
1239 */
1240 AssertMsgReturn( IS_DRIVER_OK(pVirtio)
1241 || ( pVirtio->fLegacyDriver
1242 && pVirtq->GCPhysVirtqDesc),
1243 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
1244
1245 Log6Func((" Copying device data to %s, [desc:%u -> used ring:%u]\n",
1246 VIRTQNAME(pVirtio, uVirtq), pVirtqBuf->uHeadIdx, pVirtq->uUsedIdxShadow));
1247
1248 /* Copy s/g buf (virtual memory) to guest phys mem (VirtIO "IN" direction). */
1249
1250 size_t cbCopy = 0, cbTotal = 0, cbRemain = 0;
1251
1252 /** @todo r=aeichner Check whether VirtIO should return an error if the device wants to return data but
1253 * the guest didn't set up an IN buffer. */
1254 if ( pSgVirtReturn
1255 && pSgPhysReturn)
1256 {
1257 size_t cbTarget = virtioCoreGCPhysChainCalcBufSize(pSgPhysReturn);
1258 cbRemain = cbTotal = RTSgBufCalcTotalLength(pSgVirtReturn);
1259 AssertMsgReturn(cbTarget >= cbRemain, ("No space to write data to phys memory"), VERR_BUFFER_OVERFLOW);
1260 virtioCoreGCPhysChainReset(pSgPhysReturn);
1261 while (cbRemain)
1262 {
1263 cbCopy = RT_MIN(pSgVirtReturn->cbSegLeft, pSgPhysReturn->cbSegLeft);
1264 AssertReturn(cbCopy > 0, VERR_INVALID_PARAMETER);
1265 virtioCoreGCPhysWrite(pVirtio, pDevIns, (RTGCPHYS)pSgPhysReturn->GCPhysCur, pSgVirtReturn->pvSegCur, cbCopy);
1266 RTSgBufAdvance(pSgVirtReturn, cbCopy);
1267 virtioCoreGCPhysChainAdvance(pSgPhysReturn, cbCopy);
1268 cbRemain -= cbCopy;
1269 }
1270
1271 if (fFence)
1272 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); /* needed? */
1273
1274 Assert(!(cbCopy >> 32));
1275 }
1276
1277 /* Flag if write-ahead crosses threshold where guest driver indicated it wants event notification */
1278 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
1279 if (pVirtq->uUsedIdxShadow == virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq))
1280 pVirtq->fUsedRingEvent = true;
1281
1282 /*
1283 * Place used buffer's descriptor in used ring but don't update used ring's slot index.
1284 * That will be done with a subsequent client call to virtioCoreVirtqUsedRingSync()
1285 *
1286 * @todo r=aeichner: The increment of the shadow index is not atomic but this code can be called
1287 * concurrently!!
1288 */
1289#ifdef VIRTIO_REL_INFO_DUMP
1290 virtioCoreTraceEvent(pVirtio, pVirtq, VIRTIO_CORE_EVENT_USED_PUT, pVirtq->uUsedIdxShadow, pVirtqBuf->uHeadIdx);
1291#endif /* VIRTIO_REL_INFO_DUMP */
1292 virtioWriteUsedElem(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow++, pVirtqBuf->uHeadIdx, (uint32_t)cbTotal);
1293
1294#ifdef LOG_ENABLED
1295 if ( LogIs6Enabled()
1296 && pSgVirtReturn
1297 && pSgPhysReturn)
1298 {
1299
1300 LogFunc((" ... %d segs, %zu bytes, copied to %u byte buf@offset=%u. Residual: %zu bytes\n",
1301 pSgVirtReturn->cSegs, cbTotal - cbRemain, pVirtqBuf->cbPhysReturn,
1302 ((virtioCoreGCPhysChainCalcBufSize(pVirtqBuf->pSgPhysReturn) -
1303 virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn)) - (cbTotal - cbRemain)),
1304 virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn) ));
1305
1306 uint16_t uPending = virtioCoreR3CountPendingBufs(
1307 virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq),
1308 pVirtq->uUsedIdxShadow, pVirtq->uQueueSize);
1309
1310 LogFunc((" %u used buf%s not synced in %s\n", uPending, uPending == 1 ? "" : "s ",
1311 VIRTQNAME(pVirtio, uVirtq)));
1312 }
1313#endif
1314 return VINF_SUCCESS;
1315}
1316
1317/** API function: See Header file */
1318DECLHIDDEN(int) virtioCoreR3VirtqUsedBufPut(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq,
1319 size_t cb, void const *pv, PVIRTQBUF pVirtqBuf, size_t cbEnqueue, bool fFence)
1320{
1321 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
1322 Assert(pv);
1323
1324 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1325 PVIRTIOSGBUF pSgPhysReturn = pVirtqBuf->pSgPhysReturn;
1326
1327 Assert(pVirtqBuf->u32Magic == VIRTQBUF_MAGIC);
1328 Assert(pVirtqBuf->cRefs > 0);
1329
1330 AssertMsgReturn(IS_DRIVER_OK(pVirtio), ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
1331
1332 Log6Func((" Copying device data to %s, [desc chain head idx:%u]\n",
1333 VIRTQNAME(pVirtio, uVirtq), pVirtqBuf->uHeadIdx));
1334 /*
1335 * Convert virtual memory simple buffer to guest physical memory (VirtIO descriptor chain)
1336 */
1337 uint8_t *pvBuf = (uint8_t *)pv;
1338 size_t cbRemain = cb, cbCopy = 0;
1339 while (cbRemain)
1340 {
1341 cbCopy = RT_MIN(pSgPhysReturn->cbSegLeft, cbRemain);
1342 Assert(cbCopy > 0);
1343 virtioCoreGCPhysWrite(pVirtio, pDevIns, (RTGCPHYS)pSgPhysReturn->GCPhysCur, pvBuf, cbCopy);
1344 virtioCoreGCPhysChainAdvance(pSgPhysReturn, cbCopy);
1345 pvBuf += cbCopy;
1346 cbRemain -= cbCopy;
1347 }
1348 LogFunc((" ...%zu bytes, copied to %u byte buf@offset=%u. Residual: %zu bytes\n",
1349 cb, pVirtqBuf->cbPhysReturn,
1350 ((virtioCoreGCPhysChainCalcBufSize(pVirtqBuf->pSgPhysReturn) -
1351 virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn)) - cb),
1352 virtioCoreGCPhysChainCalcLengthLeft(pVirtqBuf->pSgPhysReturn)));
1353
1354 if (cbEnqueue)
1355 {
1356 if (fFence)
1357 {
1358 RT_UNTRUSTED_NONVOLATILE_COPY_FENCE(); /* needed? */
1359 Assert(!(cbCopy >> 32));
1360 }
1361 /* Flag if write-ahead crosses threshold where guest driver indicated it wants event notification */
1362 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
1363 if (pVirtq->uUsedIdxShadow == virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq))
1364 pVirtq->fUsedRingEvent = true;
1365 /*
1366 * Place used buffer's descriptor in used ring but don't update used ring's slot index.
1367 * That will be done with a subsequent client call to virtioCoreVirtqUsedRingSync()
1368 */
1369 Log6Func((" Enqueue desc chain head idx %u to %s used ring @ %u\n", pVirtqBuf->uHeadIdx,
1370 VIRTQNAME(pVirtio, uVirtq), pVirtq->uUsedIdxShadow));
1371
1372#ifdef VIRTIO_REL_INFO_DUMP
1373 virtioCoreTraceEvent(pVirtio, pVirtq, VIRTIO_CORE_EVENT_USED_PUT, pVirtq->uUsedIdxShadow, pVirtqBuf->uHeadIdx);
1374#endif /* VIRTIO_REL_INFO_DUMP */
1375 virtioWriteUsedElem(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow++, pVirtqBuf->uHeadIdx, (uint32_t)cbEnqueue);
1376
1377#ifdef LOG_ENABLED
1378 if (LogIs6Enabled())
1379 {
1380 uint16_t uPending = virtioCoreR3CountPendingBufs(
1381 virtioReadUsedRingIdx(pDevIns, pVirtio, pVirtq),
1382 pVirtq->uUsedIdxShadow, pVirtq->uQueueSize);
1383
1384 LogFunc((" %u used buf%s not synced in %s\n",
1385 uPending, uPending == 1 ? "" : "s ", VIRTQNAME(pVirtio, uVirtq)));
1386 }
1387#endif
1388 } /* cbEnqueue */
1389
1390 return VINF_SUCCESS;
1391}
1392
1393
1394#endif /* IN_RING3 */
1395
1396/** API function: See Header file */
1397DECLHIDDEN(int) virtioCoreVirtqUsedRingSync(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
1398{
1399 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
1400 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1401
1402 if (!pVirtio->fLegacyDriver)
1403 AssertMsgReturn((pVirtio->fDeviceStatus & VIRTIO_STATUS_DRIVER_OK) && pVirtq->uEnable,
1404 ("Guest driver not in ready state.\n"), VERR_INVALID_STATE);
1405
1406 Log6Func((" Sync %s used ring (%u -> idx)\n",
1407 pVirtq->szName, pVirtq->uUsedIdxShadow));
1408
1409#ifdef IN_RING3
1410 // I believe virtioCoreVirtqUsedRingSync is only called from ring 3 in virtio-net
1411#ifdef VIRTIO_REL_INFO_DUMP
1412 virtioCoreTraceEvent(pVirtio, pVirtq, VIRTIO_CORE_EVENT_USED_SYNC, pVirtq->uUsedIdxShadow, 0);
1413#endif /* VIRTIO_REL_INFO_DUMP */
1414#endif /* IN_RING3 */
1415 virtioWriteUsedRingIdx(pDevIns, pVirtio, pVirtq, pVirtq->uUsedIdxShadow);
1416 virtioCoreNotifyGuestDriver(pDevIns, pVirtio, uVirtq);
1417
1418 return VINF_SUCCESS;
1419}
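
/*
 * Typical device-side completion flow pairing the buf-put path above with this
 * sync call (an illustrative sketch only; virtqBufPutAndEnqueue() is a
 * hypothetical stand-in for the buf-put API declared in VirtioCore.h, and
 * pv/cb/pVirtqBuf are the caller's own variables):
 *
 * @code
 *     // Phase 1: copy device output into the guest buffer and enqueue its
 *     // descriptor in the used ring; only uUsedIdxShadow is bumped here.
 *     virtqBufPutAndEnqueue(pDevIns, pVirtio, uVirtq, pv, cb, pVirtqBuf);
 *     // ... possibly more buffers ...
 *     // Phase 2: publish all enqueued entries at once and notify the guest.
 *     virtioCoreVirtqUsedRingSync(pDevIns, pVirtio, uVirtq);
 * @endcode
 */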
1420
1421/**
1422 * This is called from the MMIO callback code when the guest does an MMIO access to the
1423 * mapped queue notification capability area corresponding to a particular queue, to notify
1424 * the queue handler of available data in the avail ring of the queue (VirtIO 1.0, 4.1.4.4.1)
1425 *
1426 * @param pDevIns The device instance.
1427 * @param pVirtio Pointer to the shared virtio state.
1428 * @param uVirtq Virtq to check for guest interrupt handling preference
1429 * @param uNotifyIdx Notification index
1430 */
1431static void virtioCoreVirtqNotified(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq, uint16_t uNotifyIdx)
1432{
1433 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
1434
1435 /* VirtIO 1.0, section 4.1.5.2 implies uVirtq and uNotifyIdx should match. Disregarding any of
1436 * these notifications (if those indices disagree) may break device/driver synchronization,
1437 * causing indefinite throughput starvation, yet there's no specified way to disambiguate
1438 * which queue to wake up in any awkward situation where the two parameters differ.
1439 */
1440 AssertMsg(uNotifyIdx == uVirtq,
1441 ("Guest kicked virtq %d's notify addr w/non-corresponding virtq idx %d\n",
1442 uVirtq, uNotifyIdx));
1443 RT_NOREF(uNotifyIdx);
1444
1445 AssertReturnVoid(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
1446 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1447
1448 Log6Func(("%s: (desc chains: %u)\n", *pVirtq->szName ? pVirtq->szName : "?UNNAMED QUEUE?",
1449 virtioCoreVirtqAvailCnt(pDevIns, pVirtio, pVirtq)));
1450
1451 /* Inform client */
1452 pVirtioCC->pfnVirtqNotified(pDevIns, pVirtio, uVirtq);
1453 RT_NOREF2(pVirtio, pVirtq);
1454}
1455
1456/**
1457 * Trigger MSI-X or INT# interrupt to notify guest of data added to used ring of
1458 * the specified virtq, depending on the interrupt configuration of the device
1459 * and depending on negotiated and realtime constraints flagged by the guest driver.
1460 *
1461 * See VirtIO 1.0 specification (section 2.4.7).
1462 *
1463 * @param pDevIns The device instance.
1464 * @param pVirtio Pointer to the shared virtio state.
1465 * @param uVirtq Virtq to check for guest interrupt handling preference
1466 */
1467static void virtioCoreNotifyGuestDriver(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint16_t uVirtq)
1468{
1469 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
1470 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1471
1472 if (!IS_DRIVER_OK(pVirtio))
1473 {
1474 LogFunc(("Guest driver not in ready state.\n"));
1475 return;
1476 }
1477
1478 if (pVirtio->uDriverFeatures & VIRTIO_F_EVENT_IDX)
1479 {
1480 if (pVirtq->fUsedRingEvent)
1481 {
1482#ifdef IN_RING3
1483 Log6Func(("...kicking guest %s, VIRTIO_F_EVENT_IDX set and threshold (%d) reached\n",
1484 pVirtq->szName, (uint16_t)virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq)));
1485#endif
1486 virtioNudgeGuest(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsixVector);
1487 pVirtq->fUsedRingEvent = false;
1488 return;
1489 }
1490#ifdef IN_RING3
1491 Log6Func(("...skip interrupt %s, VIRTIO_F_EVENT_IDX set but threshold (%d) not reached (%d)\n",
1492 pVirtq->szName,(uint16_t)virtioReadAvailUsedEvent(pDevIns, pVirtio, pVirtq), pVirtq->uUsedIdxShadow));
1493#endif
1494 }
1495 else
1496 {
1497 /* If the guest driver hasn't suppressed interrupts, raise one */
1498 if (!(virtioReadAvailRingFlags(pDevIns, pVirtio, pVirtq) & VIRTQ_AVAIL_F_NO_INTERRUPT))
1499 {
1500 virtioNudgeGuest(pDevIns, pVirtio, VIRTIO_ISR_VIRTQ_INTERRUPT, pVirtq->uMsixVector);
1501 return;
1502 }
1503 Log6Func(("...skipping interrupt for %s (guest set VIRTQ_AVAIL_F_NO_INTERRUPT)\n", pVirtq->szName));
1504 }
1505}
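
/*
 * Worked example of the VIRTIO_F_EVENT_IDX gating above (illustrative; the
 * guest-side store is driver pseudo-code, not part of this file):
 *
 * @code
 *     avail->used_event = 5;   // guest: "interrupt me once used idx passes 5"
 *     // Device side: while enqueuing, uUsedIdxShadow == 5 matches the value
 *     // read by virtioReadAvailUsedEvent(), arming fUsedRingEvent, so this
 *     // function raises one interrupt for the whole batch instead of one
 *     // interrupt per buffer.
 * @endcode
 */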
1506
1507/**
1508 * Raise interrupt or MSI-X
1509 *
1510 * @param pDevIns The device instance.
1511 * @param pVirtio Pointer to the shared virtio state.
1512 * @param uCause Interrupt cause bit mask to set in PCI ISR port.
1513 * @param uVec MSI-X vector, if enabled
1514 */
1515static int virtioNudgeGuest(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, uint8_t uCause, uint16_t uMsixVector)
1516{
1517#ifdef VIRTIO_REL_INFO_DUMP
1518 if (ASMAtomicReadBool(&pVirtio->fRecovering))
1519 LogRel(("[%s] Raising%s interrupt, because of %s\n", pVirtio->szInstance, pVirtio->fMsiSupport ? " MSI" : "",
1520 uCause == VIRTIO_ISR_VIRTQ_INTERRUPT ? "added buffer" : "config change"));
1521#endif /* VIRTIO_REL_INFO_DUMP */
1522 if (uCause == VIRTIO_ISR_VIRTQ_INTERRUPT)
1523 Log6Func(("Reason for interrupt - buffer added to 'used' ring.\n"));
1524 else
1525 if (uCause == VIRTIO_ISR_DEVICE_CONFIG)
1526 Log6Func(("Reason for interrupt - device config change\n"));
1527
1528 if (pVirtio->uIrqMmio)
1529 {
1530 pVirtio->uISR |= uCause;
1531 PDMDevHlpISASetIrq(pDevIns, pVirtio->uIrqMmio, PDM_IRQ_LEVEL_HIGH);
1532 }
1533 else if (!pVirtio->fMsiSupport)
1534 {
1535 pVirtio->uISR |= uCause;
1536 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_HIGH);
1537 }
1538 else if (uMsixVector != VIRTIO_MSI_NO_VECTOR)
1539 PDMDevHlpPCISetIrq(pDevIns, uMsixVector, 1);
1540 return VINF_SUCCESS;
1541}
1542
1543/**
1544 * Lower interrupt (Called when guest reads ISR and when resetting)
1545 *
1546 * @param pDevIns The device instance.
1547 */
1548static void virtioLowerInterrupt(PPDMDEVINS pDevIns, uint16_t uMsixVector)
1549{
1550 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1551 if (pVirtio->uIrqMmio)
1552 PDMDevHlpISASetIrq(pDevIns, pVirtio->uIrqMmio, PDM_IRQ_LEVEL_LOW);
1553 else if (!pVirtio->fMsiSupport)
1554 PDMDevHlpPCISetIrq(pDevIns, 0, PDM_IRQ_LEVEL_LOW);
1555 else if (uMsixVector != VIRTIO_MSI_NO_VECTOR)
1556 PDMDevHlpPCISetIrq(pDevIns, uMsixVector, PDM_IRQ_LEVEL_LOW);
1557}
1558
1559#ifdef IN_RING3
1560static void virtioResetVirtq(PVIRTIOCORE pVirtio, uint16_t uVirtq)
1561{
1562 Assert(uVirtq < RT_ELEMENTS(pVirtio->aVirtqueues));
1563 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
1564
1565 pVirtq->uQueueSize = VIRTQ_SIZE;
1566 pVirtq->uEnable = false;
1567 pVirtq->uNotifyOffset = uVirtq;
1568 pVirtq->fUsedRingEvent = false;
1569 pVirtq->uAvailIdxShadow = 0;
1570 pVirtq->uUsedIdxShadow = 0;
1571 pVirtq->uMsixVector = uVirtq + 2;
1572
1573 if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
1574 pVirtq->uMsixVector = VIRTIO_MSI_NO_VECTOR;
1575
1576 virtioLowerInterrupt(pVirtio->pDevInsR3, pVirtq->uMsixVector);
1577}
1578
1579static void virtioResetDevice(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio)
1580{
1581 LogFunc(("Resetting device VirtIO state\n"));
1582 pVirtio->fLegacyDriver = pVirtio->fOfferLegacy; /* Cleared if VIRTIO_F_VERSION_1 feature ack'd */
1583 pVirtio->fDriverFeaturesWritten = 0; /* Features can be re-negotiated after reset! */
1584 pVirtio->uDeviceFeaturesSelect = 0;
1585 pVirtio->uDriverFeaturesSelect = 0;
1586 pVirtio->uConfigGeneration = 0;
1587 pVirtio->fDeviceStatus = 0;
1588 pVirtio->uISR = 0;
1589
1590 if (!pVirtio->fMsiSupport)
1591 virtioLowerInterrupt(pDevIns, 0);
1592 else
1593 {
1594 virtioLowerInterrupt(pDevIns, pVirtio->uMsixConfig);
1595 for (int i = 0; i < VIRTQ_MAX_COUNT; i++)
1596 virtioLowerInterrupt(pDevIns, pVirtio->aVirtqueues[i].uMsixVector);
1597 }
1598
1599 if (!pVirtio->fMsiSupport) /* VirtIO 1.0, 4.1.4.3 and 4.1.5.1.2 */
1600 pVirtio->uMsixConfig = VIRTIO_MSI_NO_VECTOR;
1601
1602 for (uint16_t uVirtq = 0; uVirtq < VIRTQ_MAX_COUNT; uVirtq++)
1603 virtioResetVirtq(pVirtio, uVirtq);
1604}
1605
1606/**
1607 * Invoked by this implementation when guest driver resets the device.
1608 * The driver itself will not proceed until it has read back the status change.
1609 */
1610static void virtioGuestR3WasReset(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
1611{
1612 Log(("%-23s: Guest reset the device\n", __FUNCTION__));
1613
1614 /* Let the client know */
1615 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, 0 /* fDriverOk */);
1616 virtioResetDevice(pDevIns, pVirtio);
1617}
1618
1619DECLHIDDEN(void) virtioCoreR3ResetDevice(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
1620{
1621 virtioGuestR3WasReset(pDevIns, pVirtio, pVirtioCC);
1622}
1623#endif /* IN_RING3 */
1624
1625/*
1626 * Determines whether guest virtio driver is modern or legacy and does callback
1627 * informing device-specific code that feature negotiation is complete.
1628 * Should be called only once (coordinated via the 'toggle' flag)
1629 */
1630#ifdef IN_RING3
1631DECLINLINE(void) virtioR3DoFeaturesCompleteOnceOnly(PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
1632{
1633 if (pVirtio->uDriverFeatures & VIRTIO_F_VERSION_1)
1634 {
1635 LogFunc(("VIRTIO_F_VERSION_1 feature ack'd by guest\n"));
1636 pVirtio->fLegacyDriver = 0;
1637 }
1638 else
1639 {
1640 if (pVirtio->fOfferLegacy)
1641 {
1642 pVirtio->fLegacyDriver = 1;
1643 LogFunc(("VIRTIO_F_VERSION_1 feature was NOT set by guest\n"));
1644 }
1645 else
1646 AssertMsgFailed(("Guest didn't accept VIRTIO_F_VERSION_1, but fOfferLegacy flag not set.\n"));
1647 }
1648 if (pVirtioCC->pfnFeatureNegotiationComplete)
1649 pVirtioCC->pfnFeatureNegotiationComplete(pVirtio, pVirtio->uDriverFeatures, pVirtio->fLegacyDriver);
1650 pVirtio->fDriverFeaturesWritten |= DRIVER_FEATURES_COMPLETE_HANDLED;
1651}
1652#endif
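
/*
 * Sketch of the negotiation-tracking protocol around the helper above, using
 * the DRIVER_FEATURES_* defines from the top of this file (a condensation of
 * the actual write paths further below, not additional logic):
 *
 * @code
 *     pVirtio->fDriverFeaturesWritten |= DRIVER_FEATURES_0_WRITTEN;  // low 32 bits written
 *     pVirtio->fDriverFeaturesWritten |= DRIVER_FEATURES_1_WRITTEN;  // high 32 bits written
 *     if (   (pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_0_AND_1_WRITTEN)
 *                                            == DRIVER_FEATURES_0_AND_1_WRITTEN
 *         && !(pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_COMPLETE_HANDLED))
 *         virtioR3DoFeaturesCompleteOnceOnly(pVirtio, pVirtioCC); // sets ..._COMPLETE_HANDLED
 * @endcode
 */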
1653
1654
1655/**
1656 * Handles a write to the device status register from the driver.
1657 *
1658 * @returns VBox status code
1659 *
1660 * @param pDevIns The device instance.
1661 * @param pVirtio Pointer to the shared virtio state.
1662 * @param pVirtioCC Pointer to the current context virtio state.
1663 * @param fDeviceStatus The device status to be written.
1664 */
1665DECLINLINE(int) virtioDeviceStatusWrite(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC,
1666 uint8_t fDeviceStatus)
1667{
1668 pVirtio->fDeviceStatus = fDeviceStatus;
1669 bool fDeviceReset = pVirtio->fDeviceStatus == 0;
1670#ifdef LOG_ENABLED
1671 if (LogIs7Enabled())
1672 {
1673 char szOut[80] = { 0 };
1674 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1675 Log(("%-23s: Guest wrote fDeviceStatus ................ (%s)\n", __FUNCTION__, szOut));
1676 }
1677#endif
1678 bool const fStatusChanged = IS_DRIVER_OK(pVirtio) != WAS_DRIVER_OK(pVirtio);
1679
1680 if (fDeviceReset || fStatusChanged)
1681 {
1682#ifdef IN_RING0
1683 /* Since VirtIO status changes are infrequent and not performance-critical,
1684 * handle the rest in R3 to facilitate logging or whatever else the dev-specific client needs to do */
1685 Log6(("%-23s: RING0 => RING3 (demote)\n", __FUNCTION__));
1686 return VINF_IOM_R3_MMIO_WRITE;
1687#endif
1688 }
1689
1690#ifdef IN_RING3
1691 /*
1692 * Notify the client only if the status actually changed since the last write, and on device reset.
1693 */
1694 if (fDeviceReset)
1695 virtioGuestR3WasReset(pDevIns, pVirtio, pVirtioCC);
1696
1697 if (fStatusChanged)
1698 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, IS_DRIVER_OK(pVirtio));
1699#else
1700 RT_NOREF(pDevIns, pVirtioCC);
1701#endif
1702 /*
1703 * Save the current status for the next write so we can see what changed.
1704 */
1705 pVirtio->fPrevDeviceStatus = pVirtio->fDeviceStatus;
1706 return VINF_SUCCESS;
1707}
1708
1709
1710/**
1711 * Handles a read from the device status register from the driver.
1712 *
1713 * @returns The device status register value.
1714 *
1715 * @param pVirtio Pointer to the shared virtio state.
1716 */
1717DECLINLINE(uint8_t) virtioDeviceStatusRead(PVIRTIOCORE pVirtio)
1718{
1719#ifdef LOG_ENABLED
1720 if (LogIs7Enabled())
1721 {
1722 char szOut[80] = { 0 };
1723 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1724 LogFunc(("Guest read fDeviceStatus ................ (%s)\n", szOut));
1725 }
1726#endif
1727 return pVirtio->fDeviceStatus;
1728}
1729
1730
1731/**
1732 * Handle accesses to Common Configuration capability
1733 *
1734 * @returns VBox status code
1735 *
1736 * @param pDevIns The device instance.
1737 * @param pVirtio Pointer to the shared virtio state.
1738 * @param pVirtioCC Pointer to the current context virtio state.
1739 * @param fWrite Set if write access, clear if read access.
1740 * @param uOffsetOfAccess The common configuration capability offset.
1741 * @param cb Number of bytes to read or write
1742 * @param pv Pointer to location to write to or read from
1743 */
1744static int virtioCommonCfgAccessed(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC,
1745 int fWrite, uint32_t uOffsetOfAccess, unsigned cb, void *pv)
1746{
1747 uint16_t uVirtq = pVirtio->uVirtqSelect;
1748 int rc = VINF_SUCCESS;
1749 uint64_t val;
1750#ifdef VIRTIO_REL_INFO_DUMP
1751 if (ASMAtomicReadBool(&pVirtio->fRecovering))
1752 {
1753 if (fWrite)
1754 LogRel(("[%s] writes %u bytes @ cmn cfg + %u: %.*Rhxs\n", pVirtio->szInstance, cb, uOffsetOfAccess, cb, pv));
1755 else
1756 LogRel(("[%s] attempts to read %u bytes @ cmn cfg + %u\n", pVirtio->szInstance, cb, uOffsetOfAccess));
1757 }
1758#endif /* VIRTIO_REL_INFO_DUMP */
1759 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1760 {
1761 if (fWrite) /* Guest WRITE pCommonCfg->uDeviceFeatures */
1762 {
1763 /* VirtIO 1.0, 4.1.4.3 states device_feature is a (guest) driver readonly field,
1764 * yet the linux driver attempts to write/read it back twice */
1765 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1766 LogFunc(("... WARNING: Guest attempted to write readonly virtio_pci_common_cfg.device_feature (ignoring)\n"));
1767 return VINF_IOM_MMIO_UNUSED_00;
1768 }
1769 else /* Guest READ pCommonCfg->uDeviceFeatures */
1770 {
1771 switch (pVirtio->uDeviceFeaturesSelect)
1772 {
1773 case 0:
1774 val = pVirtio->uDeviceFeatures & UINT32_C(0xffffffff);
1775 memcpy(pv, &val, cb);
1776 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1777 break;
1778 case 1:
1779 val = pVirtio->uDeviceFeatures >> 32;
1780 memcpy(pv, &val, cb);
1781 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + sizeof(uint32_t));
1782 break;
1783 default:
1784 LogFunc(("Guest read uDeviceFeatures with out of range selector (%#x), returning 0\n",
1785 pVirtio->uDeviceFeaturesSelect));
1786 return VINF_IOM_MMIO_UNUSED_00;
1787 }
1788 }
1789 }
1790 else
1791 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1792 {
1793 if (fWrite) /* Guest WRITE pCommonCfg->uDriverFeatures */
1794 {
1795 switch (pVirtio->uDriverFeaturesSelect)
1796 {
1797 case 0:
1798 memcpy(&pVirtio->uDriverFeatures, pv, cb);
1799 pVirtio->fDriverFeaturesWritten |= DRIVER_FEATURES_0_WRITTEN;
1800 LogFunc(("Set DRIVER_FEATURES_0_WRITTEN. pVirtio->fDriverFeaturesWritten=%d\n", pVirtio->fDriverFeaturesWritten));
1801 if ( (pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_0_AND_1_WRITTEN) == DRIVER_FEATURES_0_AND_1_WRITTEN
1802 && !(pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_COMPLETE_HANDLED))
1803#ifdef IN_RING0
1804 return VINF_IOM_R3_MMIO_WRITE;
1805#endif
1806#ifdef IN_RING3
1807 virtioR3DoFeaturesCompleteOnceOnly(pVirtio, pVirtioCC);
1808#endif
1809 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1810 break;
1811 case 1:
1812 memcpy((char *)&pVirtio->uDriverFeatures + sizeof(uint32_t), pv, cb);
1813 pVirtio->fDriverFeaturesWritten |= DRIVER_FEATURES_1_WRITTEN;
1814 LogFunc(("Set DRIVER_FEATURES_1_WRITTEN. pVirtio->fDriverFeaturesWritten=%d\n", pVirtio->fDriverFeaturesWritten));
1815 if ( (pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_0_AND_1_WRITTEN) == DRIVER_FEATURES_0_AND_1_WRITTEN
1816 && !(pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_COMPLETE_HANDLED))
1817#ifdef IN_RING0
1818 return VINF_IOM_R3_MMIO_WRITE;
1819#endif
1820#ifdef IN_RING3
1821 virtioR3DoFeaturesCompleteOnceOnly(pVirtio, pVirtioCC);
1822#endif
1823 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + sizeof(uint32_t));
1824 break;
1825 default:
1826 LogFunc(("Guest wrote uDriverFeatures with out of range selector (%#x), ignoring\n",
1827 pVirtio->uDriverFeaturesSelect));
1828 return VINF_SUCCESS;
1829 }
1830 }
1831 else /* Guest READ pCommonCfg->uDriverFeatures */
1832 {
1833 switch (pVirtio->uDriverFeaturesSelect)
1834 {
1835 case 0:
1836 val = pVirtio->uDriverFeatures & 0xffffffff;
1837 memcpy(pv, &val, cb);
1838 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1839 break;
1840 case 1:
1841 val = (pVirtio->uDriverFeatures >> 32) & 0xffffffff;
1842 memcpy(pv, &val, cb);
1843 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess + 4);
1844 break;
1845 default:
1846 LogFunc(("Guest read uDriverFeatures with out of range selector (%#x), returning 0\n",
1847 pVirtio->uDriverFeaturesSelect));
1848 return VINF_IOM_MMIO_UNUSED_00;
1849 }
1850 }
1851 }
1852 else
1853 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uNumVirtqs, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1854 {
1855 if (fWrite)
1856 {
1857 Log2Func(("Guest attempted to write readonly virtio_pci_common_cfg.num_queues\n"));
1858 return VINF_SUCCESS;
1859 }
1860 *(uint16_t *)pv = VIRTQ_MAX_COUNT;
1861 VIRTIO_DEV_CONFIG_LOG_ACCESS(uNumVirtqs, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess);
1862 }
1863 else
1864 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fDeviceStatus, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1865 {
1866 if (fWrite) /* Guest WRITE pCommonCfg->fDeviceStatus */
1867 rc = virtioDeviceStatusWrite(pDevIns, pVirtio, pVirtioCC, *(uint8_t *)pv);
1868 else /* Guest READ pCommonCfg->fDeviceStatus */
1869 *(uint8_t *)pv = virtioDeviceStatusRead(pVirtio);
1870 }
1871 else
1872 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixConfig, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1873 VIRTIO_DEV_CONFIG_ACCESS( uMsixConfig, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1874 else
1875 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uDeviceFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1876 VIRTIO_DEV_CONFIG_ACCESS( uDeviceFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1877 else
1878 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uDriverFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1879 VIRTIO_DEV_CONFIG_ACCESS( uDriverFeaturesSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1880 else
1881 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uConfigGeneration, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1882 VIRTIO_DEV_CONFIG_ACCESS( uConfigGeneration, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1883 else
1884 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1885 {
1886 if (fWrite) {
1887 uint16_t uVirtqNew = *(uint16_t *)pv;
1888
1889 if (uVirtqNew < RT_ELEMENTS(pVirtio->aVirtqueues))
1890 VIRTIO_DEV_CONFIG_ACCESS( uVirtqSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1891 else
1892 LogFunc(("... WARNING: Guest attempted to write invalid virtq selector (ignoring)\n"));
1893 }
1894 else
1895 VIRTIO_DEV_CONFIG_ACCESS( uVirtqSelect, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio);
1896 }
1897 else
1898 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqDesc, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1899 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqDesc, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1900 else
1901 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqAvail, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1902 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqAvail, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1903 else
1904 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( GCPhysVirtqUsed, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1905 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( GCPhysVirtqUsed, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1906 else
1907 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uQueueSize, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1908 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uQueueSize, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1909 else
1910 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uEnable, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1911 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uEnable, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1912 else
1913 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uNotifyOffset, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1914 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uNotifyOffset, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1915 else
1916 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixVector, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess))
1917 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uMsixVector, uVirtq, VIRTIO_PCI_COMMON_CFG_T, uOffsetOfAccess, pVirtio->aVirtqueues);
1918 else
1919 {
1920 Log2Func(("Bad guest %s access to virtio_pci_common_cfg: uOffsetOfAccess=%#x (%d), cb=%d\n",
1921 fWrite ? "write" : "read ", uOffsetOfAccess, uOffsetOfAccess, cb));
1922 return fWrite ? VINF_SUCCESS : VINF_IOM_MMIO_UNUSED_00;
1923 }
1924#ifdef VIRTIO_REL_INFO_DUMP
1925 if (ASMAtomicReadBool(&pVirtio->fRecovering) && !fWrite)
1926 LogRel(("[%s] read %u bytes @ cmn cfg + %u: %.*Rhxs\n", pVirtio->szInstance, cb, uOffsetOfAccess, cb, pv));
1927#endif /* VIRTIO_REL_INFO_DUMP */
1928
1929#ifndef IN_RING3
1930 RT_NOREF(pDevIns, pVirtioCC);
1931#endif
1932 return rc;
1933}
1934
1935/**
1936 * @callback_method_impl{FNIOMIOPORTNEWIN}
1937 *
1938 * This I/O handler exists only to handle access from legacy drivers.
1939 */
1940static DECLCALLBACK(VBOXSTRICTRC) virtioLegacyIOPortIn(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t *pu32, unsigned cb)
1941{
1942 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
1943 STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatRead), a);
1944
1945 RT_NOREF(pvUser);
1946 Log(("%-23s: Port read at offset=%RTiop, cb=%#x%s",
1947 __FUNCTION__, offPort, cb,
1948 VIRTIO_DEV_CONFIG_MATCH_MEMBER(fIsrStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort) ? "" : "\n"));
1949
1950 void *pv = pu32; /* To use existing macros */
1951 int fWrite = 0; /* To use existing macros */
1952
1953 uint16_t uVirtq = pVirtio->uVirtqSelect;
1954
1955 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1956 {
1957 uint32_t val = pVirtio->uDeviceFeatures & UINT32_C(0xffffffff);
1958 memcpy(pu32, &val, cb);
1959 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
1960 }
1961 else
1962 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1963 {
1964 uint32_t val = pVirtio->uDriverFeatures & UINT32_C(0xffffffff);
1965 memcpy(pu32, &val, cb);
1966 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
1967 }
1968 else
1969 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fDeviceStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1970 {
1971 *(uint8_t *)pu32 = pVirtio->fDeviceStatus;
1972#ifdef LOG_ENABLED
1973 if (LogIs7Enabled())
1974 {
1975 char szOut[80] = { 0 };
1976 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
1977 Log(("%-23s: Guest read fDeviceStatus ................ (%s)\n", __FUNCTION__, szOut));
1978 }
1979#endif
1980 }
1981 else
1982 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fIsrStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1983 {
1984 ASSERT_GUEST_MSG(cb == 1, ("%d\n", cb));
1985 *(uint8_t *)pu32 = pVirtio->uISR;
1986 pVirtio->uISR = 0;
1987 virtioLowerInterrupt( pDevIns, 0);
1988 Log((" (ISR read and cleared)\n"));
1989 }
1990 else
1991 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSelect, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1992 VIRTIO_DEV_CONFIG_ACCESS( uVirtqSelect, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
1993 else
1994 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqPfn, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
1995 {
1996 PVIRTQUEUE pVirtQueue = &pVirtio->aVirtqueues[uVirtq];
1997 *pu32 = pVirtQueue->GCPhysVirtqDesc >> GUEST_PAGE_SHIFT;
1998 Log(("%-23s: Guest read uVirtqPfn .................... %#x\n", __FUNCTION__, *pu32));
1999 }
2000 else
2001 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uQueueSize, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
2002 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uQueueSize, uVirtq, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio->aVirtqueues);
2003 else
2004 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uQueueNotify, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
2005 VIRTIO_DEV_CONFIG_ACCESS( uQueueNotify, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
2006#ifdef LEGACY_MSIX_SUPPORTED
2007 else
2008 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixConfig, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
2009 VIRTIO_DEV_CONFIG_ACCESS( uMsixConfig, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
2010 else
2011 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixVector, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
2012 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uMsixVector, uVirtq, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio->aVirtqueues);
2013#endif
2014 else if (offPort >= sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T))
2015 {
2016 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
2017#ifdef IN_RING3
2018 /* Access device-specific configuration */
2019 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
2020 int rc = pVirtioCC->pfnDevCapRead(pDevIns, offPort - sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T), pv, cb);
2021 return rc;
2022#else
2023 return VINF_IOM_R3_IOPORT_READ;
2024#endif
2025 }
2026 else
2027 {
2028 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
2029 Log2Func(("Bad guest read access to virtio_legacy_pci_common_cfg: offset=%#x, cb=%x\n",
2030 offPort, cb));
2031 int rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
2032 "virtioLegacyIOPortIn: no valid port at offset=%RTiop cb=%#x\n", offPort, cb);
2033 return rc;
2034 }
2035 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
2036 return VINF_SUCCESS;
2037}
2038
2039/**
2040 * @callback_method_impl{FNIOMIOPORTNEWOUT}
2041 *
2042 * This I/O Port interface exists only to handle access from legacy drivers.
2043 */
2044static DECLCALLBACK(VBOXSTRICTRC) virtioLegacyIOPortOut(PPDMDEVINS pDevIns, void *pvUser, RTIOPORT offPort, uint32_t u32, unsigned cb)
2045{
2046 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
2047 STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatWrite), a);
2048 RT_NOREF(pvUser);
2049
2050 uint16_t uVirtq = pVirtio->uVirtqSelect;
2051 uint32_t u32OnStack = u32; /* allows us to use this impl's MMIO parsing macros */
2052 void *pv = &u32OnStack; /* To use existing macros */
2053 int fWrite = 1; /* To use existing macros */
2054
2055 Log(("%-23s: Port written at offset=%RTiop, cb=%#x, u32=%#x\n", __FUNCTION__, offPort, cb, u32));
2056
2057 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uVirtqSelect, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
2058 {
2059 if (u32 < RT_ELEMENTS(pVirtio->aVirtqueues))
2060 VIRTIO_DEV_CONFIG_ACCESS( uVirtqSelect, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
2061 else
2062 LogFunc(("... WARNING: Guest attempted to write invalid virtq selector (ignoring)\n"));
2063 }
2064 else
2065#ifdef LEGACY_MSIX_SUPPORTED
2066 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixConfig, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
2067 VIRTIO_DEV_CONFIG_ACCESS( uMsixConfig, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio);
2068 else
2069 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER( uMsixVector, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
2070 VIRTIO_DEV_CONFIG_ACCESS_INDEXED( uMsixVector, uVirtq, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort, pVirtio->aVirtqueues);
2071 else
2072#endif
2073 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
2074 {
2075 /* Check to see if guest acknowledged unsupported features */
2076 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDeviceFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
2077 LogFunc(("... WARNING: Guest attempted to write readonly virtio_pci_common_cfg.device_feature (ignoring)\n"));
2078 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
2079 return VINF_SUCCESS;
2080 }
2081 else
2082 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
2083 {
2084 memcpy(&pVirtio->uDriverFeatures, pv, cb);
2085 if ((pVirtio->uDriverFeatures & ~VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED) != 0)
2086 {
2087 Log(("Guest asked for features host does not support! (host=%x guest=%x)\n",
2088 VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED, pVirtio->uDriverFeatures));
2089 pVirtio->uDriverFeatures &= VIRTIO_DEV_INDEPENDENT_LEGACY_FEATURES_OFFERED;
2090 }
2091 if (!(pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_COMPLETE_HANDLED))
2092 {
2093#ifdef IN_RING0
2094 Log6(("%-23s: RING0 => RING3 (demote)\n", __FUNCTION__));
2095 return VINF_IOM_R3_IOPORT_WRITE;
2096#endif
2097#ifdef IN_RING3
2098 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
2099 virtioR3DoFeaturesCompleteOnceOnly(pVirtio, pVirtioCC);
2100#endif
2101 }
2102 VIRTIO_DEV_CONFIG_LOG_ACCESS(uDriverFeatures, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
2103 }
2104 else
2105 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uQueueSize, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
2106 {
2107 VIRTIO_DEV_CONFIG_LOG_ACCESS(uQueueSize, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
2108 LogFunc(("... WARNING: Guest attempted to write readonly virtio_legacy_pci_common_cfg.queue_size (ignoring)\n"));
2109 return VINF_SUCCESS;
2110 }
2111 else
2112 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fDeviceStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
2113 {
2114 bool const fDriverInitiatedReset = (pVirtio->fDeviceStatus = (uint8_t)u32) == 0;
2115 bool const fDriverStateImproved = IS_DRIVER_OK(pVirtio) && !WAS_DRIVER_OK(pVirtio);
2116#ifdef LOG_ENABLED
2117 if (LogIs7Enabled())
2118 {
2119 char szOut[80] = { 0 };
2120 virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
2121 Log(("%-23s: Guest wrote fDeviceStatus ................ (%s)\n", __FUNCTION__, szOut));
2122 }
2123#endif
2124 if (fDriverStateImproved || fDriverInitiatedReset)
2125 {
2126#ifdef IN_RING0
2127 Log6(("%-23s: RING0 => RING3 (demote)\n", __FUNCTION__));
2128 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
2129 return VINF_IOM_R3_IOPORT_WRITE;
2130#endif
2131 }
2132
2133#ifdef IN_RING3
2134 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
2135 if (fDriverInitiatedReset)
2136 virtioGuestR3WasReset(pDevIns, pVirtio, pVirtioCC);
2137
2138 else if (fDriverStateImproved)
2139 pVirtioCC->pfnStatusChanged(pVirtio, pVirtioCC, 1 /* fDriverOk */);
2140
2141#endif
2142 pVirtio->fPrevDeviceStatus = pVirtio->fDeviceStatus;
2143 }
2144 else
2145 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uVirtqPfn, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
2146 {
2147 PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];
2148 uint64_t uVirtqPfn = (uint64_t)u32;
2149
2150 if (uVirtqPfn)
2151 {
2152 /* Transitional devices calculate ring physical addresses using rigid spec-defined formulae
2153 * instead of the guest conveying the address of each ring, as "modern" VirtIO drivers do.
2154 * Thus no virtq PFN or single base queue address is stored in instance data for this
2155 * transitional device; when read back, uVirtqPfn is derived from GCPhysVirtqDesc. */
2156
2157 pVirtq->GCPhysVirtqDesc = uVirtqPfn * VIRTIO_PAGE_SIZE;
2158 pVirtq->GCPhysVirtqAvail = pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * pVirtq->uQueueSize;
2159 pVirtq->GCPhysVirtqUsed =
2160 RT_ALIGN(pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtq->uQueueSize]), VIRTIO_PAGE_SIZE);
2161 }
2162 else
2163 {
2164 /* Don't set ring addresses for queue (to meaningless values), when guest resets the virtq's PFN */
2165 pVirtq->GCPhysVirtqDesc = 0;
2166 pVirtq->GCPhysVirtqAvail = 0;
2167 pVirtq->GCPhysVirtqUsed = 0;
2168 }
2169 Log(("%-23s: Guest wrote uVirtqPfn .................... %#x:\n"
2170 "%68s... %p -> GCPhysVirtqDesc\n%68s... %p -> GCPhysVirtqAvail\n%68s... %p -> GCPhysVirtqUsed\n",
2171 __FUNCTION__, u32, " ", pVirtq->GCPhysVirtqDesc, " ", pVirtq->GCPhysVirtqAvail, " ", pVirtq->GCPhysVirtqUsed));
2172 }
2173 else
2174 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(uQueueNotify, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
2175 {
2176#ifdef IN_RING3
2177 ASSERT_GUEST_MSG(cb == 2, ("cb=%u\n", cb));
2178 pVirtio->uQueueNotify = u32 & 0xFFFF;
2179 if (uVirtq < VIRTQ_MAX_COUNT)
2180 {
2181 RT_UNTRUSTED_VALIDATED_FENCE();
2182
2183 /* Need to check that queue is configured. Legacy spec didn't have a queue enabled flag */
2184 if (pVirtio->aVirtqueues[pVirtio->uQueueNotify].GCPhysVirtqDesc)
2185 virtioCoreVirtqNotified(pDevIns, pVirtio, pVirtio->uQueueNotify, pVirtio->uQueueNotify /* uNotifyIdx */);
2186 else
2187 Log(("The queue (#%d) being notified has not been initialized.\n", pVirtio->uQueueNotify));
2188 }
2189 else
2190 Log(("Invalid queue number (%d)\n", pVirtio->uQueueNotify));
2191#else
2192 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
2193 return VINF_IOM_R3_IOPORT_WRITE;
2194#endif
2195 }
2196 else
2197 if (VIRTIO_DEV_CONFIG_MATCH_MEMBER(fIsrStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort))
2198 {
2199 VIRTIO_DEV_CONFIG_LOG_ACCESS( fIsrStatus, VIRTIO_LEGACY_PCI_COMMON_CFG_T, offPort);
2200 LogFunc(("... WARNING: Guest attempted to write readonly virtio_legacy_pci_common_cfg.fIsrStatus (ignoring)\n"));
2201 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
2202 return VINF_SUCCESS;
2203 }
2204 else if (offPort >= sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T))
2205 {
2206#ifdef IN_RING3
2207
2208 /* Access device-specific configuration */
2209 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
2210 return pVirtioCC->pfnDevCapWrite(pDevIns, offPort - sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T), pv, cb);
2211#else
2212 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
2213 return VINF_IOM_R3_IOPORT_WRITE;
2214#endif
2215 }
2216 else
2217 {
2218 Log2Func(("Bad guest write access to virtio_legacy_pci_common_cfg: offset=%#x, cb=0x%x\n",
2219 offPort, cb));
2220 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
2221 int rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
2222 "virtioLegacyIOPortOut: no valid port at offset=%RTiop cb=%#x\n", offPort, cb);
2223 return rc;
2224 }
2225
2226 RT_NOREF(uVirtq);
2227 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
2228 return VINF_SUCCESS;
2229}
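
/*
 * Worked example of the legacy ring layout computed in the uVirtqPfn case
 * above (illustrative numbers; assumes VIRTIO_PAGE_SIZE = 4096, 16-byte
 * VIRTQ_DESC_T entries and uQueueSize = 256):
 *
 * @code
 *     GCPhysVirtqDesc  = 0x12345 * 4096;              // PFN 0x12345 -> 0x12345000
 *     GCPhysVirtqAvail = GCPhysVirtqDesc + 16 * 256;  // 0x12346000, right after desc table
 *     // The avail ring is flags(2) + idx(2) + 256 * 2 = 516 bytes; the used
 *     // ring starts at the next page boundary per the transitional formula:
 *     GCPhysVirtqUsed  = RT_ALIGN(GCPhysVirtqAvail + 516, 4096);   // 0x12347000
 * @endcode
 */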
2230
2231
2232/**
2233 * Read from the device specific configuration at the given offset.
2234 *
2235 * @returns VBox status code.
2236 * @param pDevIns The device instance.
 * @param pVirtio Pointer to the shared virtio state.
 * @param pVirtioCC Pointer to the current context virtio state.
 * @param offDevCfg Offset into the device-specific configuration area.
 * @param pv Pointer to the location to read into.
 * @param cb Number of bytes to read.
2237 */
2238DECLINLINE(VBOXSTRICTRC) virtioDeviceCfgRead(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC,
2239 uint32_t offDevCfg, void *pv, unsigned cb)
2240{
2241#ifdef IN_RING3
2242 /*
2243 * Callback to client to manage device-specific configuration.
2244 */
2245 VBOXSTRICTRC rcStrict = pVirtioCC->pfnDevCapRead(pDevIns, offDevCfg, pv, cb);
2246
2247 /*
2248 * Anytime any part of the dev-specific dev config (which this virtio core implementation sees
2249 * as a blob, and virtio dev-specific code separates into fields) is READ, it must be compared
2250 * for deltas from previous read to maintain a config gen. seq. counter (VirtIO 1.0, section 4.1.4.3.1)
2251 */
2252 bool fDevSpecificFieldChanged = RT_BOOL(memcmp(pVirtioCC->pbDevSpecificCfg + offDevCfg,
2253 pVirtioCC->pbPrevDevSpecificCfg + offDevCfg,
2254 RT_MIN(cb, pVirtioCC->cbDevSpecificCfg - offDevCfg)));
2255
2256 memcpy(pVirtioCC->pbPrevDevSpecificCfg, pVirtioCC->pbDevSpecificCfg, pVirtioCC->cbDevSpecificCfg);
2257
2258 if (pVirtio->fGenUpdatePending || fDevSpecificFieldChanged)
2259 {
2260 ++pVirtio->uConfigGeneration;
2261 Log6Func(("Bumped cfg. generation to %d because %s%s\n", pVirtio->uConfigGeneration,
2262 fDevSpecificFieldChanged ? "<dev cfg changed> " : "",
2263 pVirtio->fGenUpdatePending ? "<update was pending>" : ""));
2264 pVirtio->fGenUpdatePending = false;
2265 }
2266
2267 virtioLowerInterrupt(pDevIns, 0);
2268 return rcStrict;
2269#else
2270 RT_NOREF(pDevIns, pVirtio, pVirtioCC, offDevCfg, pv, cb);
2271 return VINF_IOM_R3_MMIO_READ;
2272#endif
2273}
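
/*
 * The generation counter maintained above supports the driver-side read loop
 * mandated by VirtIO 1.0, 4.1.4.3.1 (guest pseudo-code, not part of this file):
 *
 * @code
 *     uint8_t uGen;
 *     do {
 *         uGen = common_cfg->config_generation;
 *         // ... read the device-specific config fields ...
 *     } while (uGen != common_cfg->config_generation); // retry if changed mid-read
 * @endcode
 */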
2274
2275
2276/**
2277 * @callback_method_impl{FNIOMMMIONEWREAD,
2278 * Memory mapped I/O Handler for PCI Capabilities read operations.}
2279 *
2280 * This MMIO handler specifically supports the VIRTIO_PCI_CAP_PCI_CFG capability defined
2281 * in the VirtIO 1.0 specification, section 4.1.4.7, and as such is restricted to reads
2282 * of 1, 2 or 4 bytes only.
2283 *
2284 */
2285static DECLCALLBACK(VBOXSTRICTRC) virtioMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
2286{
2287 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
2288 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
2289 AssertReturn(cb == 1 || cb == 2 || cb == 4, VINF_IOM_MMIO_UNUSED_FF);
2290 Assert(pVirtio == (PVIRTIOCORE)pvUser); RT_NOREF(pvUser);
2291
2292 STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatRead), a);
2293
2294 VBOXSTRICTRC rcStrict;
2295 uint32_t uOffset;
2296 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocDeviceCap))
2297 rcStrict = virtioDeviceCfgRead(pDevIns, pVirtio, pVirtioCC, uOffset, pv, cb);
2298 else if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocCommonCfgCap))
2299 rcStrict = virtioCommonCfgAccessed(pDevIns, pVirtio, pVirtioCC, false /* fWrite */, uOffset, cb, pv);
2300 else if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocIsrCap))
2301 {
2302 *(uint8_t *)pv = pVirtio->uISR;
2303 Log6Func(("Read and clear ISR\n"));
2304 pVirtio->uISR = 0; /* VirtIO spec requires reads of ISR to clear it */
2305 virtioLowerInterrupt(pDevIns, 0);
2306 rcStrict = VINF_SUCCESS;
2307 }
2308 else
2309 {
2310 ASSERT_GUEST_MSG_FAILED(("Bad read access to mapped capabilities region: off=%RGp cb=%u\n", off, cb));
2311 memset(pv, 0xFF, cb);
2312 rcStrict = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
2313 "virtioMmioRead: Bad MMIO access to capabilities, offset=%RTiop cb=%08x\n", off, cb);
2314 }
2315
2316 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
2317 return rcStrict;
2318}
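
/*
 * Guest-side pattern the read-to-clear ISR branch above serves (driver
 * pseudo-code per VirtIO 1.0, 4.1.4.5; read8() is a placeholder for the
 * guest's MMIO read primitive):
 *
 * @code
 *     uint8_t isr = read8(isr_cap);             // returns causes, clears them, lowers INT#
 *     if (isr & VIRTIO_ISR_VIRTQ_INTERRUPT)     // a used ring was updated
 *         service_used_rings();
 *     if (isr & VIRTIO_ISR_DEVICE_CONFIG)       // device configuration changed
 *         reread_device_config();
 * @endcode
 */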
2319
2320/**
2321 * @callback_method_impl{FNIOMMMIONEWWRITE,
2322 * Memory mapped I/O Handler for PCI Capabilities write operations.}
2323 *
2324 * This MMIO handler specifically supports the VIRTIO_PCI_CAP_PCI_CFG capability defined
2325 * in the VirtIO 1.0 specification, section 4.1.4.7, and as such is restricted to writes
2326 * of 1, 2 or 4 bytes only.
2327 */
2328static DECLCALLBACK(VBOXSTRICTRC) virtioMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
2329{
2330 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
2331 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
2332 AssertReturn(cb == 1 || cb == 2 || cb == 4, VERR_INVALID_PARAMETER);
2333 Assert(pVirtio == (PVIRTIOCORE)pvUser); RT_NOREF(pvUser);
2334 STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatWrite), a);
2335
2336 VBOXSTRICTRC rcStrict;
2337 uint32_t uOffset;
2338 if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocDeviceCap))
2339 {
2340#ifdef IN_RING3
2341 /*
2342 * Forward this MMIO write access for the client to deal with.
2343 */
2344 rcStrict = pVirtioCC->pfnDevCapWrite(pDevIns, uOffset, pv, cb);
2345#else
2346 Log6(("%-23s: RING0 => RING3 (demote)\n", __FUNCTION__));
2347 rcStrict = VINF_IOM_R3_MMIO_WRITE;
2348#endif
2349 }
2350 else if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocCommonCfgCap))
2351 rcStrict = virtioCommonCfgAccessed(pDevIns, pVirtio, pVirtioCC, true /* fWrite */, uOffset, cb, (void *)pv);
2352 else if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocIsrCap) && cb == sizeof(uint8_t))
2353 {
2354 pVirtio->uISR = *(uint8_t *)pv;
2355 Log6Func(("Setting uISR = 0x%02x (virtq interrupt: %d, dev config interrupt: %d)\n",
2356 pVirtio->uISR & 0xff,
2357 pVirtio->uISR & VIRTIO_ISR_VIRTQ_INTERRUPT,
2358 RT_BOOL(pVirtio->uISR & VIRTIO_ISR_DEVICE_CONFIG)));
2359 rcStrict = VINF_SUCCESS;
2360 }
2361 else if (MATCHES_VIRTIO_CAP_STRUCT(off, cb, uOffset, pVirtio->LocNotifyCap) && cb == sizeof(uint16_t))
2362 {
2363 /* This *should* be guest driver dropping index of a new descriptor in avail ring */
2364 virtioCoreVirtqNotified(pDevIns, pVirtio, uOffset / VIRTIO_NOTIFY_OFFSET_MULTIPLIER, *(uint16_t *)pv);
2365 rcStrict = VINF_SUCCESS;
2366 }
2367 else
2368 {
2369 ASSERT_GUEST_MSG_FAILED(("Bad write access to mapped capabilities region: off=%RGp pv=%#p{%.*Rhxs} cb=%u\n", off, pv, cb, pv, cb));
2370 rcStrict = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
2371 "virtioMmioWrite: Bad MMIO access to capabilities, offset=%RTiop cb=%08x\n", off, cb);
2372 }
2373
2374 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
2375 return rcStrict;
2376}
2377
2378
2379/**
2380 * @callback_method_impl{FNIOMMMIONEWREAD,
2381 * Memory mapped I/O Handler for Virtio over MMIO read operations.}
2382 *
2383 */
2384static DECLCALLBACK(VBOXSTRICTRC) virtioMmioTransportRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
2385{
2386 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
2387 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
2388 RT_NOREF(pvUser);
2389 STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatRead), a);
2390
2391 if (off >= VIRTIO_MMIO_SIZE)
2392 {
2393 VBOXSTRICTRC rcStrict = virtioDeviceCfgRead(pDevIns, pVirtio, pVirtioCC, (uint32_t)off - VIRTIO_MMIO_SIZE, pv, cb);
2394 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
2395 return rcStrict;
2396 }
2397
2398 /* All accesses below need to be aligned on a 32-bit boundary and must be 32-bit in size. */
2399 ASSERT_GUEST_MSG_RETURN(!(off & 0x3) && cb == sizeof(uint32_t),
2400 ("Bad read access: off=%RGp pv=%#p{%.*Rhxs} cb=%u\n", off, pv, cb, pv, cb),
2401 VINF_IOM_MMIO_UNUSED_FF);
2402
2403 int rc = VINF_SUCCESS;
2404 uint32_t *pu32 = (uint32_t *)pv;
2405 switch (off)
2406 {
2407 case VIRTIO_MMIO_REG_MAGIC_OFF:
2408 *pu32 = RT_H2LE_U32(VIRTIO_MMIO_REG_MAGIC_VALUE);
2409 break;
2410 case VIRTIO_MMIO_REG_VERSION_OFF:
2411 *pu32 = RT_H2LE_U32(VIRTIO_MMIO_REG_VERSION_VALUE);
2412 break;
2413 case VIRTIO_MMIO_REG_DEVICEID_OFF:
2414 *pu32 = pVirtio->uDeviceType;
2415 break;
2416 case VIRTIO_MMIO_REG_VENDORID_OFF:
2417 *pu32 = RT_H2LE_U32(DEVICE_PCI_VENDOR_ID_VIRTIO);
2418 break;
2419 case VIRTIO_MMIO_REG_DEVICEFEAT_OFF:
2420 {
2421 switch (pVirtio->uDeviceFeaturesSelect)
2422 {
2423 case 0:
2424 *pu32 = pVirtio->uDeviceFeatures & UINT32_C(0xffffffff);
2425 break;
2426 case 1:
2427 *pu32 = pVirtio->uDeviceFeatures >> 32;
2428 break;
2429 default:
2430 LogFunc(("Guest read uDeviceFeatures with out of range selector (%#x), returning 0\n",
2431 pVirtio->uDeviceFeaturesSelect));
2432 rc = VINF_IOM_MMIO_UNUSED_00;
2433 }
2434 break;
2435 }
2436 case VIRTIO_MMIO_REG_QUEUENUMMAX_OFF:
2437 *pu32 = VIRTQ_SIZE; /** @todo */
2438 break;
2439 case VIRTIO_MMIO_REG_QUEUERDY_OFF:
2440 {
2441 Assert(pVirtio->uVirtqSelect < RT_ELEMENTS(pVirtio->aVirtqueues));
2442 PVIRTQUEUE pVirtQueue = &pVirtio->aVirtqueues[pVirtio->uVirtqSelect];
2443 *pu32 = pVirtQueue->uEnable;
2444 break;
2445 }
2446 case VIRTIO_MMIO_REG_INTRSTATUS_OFF:
2447 *pu32 = pVirtio->uISR;
2448 break;
2449 case VIRTIO_MMIO_REG_DEVSTATUS_OFF:
2450 *pu32 = virtioDeviceStatusRead(pVirtio);
2451 break;
2452 case VIRTIO_MMIO_REG_CFGGEN_OFF:
2453 *pu32 = pVirtio->uConfigGeneration;
2454 break;
2455 default:
2456 ASSERT_GUEST_MSG_FAILED(("Bad read access to mapped capabilities region: off=%RGp cb=%u\n", off, cb));
2457 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
2458 "virtioMmioTransportRead: Bad MMIO access to capabilities, offset=%RTiop cb=%08x\n", off, cb);
2459 }
2460
2461 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatRead), a);
2462 return rc;
2463}
2464
2465/**
2466 * @callback_method_impl{FNIOMMMIONEWWRITE,
2467 * Memory mapped I/O Handler for Virtio over MMIO write operations.}
2468 */
2469static DECLCALLBACK(VBOXSTRICTRC) virtioMmioTransportWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
2470{
2471 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
2472 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
2473 RT_NOREF(pvUser);
2474 STAM_PROFILE_ADV_START(&pVirtio->CTX_SUFF(StatWrite), a);
2475
2476 if (off >= VIRTIO_MMIO_SIZE)
2477 {
2478#ifdef IN_RING3
2479 /*
2480 * Forward this MMIO write access for client to deal with.
2481 */
2482 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
2483 return pVirtioCC->pfnDevCapWrite(pDevIns, (uint32_t)off - VIRTIO_MMIO_SIZE, pv, cb);
2484#else
2485 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
2486 Log6(("%-23s: RING0 => RING3 (demote)\n", __FUNCTION__));
2487 return VINF_IOM_R3_MMIO_WRITE;
2488#endif
2489 }
2490
2491 /* All accesses below need to be aligned on a 32-bit boundary and must be 32-bit in size. */
2492 ASSERT_GUEST_MSG_RETURN(!(off & 0x3) && cb == sizeof(uint32_t),
2493 ("Bad write access: off=%RGp pv=%#p{%.*Rhxs} cb=%u\n", off, pv, cb, pv, cb),
2494 VINF_SUCCESS);
2495
2496 int rc = VINF_SUCCESS;
2497 uint32_t const u32Val = *(const uint32_t *)pv;
2498 switch (off)
2499 {
2500 case VIRTIO_MMIO_REG_DEVICEFEATSEL_OFF:
2501 {
2502 pVirtio->uDeviceFeaturesSelect = u32Val;
2503 break;
2504 }
2505 case VIRTIO_MMIO_REG_DRIVERFEAT_OFF:
2506 {
2507 switch (pVirtio->uDriverFeaturesSelect)
2508 {
2509 case 0:
2510 pVirtio->uDriverFeatures = (pVirtio->uDriverFeatures & UINT64_C(0xffffffff00000000)) | u32Val;
2511 pVirtio->fDriverFeaturesWritten |= DRIVER_FEATURES_0_WRITTEN;
2512 LogFunc(("Set DRIVER_FEATURES_0_WRITTEN. pVirtio->fDriverFeaturesWritten=%d\n", pVirtio->fDriverFeaturesWritten));
2513 if ( (pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_0_AND_1_WRITTEN) == DRIVER_FEATURES_0_AND_1_WRITTEN
2514 && !(pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_COMPLETE_HANDLED))
2515#ifdef IN_RING0
2516 return VINF_IOM_R3_MMIO_WRITE;
2517#endif
2518#ifdef IN_RING3
2519 virtioR3DoFeaturesCompleteOnceOnly(pVirtio, pVirtioCC);
2520#endif
2521 break;
2522 case 1:
2523 pVirtio->uDriverFeatures = (pVirtio->uDriverFeatures & UINT64_C(0x00000000ffffffff)) | ((uint64_t)u32Val << 32);
2524 pVirtio->fDriverFeaturesWritten |= DRIVER_FEATURES_1_WRITTEN;
2525 LogFunc(("Set DRIVER_FEATURES_1_WRITTEN. pVirtio->fDriverFeaturesWritten=%d\n", pVirtio->fDriverFeaturesWritten));
2526 if ( (pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_0_AND_1_WRITTEN) == DRIVER_FEATURES_0_AND_1_WRITTEN
2527 && !(pVirtio->fDriverFeaturesWritten & DRIVER_FEATURES_COMPLETE_HANDLED))
2528#ifdef IN_RING0
2529 return VINF_IOM_R3_MMIO_WRITE;
2530#endif
2531#ifdef IN_RING3
2532 virtioR3DoFeaturesCompleteOnceOnly(pVirtio, pVirtioCC);
2533#endif
2534 break;
2535 default:
2536 LogFunc(("Guest wrote uDriverFeatures with out of range selector (%#x), ignoring\n",
2537 pVirtio->uDriverFeaturesSelect));
2538 return VINF_SUCCESS;
2539 }
2540 break;
2541 }
2542 case VIRTIO_MMIO_REG_DRIVERFEATSEL_OFF:
2543 {
2544 pVirtio->uDriverFeaturesSelect = u32Val;
2545 break;
2546 }
2547 case VIRTIO_MMIO_REG_QUEUESEL_OFF:
2548 {
2549 if (u32Val < RT_ELEMENTS(pVirtio->aVirtqueues))
2550 pVirtio->uVirtqSelect = (uint16_t)u32Val;
2551 else
2552 LogFunc(("... WARNING: Guest attempted to write invalid virtq selector (ignoring)\n"));
2553 break;
2554 }
2555 case VIRTIO_MMIO_REG_QUEUENUM_OFF:
2556 {
2557 Assert(pVirtio->uVirtqSelect < RT_ELEMENTS(pVirtio->aVirtqueues));
2558 PVIRTQUEUE pVirtQueue = &pVirtio->aVirtqueues[pVirtio->uVirtqSelect];
2559 pVirtQueue->uQueueSize = (uint16_t)u32Val;
2560 break;
2561 }
2562 case VIRTIO_MMIO_REG_QUEUERDY_OFF:
2563 {
2564 Assert(pVirtio->uVirtqSelect < RT_ELEMENTS(pVirtio->aVirtqueues));
2565 PVIRTQUEUE pVirtQueue = &pVirtio->aVirtqueues[pVirtio->uVirtqSelect];
2566 pVirtQueue->uEnable = (uint16_t)u32Val;
2567 break;
2568 }
2569 case VIRTIO_MMIO_REG_QUEUENOTIFY_OFF:
2570 {
2571 virtioCoreVirtqNotified(pDevIns, pVirtio, u32Val, (uint16_t)u32Val);
2572 break;
2573 }
2574 case VIRTIO_MMIO_REG_INTRACK_OFF:
2575 {
2576 pVirtio->uISR &= ~u32Val;
2577 if (!pVirtio->uISR)
2578 virtioLowerInterrupt(pDevIns, 0);
2579 break;
2580 }
2581 case VIRTIO_MMIO_REG_DEVSTATUS_OFF:
2582 {
2583 rc = virtioDeviceStatusWrite(pDevIns, pVirtio, pVirtioCC, (uint8_t)u32Val);
2584 break;
2585 }
2586 case VIRTIO_MMIO_REG_QUEUEALIGN_LEGACY_OFF:
2587 {
2588 /* Written by edk2 even though we don't offer legacy mode, ignore. */
2589 break;
2590 }
2591 case VIRTIO_MMIO_REG_QUEUEDESCLOW_OFF:
2592 {
2593 Assert(pVirtio->uVirtqSelect < RT_ELEMENTS(pVirtio->aVirtqueues));
2594 PVIRTQUEUE pVirtQueue = &pVirtio->aVirtqueues[pVirtio->uVirtqSelect];
2595 pVirtQueue->GCPhysVirtqDesc = (pVirtQueue->GCPhysVirtqDesc & UINT64_C(0xffffffff00000000)) | u32Val;
2596 break;
2597 }
2598 case VIRTIO_MMIO_REG_QUEUEDESCHIGH_OFF:
2599 {
2600 Assert(pVirtio->uVirtqSelect < RT_ELEMENTS(pVirtio->aVirtqueues));
2601 PVIRTQUEUE pVirtQueue = &pVirtio->aVirtqueues[pVirtio->uVirtqSelect];
2602 pVirtQueue->GCPhysVirtqDesc = (pVirtQueue->GCPhysVirtqDesc & UINT64_C(0x00000000ffffffff)) | ((uint64_t)u32Val << 32);
2603 break;
2604 }
2605 case VIRTIO_MMIO_REG_QUEUEDRVLOW_OFF:
2606 {
2607 Assert(pVirtio->uVirtqSelect < RT_ELEMENTS(pVirtio->aVirtqueues));
2608 PVIRTQUEUE pVirtQueue = &pVirtio->aVirtqueues[pVirtio->uVirtqSelect];
2609 pVirtQueue->GCPhysVirtqAvail = (pVirtQueue->GCPhysVirtqAvail & UINT64_C(0xffffffff00000000)) | u32Val;
2610 break;
2611 }
2612 case VIRTIO_MMIO_REG_QUEUEDRVHIGH_OFF:
2613 {
2614 Assert(pVirtio->uVirtqSelect < RT_ELEMENTS(pVirtio->aVirtqueues));
2615 PVIRTQUEUE pVirtQueue = &pVirtio->aVirtqueues[pVirtio->uVirtqSelect];
2616 pVirtQueue->GCPhysVirtqAvail = (pVirtQueue->GCPhysVirtqAvail & UINT64_C(0x00000000ffffffff)) | ((uint64_t)u32Val << 32);
2617 break;
2618 }
2619 case VIRTIO_MMIO_REG_QUEUEDEVLOW_OFF:
2620 {
2621 Assert(pVirtio->uVirtqSelect < RT_ELEMENTS(pVirtio->aVirtqueues));
2622 PVIRTQUEUE pVirtQueue = &pVirtio->aVirtqueues[pVirtio->uVirtqSelect];
2623 pVirtQueue->GCPhysVirtqUsed = (pVirtQueue->GCPhysVirtqUsed & UINT64_C(0xffffffff00000000)) | u32Val;
2624 break;
2625 }
2626 case VIRTIO_MMIO_REG_QUEUEDEVHIGH_OFF:
2627 {
2628 Assert(pVirtio->uVirtqSelect < RT_ELEMENTS(pVirtio->aVirtqueues));
2629 PVIRTQUEUE pVirtQueue = &pVirtio->aVirtqueues[pVirtio->uVirtqSelect];
2630 pVirtQueue->GCPhysVirtqUsed = (pVirtQueue->GCPhysVirtqUsed & UINT64_C(0x00000000ffffffff)) | ((uint64_t)u32Val << 32);
2631 break;
2632 }
2633 default:
2634 ASSERT_GUEST_MSG_FAILED(("Bad write access to mapped capabilities region: off=%RGp pv=%#p{%.*Rhxs} cb=%u\n", off, pv, cb, pv, cb));
2635 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS,
2636 "virtioMmioTransportWrite: Bad MMIO access to capabilities, offset=%RTiop cb=%08x\n", off, cb);
2637 }
2638
2639 STAM_PROFILE_ADV_STOP(&pVirtio->CTX_SUFF(StatWrite), a);
2640 return rc;
2641}
2642
2643
2644#ifdef IN_RING3
2645
2646/**
2647 * @callback_method_impl{FNPCICONFIGREAD}
2648 */
2649static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigRead(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
2650 uint32_t uAddress, unsigned cb, uint32_t *pu32Value)
2651{
2652 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
2653 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
2654 RT_NOREF(pPciDev);
2655
2656 if (uAddress == pVirtio->uPciCfgDataOff)
2657 {
2658 /* See comments in PCI Cfg capability initialization (in capabilities setup section of this code) */
2659 struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap;
2660 uint32_t uLength = pPciCap->uLength;
2661
2662 Log7Func((" pDevIns=%p pPciDev=%p uAddress=%#x%s cb=%u uLength=%d, bar=%d\n",
2663 pDevIns, pPciDev, uAddress, uAddress < 0x10 ? " " : "", cb, uLength, pPciCap->uBar));
2664
2665 if ( (uLength != 1 && uLength != 2 && uLength != 4)
2666 || pPciCap->uBar != VIRTIO_REGION_PCI_CAP)
2667 {
2668 ASSERT_GUEST_MSG_FAILED(("Guest read virtio_pci_cfg_cap.pci_cfg_data using mismatching config. "
2669 "Ignoring\n"));
2670 *pu32Value = UINT32_MAX;
2671 return VINF_SUCCESS;
2672 }
2673
2674 VBOXSTRICTRC rcStrict = virtioMmioRead(pDevIns, pVirtio, pPciCap->uOffset, pu32Value, cb);
2675 Log7Func((" Guest read virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%d, length=%d, result=0x%x -> %Rrc\n",
2676 pPciCap->uBar, pPciCap->uOffset, uLength, *pu32Value, VBOXSTRICTRC_VAL(rcStrict)));
2677 return rcStrict;
2678 }
2679 Log7Func((" pDevIns=%p pPciDev=%p uAddress=%#x%s cb=%u pu32Value=%p\n",
2680 pDevIns, pPciDev, uAddress, uAddress < 0x10 ? " " : "", cb, pu32Value));
2681 return VINF_PDM_PCI_DO_DEFAULT;
2682}
2683
2684/**
2685 * @callback_method_impl{FNPCICONFIGWRITE}
2686 */
2687static DECLCALLBACK(VBOXSTRICTRC) virtioR3PciConfigWrite(PPDMDEVINS pDevIns, PPDMPCIDEV pPciDev,
2688 uint32_t uAddress, unsigned cb, uint32_t u32Value)
2689{
2690 PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
2691 PVIRTIOCORECC pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
2692 RT_NOREF(pPciDev);
2693
2694 Log7Func(("pDevIns=%p pPciDev=%p uAddress=%#x %scb=%u u32Value=%#x\n", pDevIns, pPciDev, uAddress, uAddress < 0xf ? " " : "", cb, u32Value));
2695 if (uAddress == pVirtio->uPciCfgDataOff)
2696 {
2697 /* See comments in PCI Cfg capability initialization (in capabilities setup section of this code) */
2698 struct virtio_pci_cap *pPciCap = &pVirtioCC->pPciCfgCap->pciCap;
2699 uint32_t uLength = pPciCap->uLength;
2700
2701 if ( (uLength != 1 && uLength != 2 && uLength != 4)
2702 || cb != uLength
2703 || pPciCap->uBar != VIRTIO_REGION_PCI_CAP)
2704 {
2705 ASSERT_GUEST_MSG_FAILED(("Guest write virtio_pci_cfg_cap.pci_cfg_data using mismatching config. Ignoring\n"));
2706 return VINF_SUCCESS;
2707 }
2708
2709 VBOXSTRICTRC rcStrict = virtioMmioWrite(pDevIns, pVirtio, pPciCap->uOffset, &u32Value, cb);
2710 Log2Func(("Guest wrote virtio_pci_cfg_cap.pci_cfg_data, bar=%d, offset=%x, length=%x, value=%d -> %Rrc\n",
2711 pPciCap->uBar, pPciCap->uOffset, uLength, u32Value, VBOXSTRICTRC_VAL(rcStrict)));
2712 return rcStrict;
2713 }
2714 return VINF_PDM_PCI_DO_DEFAULT;
2715}
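
/*
 * How a guest exercises the virtio_pci_cfg_cap window handled by the two
 * callbacks above (VirtIO 1.0, 4.1.4.7): it programs bar/offset/length in the
 * capability, then accesses pci_cfg_data, which is proxied to the selected
 * BAR region via virtioMmioRead()/virtioMmioWrite(). Guest pseudo-code with
 * placeholder pci_cfg_read/write primitives:
 *
 * @code
 *     pci_cfg_write8 (capOff + RT_UOFFSETOF(struct virtio_pci_cap, uBar),    VIRTIO_REGION_PCI_CAP);
 *     pci_cfg_write32(capOff + RT_UOFFSETOF(struct virtio_pci_cap, uOffset), regOff);
 *     pci_cfg_write32(capOff + RT_UOFFSETOF(struct virtio_pci_cap, uLength), 4);
 *     uVal = pci_cfg_read32(pVirtio->uPciCfgDataOff);  // lands in virtioR3PciConfigRead()
 * @endcode
 */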
2716
2717
2718/*********************************************************************************************************************************
2719* Saved state (SSM) *
2720*********************************************************************************************************************************/
2721
2722
2723/**
2724 * Loads a saved device state (called from device-specific code on SSM final pass)
2725 *
2726 * @param pVirtio Pointer to the shared virtio state.
2727 * @param pHlp The ring-3 device helpers.
2728 * @param pSSM The saved state handle.
2729 * @returns VBox status code.
2730 */
DECLHIDDEN(int) virtioCoreR3LegacyDeviceLoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM,
                                                 uint32_t uVersion, uint32_t uVirtioLegacy_3_1_Beta)
{
    int rc;
    uint32_t uDriverFeaturesLegacy32bit;

    rc = pHlp->pfnSSMGetU32( pSSM, &uDriverFeaturesLegacy32bit);
    AssertRCReturn(rc, rc);
    pVirtio->uDriverFeatures = (uint64_t)uDriverFeaturesLegacy32bit;

    rc = pHlp->pfnSSMGetU16( pSSM, &pVirtio->uVirtqSelect);
    AssertRCReturn(rc, rc);

    rc = pHlp->pfnSSMGetU8(  pSSM, &pVirtio->fDeviceStatus);
    AssertRCReturn(rc, rc);

#ifdef LOG_ENABLED
    char szOut[80] = { 0 };
    virtioCoreFormatDeviceStatus(pVirtio->fDeviceStatus, szOut, sizeof(szOut));
    Log(("Loaded legacy device status = (%s)\n", szOut));
#endif

    rc = pHlp->pfnSSMGetU8(  pSSM, &pVirtio->uISR);
    AssertRCReturn(rc, rc);

    uint32_t cQueues = 3; /* Default carried over from the earliest v0.9 code, which assumed three queues. */
    if (uVersion > uVirtioLegacy_3_1_Beta)
    {
        rc = pHlp->pfnSSMGetU32(pSSM, &cQueues);
        AssertRCReturn(rc, rc);
    }

    AssertLogRelMsgReturn(cQueues <= VIRTQ_MAX_COUNT, ("%#x\n", cQueues), VERR_SSM_LOAD_CONFIG_MISMATCH);
    AssertLogRelMsgReturn(pVirtio->uVirtqSelect < cQueues || (cQueues == 0 && pVirtio->uVirtqSelect),
                          ("uVirtqSelect=%u cQueues=%u\n", pVirtio->uVirtqSelect, cQueues),
                          VERR_SSM_LOAD_CONFIG_MISMATCH);

    Log(("\nRestoring %d legacy-only virtio-net device queues from saved state:\n", cQueues));
    for (unsigned uVirtq = 0; uVirtq < cQueues; uVirtq++)
    {
        PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[uVirtq];

        if (uVirtq == cQueues - 1)
            RTStrPrintf(pVirtq->szName, sizeof(pVirtq->szName), "legacy-ctrlq");
        else if (uVirtq % 2)
            RTStrPrintf(pVirtq->szName, sizeof(pVirtq->szName), "legacy-xmitq<%d>", uVirtq / 2);
        else
            RTStrPrintf(pVirtq->szName, sizeof(pVirtq->szName), "legacy-recvq<%d>", uVirtq / 2);

        rc = pHlp->pfnSSMGetU16(pSSM, &pVirtq->uQueueSize);
        AssertRCReturn(rc, rc);

        uint32_t uVirtqPfn;
        rc = pHlp->pfnSSMGetU32(pSSM, &uVirtqPfn);
        AssertRCReturn(rc, rc);

        rc = pHlp->pfnSSMGetU16(pSSM, &pVirtq->uAvailIdxShadow);
        AssertRCReturn(rc, rc);

        rc = pHlp->pfnSSMGetU16(pSSM, &pVirtq->uUsedIdxShadow);
        AssertRCReturn(rc, rc);

        if (uVirtqPfn)
        {
            pVirtq->GCPhysVirtqDesc  = (uint64_t)uVirtqPfn * VIRTIO_PAGE_SIZE;
            pVirtq->GCPhysVirtqAvail = pVirtq->GCPhysVirtqDesc + sizeof(VIRTQ_DESC_T) * pVirtq->uQueueSize;
            pVirtq->GCPhysVirtqUsed  =
                RT_ALIGN(pVirtq->GCPhysVirtqAvail + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[pVirtq->uQueueSize]), VIRTIO_PAGE_SIZE);
            pVirtq->uEnable = 1;
        }
        else
        {
            LogFunc(("WARNING: QUEUE \"%s\" PAGE NUMBER ZERO IN SAVED STATE\n", pVirtq->szName));
            pVirtq->uEnable = 0;
        }
        pVirtq->uNotifyOffset = 0; /* unused in legacy mode */
        pVirtq->uMsixVector   = 0; /* unused in legacy mode */
    }
    pVirtio->fGenUpdatePending = 0; /* unused in legacy mode */
    pVirtio->uConfigGeneration = 0; /* unused in legacy mode */
    pVirtio->uPciCfgDataOff    = 0; /* unused in legacy mode (port I/O used instead) */

    return VINF_SUCCESS;
}
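
/*
 * Worked example of the legacy (pre-VirtIO 1.0) split-ring layout reconstructed above.
 * This is an illustrative sketch only (hence the #if 0), not part of the device
 * interface: for a queue of size 256 whose guest driver wrote PFN 0x12345, the
 * descriptor table sits at the page boundary, the avail ring follows it directly,
 * and the used ring starts at the next page boundary after the avail ring.
 */
#if 0 /* illustrative sketch only */
static void virtioLegacyRingLayoutExample(void)
{
    uint16_t const uQueueSize  = 256;
    uint64_t const GCPhysDesc  = (uint64_t)0x12345 * VIRTIO_PAGE_SIZE;           /* 16-byte descriptor entries */
    uint64_t const GCPhysAvail = GCPhysDesc + sizeof(VIRTQ_DESC_T) * uQueueSize; /* flags, idx, ring[] follow  */
    uint64_t const GCPhysUsed  = RT_ALIGN(  GCPhysAvail
                                          + RT_UOFFSETOF_DYN(VIRTQ_AVAIL_T, auRing[uQueueSize]), VIRTIO_PAGE_SIZE);
    Log(("desc=%RX64 avail=%RX64 used=%RX64\n", GCPhysDesc, GCPhysAvail, GCPhysUsed));
}
#endif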

/**
 * Loads a saved modern device state (called from device-specific code on the SSM final pass).
 *
 * Note: This loads state saved by a modern (VirtIO 1.0+) device, of which this transitional device is one,
 * and thus supports both legacy and modern guest virtio drivers.
 *
 * @returns VBox status code.
 * @param   pVirtio       Pointer to the shared virtio state.
 * @param   pHlp          The ring-3 device helpers.
 * @param   pSSM          The saved state handle.
 * @param   uVersion      The saved state version.
 * @param   uTestVersion  The saved state version this code expects (embedded after the marker).
 * @param   cQueues       The number of queues (currently unused, see the @todo in the loop below).
 */
DECLHIDDEN(int) virtioCoreR3ModernDeviceLoadExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM,
                                                 uint32_t uVersion, uint32_t uTestVersion, uint32_t cQueues)
{
    RT_NOREF2(cQueues, uVersion);
    LogFunc(("\n"));
    /*
     * Check the marker and (embedded) version number.
     */
    uint64_t uMarker = 0;
    int rc;

    rc = pHlp->pfnSSMGetU64(pSSM, &uMarker);
    AssertRCReturn(rc, rc);
    if (uMarker != VIRTIO_SAVEDSTATE_MARKER)
        return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
                                        N_("Expected marker value %#RX64 found %#RX64 instead"),
                                        VIRTIO_SAVEDSTATE_MARKER, uMarker);
    uint32_t uVersionSaved = 0;
    rc = pHlp->pfnSSMGetU32(pSSM, &uVersionSaved);
    AssertRCReturn(rc, rc);
    if (uVersionSaved != uTestVersion)
        return pHlp->pfnSSMSetLoadError(pSSM, VERR_SSM_DATA_UNIT_FORMAT_CHANGED, RT_SRC_POS,
                                        N_("Unsupported virtio version: %u"), uVersionSaved);
    /*
     * Load the state.
     */
    rc = pHlp->pfnSSMGetU32( pSSM, &pVirtio->fLegacyDriver);
    AssertRCReturn(rc, rc);
    rc = pHlp->pfnSSMGetBool(pSSM, &pVirtio->fGenUpdatePending);
    AssertRCReturn(rc, rc);
    rc = pHlp->pfnSSMGetU8(  pSSM, &pVirtio->fDeviceStatus);
    AssertRCReturn(rc, rc);
    rc = pHlp->pfnSSMGetU8(  pSSM, &pVirtio->uConfigGeneration);
    AssertRCReturn(rc, rc);
    rc = pHlp->pfnSSMGetU8(  pSSM, &pVirtio->uPciCfgDataOff);
    AssertRCReturn(rc, rc);
    rc = pHlp->pfnSSMGetU8(  pSSM, &pVirtio->uISR);
    AssertRCReturn(rc, rc);
    rc = pHlp->pfnSSMGetU16( pSSM, &pVirtio->uVirtqSelect);
    AssertRCReturn(rc, rc);
    rc = pHlp->pfnSSMGetU32( pSSM, &pVirtio->uDeviceFeaturesSelect);
    AssertRCReturn(rc, rc);
    rc = pHlp->pfnSSMGetU32( pSSM, &pVirtio->uDriverFeaturesSelect);
    AssertRCReturn(rc, rc);
    rc = pHlp->pfnSSMGetU64( pSSM, &pVirtio->uDriverFeatures);
    AssertRCReturn(rc, rc);

    /** @todo Adapt this loop to use the cQueues argument instead of the static queue count (safely, with SSM versioning). */
    for (uint32_t i = 0; i < VIRTQ_MAX_COUNT; i++)
    {
        PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[i];
        rc = pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqDesc);
        AssertRCReturn(rc, rc);
        rc = pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqAvail);
        AssertRCReturn(rc, rc);
        rc = pHlp->pfnSSMGetGCPhys64( pSSM, &pVirtq->GCPhysVirtqUsed);
        AssertRCReturn(rc, rc);
        rc = pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uNotifyOffset);
        AssertRCReturn(rc, rc);
        rc = pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uMsixVector);
        AssertRCReturn(rc, rc);
        rc = pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uEnable);
        AssertRCReturn(rc, rc);
        rc = pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uQueueSize);
        AssertRCReturn(rc, rc);
        rc = pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uAvailIdxShadow);
        AssertRCReturn(rc, rc);
        rc = pHlp->pfnSSMGetU16(      pSSM, &pVirtq->uUsedIdxShadow);
        AssertRCReturn(rc, rc);
        rc = pHlp->pfnSSMGetMem(      pSSM, pVirtq->szName, sizeof(pVirtq->szName));
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}
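
/*
 * A minimal sketch (with hypothetical exampleXxx names and a hypothetical
 * EXAMPLE_SAVEDSTATE_VERSION) of how a device's FNSSMDEVLOADEXEC callback might
 * chain into virtioCoreR3ModernDeviceLoadExec() above after restoring its own
 * device-specific fields on the final pass:
 */
#if 0 /* illustrative sketch only */
static DECLCALLBACK(int) exampleR3LoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    PVIRTIOCORE   pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE); /* core is first member, see virtioCoreR3Init() */
    PCPDMDEVHLPR3 pHlp    = pDevIns->pHlpR3;
    Assert(uPass == SSM_PASS_FINAL); RT_NOREF(uPass);
    /* ...restore device-specific fields here first... */
    return virtioCoreR3ModernDeviceLoadExec(pVirtio, pHlp, pSSM, uVersion,
                                            EXAMPLE_SAVEDSTATE_VERSION, VIRTQ_MAX_COUNT);
}
#endif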

/**
 * Called from the FNSSMDEVSAVEEXEC function of the device.
 *
 * @returns VBox status code.
 * @param   pVirtio   Pointer to the shared virtio state.
 * @param   pHlp      The ring-3 device helpers.
 * @param   pSSM      The saved state handle.
 * @param   uVersion  The saved state version to embed after the marker.
 * @param   cQueues   The number of queues (currently unused, see the @todo below).
 */
DECLHIDDEN(int) virtioCoreR3SaveExec(PVIRTIOCORE pVirtio, PCPDMDEVHLPR3 pHlp, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t cQueues)
{
    RT_NOREF(cQueues);
    /** @todo Figure out a way to save cQueues (with SSM versioning). */

    LogFunc(("\n"));
    pHlp->pfnSSMPutU64(pSSM, VIRTIO_SAVEDSTATE_MARKER);
    pHlp->pfnSSMPutU32(pSSM, uVersion);

    pHlp->pfnSSMPutU32( pSSM, pVirtio->fLegacyDriver);
    pHlp->pfnSSMPutBool(pSSM, pVirtio->fGenUpdatePending);
    pHlp->pfnSSMPutU8(  pSSM, pVirtio->fDeviceStatus);
    pHlp->pfnSSMPutU8(  pSSM, pVirtio->uConfigGeneration);
    pHlp->pfnSSMPutU8(  pSSM, pVirtio->uPciCfgDataOff);
    pHlp->pfnSSMPutU8(  pSSM, pVirtio->uISR);
    pHlp->pfnSSMPutU16( pSSM, pVirtio->uVirtqSelect);
    pHlp->pfnSSMPutU32( pSSM, pVirtio->uDeviceFeaturesSelect);
    pHlp->pfnSSMPutU32( pSSM, pVirtio->uDriverFeaturesSelect);
    pHlp->pfnSSMPutU64( pSSM, pVirtio->uDriverFeatures);

    for (uint32_t i = 0; i < VIRTQ_MAX_COUNT; i++)
    {
        PVIRTQUEUE pVirtq = &pVirtio->aVirtqueues[i];

        pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqDesc);
        pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqAvail);
        pHlp->pfnSSMPutGCPhys64( pSSM, pVirtq->GCPhysVirtqUsed);
        pHlp->pfnSSMPutU16(      pSSM, pVirtq->uNotifyOffset);
        pHlp->pfnSSMPutU16(      pSSM, pVirtq->uMsixVector);
        pHlp->pfnSSMPutU16(      pSSM, pVirtq->uEnable);
        pHlp->pfnSSMPutU16(      pSSM, pVirtq->uQueueSize);
        pHlp->pfnSSMPutU16(      pSSM, pVirtq->uAvailIdxShadow);
        pHlp->pfnSSMPutU16(      pSSM, pVirtq->uUsedIdxShadow);
        int rc = pHlp->pfnSSMPutMem(pSSM, pVirtq->szName, sizeof(pVirtq->szName)); /* 32 bytes; must match the load path above */
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}
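
/*
 * The matching save-side sketch (same hypothetical names as the load sketch above):
 * a device's FNSSMDEVSAVEEXEC callback writes its device-specific fields and then
 * calls virtioCoreR3SaveExec() to serialize the core and queue state:
 */
#if 0 /* illustrative sketch only */
static DECLCALLBACK(int) exampleR3SaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
{
    PVIRTIOCORE   pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
    PCPDMDEVHLPR3 pHlp    = pDevIns->pHlpR3;
    /* ...save device-specific fields here first... */
    return virtioCoreR3SaveExec(pVirtio, pHlp, pSSM, EXAMPLE_SAVEDSTATE_VERSION, VIRTQ_MAX_COUNT);
}
#endif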


/*********************************************************************************************************************************
*   Device Level                                                                                                                 *
*********************************************************************************************************************************/

/**
 * This must be called by the client to handle VM state changes after the client takes care of its device-specific
 * tasks for the state change (i.e. reset, suspend, power-off, resume).
 *
 * @param   pVirtio   Pointer to the shared virtio state.
 * @param   enmState  The VM state change being handled.
 */
DECLHIDDEN(void) virtioCoreR3VmStateChanged(PVIRTIOCORE pVirtio, VIRTIOVMSTATECHANGED enmState)
{
    LogFunc(("State changing to %s\n",
             virtioCoreGetStateChangeText(enmState)));

    switch (enmState)
    {
        case kvirtIoVmStateChangedReset:
            virtioCoreResetAll(pVirtio);
            break;
        case kvirtIoVmStateChangedSuspend:
            break;
        case kvirtIoVmStateChangedPowerOff:
            break;
        case kvirtIoVmStateChangedResume:
            for (int uVirtq = 0; uVirtq < VIRTQ_MAX_COUNT; uVirtq++)
            {
                if (   (!pVirtio->fLegacyDriver && pVirtio->aVirtqueues[uVirtq].uEnable)
                    || pVirtio->aVirtqueues[uVirtq].GCPhysVirtqDesc)
                    virtioCoreNotifyGuestDriver(pVirtio->pDevInsR3, pVirtio, uVirtq);
            }
            break;
        default:
            LogRelFunc(("Bad enum value\n"));
            return;
    }
}
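
/*
 * Sketch of the expected calling pattern (hypothetical device callback): the client
 * performs its device-specific work for the state change first, then notifies the
 * core, e.g. from a reset callback:
 */
#if 0 /* illustrative sketch only */
static DECLCALLBACK(void) exampleR3Reset(PPDMDEVINS pDevIns)
{
    PVIRTIOCORE pVirtio = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
    /* ...device-specific reset work goes here first... */
    virtioCoreR3VmStateChanged(pVirtio, kvirtIoVmStateChangedReset);
}
#endif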

/**
 * This should be called from PDMDEVREGR3::pfnDestruct.
 *
 * @param   pDevIns    The device instance.
 * @param   pVirtio    Pointer to the shared virtio state.
 * @param   pVirtioCC  Pointer to the ring-3 virtio state.
 */
DECLHIDDEN(void) virtioCoreR3Term(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC)
{
#ifdef VIRTIO_REL_INFO_DUMP
    RTTraceBufRelease(pVirtio->hTraceBuf);
    pVirtio->hTraceBuf = NIL_RTTRACEBUF;
#endif /* VIRTIO_REL_INFO_DUMP */
    if (pVirtioCC->pbPrevDevSpecificCfg)
    {
        RTMemFree(pVirtioCC->pbPrevDevSpecificCfg);
        pVirtioCC->pbPrevDevSpecificCfg = NULL;
    }

    RT_NOREF(pDevIns, pVirtio);
}


/**
 * Sets up the VirtIO device as a PCI device.
 *
 * @returns VBox status code.
 * @param   pDevIns           Device instance.
 * @param   pVirtio           Pointer to the shared virtio state. This
 *                            must be the first member in the shared
 *                            device instance data!
 * @param   pVirtioCC         Pointer to the ring-3 virtio state. This
 *                            must be the first member in the ring-3
 *                            device instance data!
 * @param   pPciParams        Values to populate the industry-standard PCI Configuration Space data structure.
 * @param   pcszInstance      Device instance name (format-specifier).
 * @param   cbDevSpecificCfg  Size of the virtio_pci_device_cap device-specific struct.
 */
static int virtioR3PciTransportInit(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, PVIRTIOPCIPARAMS pPciParams,
                                    const char *pcszInstance, uint16_t cbDevSpecificCfg)
{
    /* Set PCI config registers (assume 32-bit mode) */
    PPDMPCIDEV pPciDev = pDevIns->apPciDevs[0];
    PDMPCIDEV_ASSERT_VALID(pDevIns, pPciDev);

    PDMPciDevSetVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO);
    PDMPciDevSetDeviceId(pPciDev, pPciParams->uDeviceId);

    if (pPciParams->uDeviceId < DEVICE_PCI_DEVICE_ID_VIRTIO_BASE)
        /* Transitional devices MUST have a PCI Revision ID of 0. */
        PDMPciDevSetRevisionId(pPciDev, DEVICE_PCI_REVISION_ID_VIRTIO_TRANS);
    else
        /* Non-transitional devices SHOULD have a PCI Revision ID of 1 or higher. */
        PDMPciDevSetRevisionId(pPciDev, DEVICE_PCI_REVISION_ID_VIRTIO_V1);

    PDMPciDevSetSubSystemId(pPciDev, pPciParams->uSubsystemId);
    PDMPciDevSetSubSystemVendorId(pPciDev, DEVICE_PCI_VENDOR_ID_VIRTIO);
    PDMPciDevSetClassBase(pPciDev, pPciParams->uClassBase);
    PDMPciDevSetClassSub(pPciDev, pPciParams->uClassSub);
    PDMPciDevSetClassProg(pPciDev, pPciParams->uClassProg);
    PDMPciDevSetInterruptLine(pPciDev, pPciParams->uInterruptLine);
    PDMPciDevSetInterruptPin(pPciDev, pPciParams->uInterruptPin);

    /* Register PCI device */
    int rc = PDMDevHlpPCIRegister(pDevIns, pPciDev);
    if (RT_FAILURE(rc))
        return PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Device")); /* can we put params in this error? */

    rc = PDMDevHlpPCIInterceptConfigAccesses(pDevIns, pPciDev, virtioR3PciConfigRead, virtioR3PciConfigWrite);
    AssertRCReturn(rc, rc);

    /* Construct & map PCI vendor-specific capabilities for virtio host negotiation with guest driver */

#define CFG_ADDR_2_IDX(addr) ((uint8_t)(((uintptr_t)(addr) - (uintptr_t)&pPciDev->abConfig[0])))
#define SET_PCI_CAP_LOC(a_pPciDev, a_pCfg, a_LocCap, a_uMmioLengthAlign) \
        do { \
            (a_LocCap).offMmio = (a_pCfg)->uOffset; \
            (a_LocCap).cbMmio  = RT_ALIGN_T((a_pCfg)->uLength, a_uMmioLengthAlign, uint16_t); \
            (a_LocCap).offPci  = (uint16_t)(uintptr_t)((uint8_t *)(a_pCfg) - &(a_pPciDev)->abConfig[0]); \
            (a_LocCap).cbPci   = (a_pCfg)->uCapLen; \
        } while (0)

    PVIRTIO_PCI_CAP_T pCfg;
    uint32_t cbRegion = 0;

    /*
     * Common capability (VirtIO 1.0, section 4.1.4.3).
     */
    pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[0x40];
    pCfg->uCfgType = VIRTIO_PCI_CAP_COMMON_CFG;
    pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
    pCfg->uCapLen  = sizeof(VIRTIO_PCI_CAP_T);
    pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
    pCfg->uBar     = VIRTIO_REGION_PCI_CAP;
    pCfg->uOffset  = RT_ALIGN_32(0, 4); /* Currently 0, but a reminder to keep this 32-bit aligned if it changes. */
    pCfg->uLength  = sizeof(VIRTIO_PCI_COMMON_CFG_T);
    cbRegion += pCfg->uLength;
    SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocCommonCfgCap, 2);
    pVirtioCC->pCommonCfgCap = pCfg;

    /*
     * Notify capability (VirtIO 1.0, section 4.1.4.4).
     *
     * The size of the spec-defined subregion described by this VirtIO capability follows
     * from this implementation's choice to make each queue's notify offset equal to the
     * queue's ordinal position (i.e. its queue selector value). The VirtIO specification
     * leaves the queue notification area layout up to the implementation.
     */
    pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
    pCfg->uCfgType = VIRTIO_PCI_CAP_NOTIFY_CFG;
    pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
    pCfg->uCapLen  = sizeof(VIRTIO_PCI_NOTIFY_CAP_T);
    pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
    pCfg->uBar     = VIRTIO_REGION_PCI_CAP;
    pCfg->uOffset  = pVirtioCC->pCommonCfgCap->uOffset + pVirtioCC->pCommonCfgCap->uLength;
    pCfg->uOffset  = RT_ALIGN_32(pCfg->uOffset, 4);
    pCfg->uLength  = VIRTQ_MAX_COUNT * VIRTIO_NOTIFY_OFFSET_MULTIPLIER + 2; /* will change in VirtIO 1.1 */
    cbRegion += pCfg->uLength;
    SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocNotifyCap, 1);
    pVirtioCC->pNotifyCap = (PVIRTIO_PCI_NOTIFY_CAP_T)pCfg;
    pVirtioCC->pNotifyCap->uNotifyOffMultiplier = VIRTIO_NOTIFY_OFFSET_MULTIPLIER;
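
    /*
     * Worked example of the layout chosen above: with each queue's notify offset equal
     * to its ordinal and the multiplier set to VIRTIO_NOTIFY_OFFSET_MULTIPLIER, a guest
     * derives the doorbell of queue q per VirtIO 1.0, section 4.1.4.4, as
     *
     *     cap.offset + queue_notify_off * notify_off_multiplier
     *     (e.g. queue 3 -> pCfg->uOffset + 3 * VIRTIO_NOTIFY_OFFSET_MULTIPLIER)
     *
     * which is why uLength above must cover VIRTQ_MAX_COUNT such slots.
     */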

    /* ISR capability (VirtIO 1.0, section 4.1.4.5).
     *
     * The VirtIO 1.0 spec's wording describes this as an 8-bit field, unaligned, in MMIO space,
     * while the spec's example diagram depicts it as a 32-bit field with the upper bits 'reserved'.
     * The two depictions differ; the spec's wording, not the diagram, is what works in practice.
     */
    pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
    pCfg->uCfgType = VIRTIO_PCI_CAP_ISR_CFG;
    pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
    pCfg->uCapLen  = sizeof(VIRTIO_PCI_CAP_T);
    pCfg->uCapNext = CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen;
    pCfg->uBar     = VIRTIO_REGION_PCI_CAP;
    pCfg->uOffset  = pVirtioCC->pNotifyCap->pciCap.uOffset + pVirtioCC->pNotifyCap->pciCap.uLength;
    pCfg->uOffset  = RT_ALIGN_32(pCfg->uOffset, 4);
    pCfg->uLength  = sizeof(uint8_t);
    cbRegion += pCfg->uLength;
    SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocIsrCap, 4);
    pVirtioCC->pIsrCap = pCfg;

    /* PCI Cfg capability (VirtIO 1.0, section 4.1.4.7).
     *
     * This capability facilitates early-boot access to this device (e.g. by the BIOS).
     * This region isn't page-MMIO mapped; instead, PCI configuration accesses are intercepted,
     * and the consumer modulates uBar, uOffset and uLength to locate and read/write values
     * in any part of any region. (Note: the Linux driver doesn't utilize this feature. On Linux,
     * this capability only appears in lspci output if uLength is non-zero and 4-byte aligned
     * during initialization of the Linux virtio driver.)
     */
    pVirtio->uPciCfgDataOff = pCfg->uCapNext + RT_OFFSETOF(VIRTIO_PCI_CFG_CAP_T, uPciCfgData);
    pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
    pCfg->uCfgType = VIRTIO_PCI_CAP_PCI_CFG;
    pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
    pCfg->uCapLen  = sizeof(VIRTIO_PCI_CFG_CAP_T);
    pCfg->uCapNext = (pVirtio->fMsiSupport || pVirtioCC->pbDevSpecificCfg) ? CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen : 0;
    pCfg->uBar     = VIRTIO_REGION_PCI_CAP;
    pCfg->uOffset  = 0;
    pCfg->uLength  = 4;
    cbRegion += pCfg->uLength;
    SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocPciCfgCap, 1);
    pVirtioCC->pPciCfgCap = (PVIRTIO_PCI_CFG_CAP_T)pCfg;
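
    /*
     * Illustrative guest-side use of this window (VirtIO 1.0, section 4.1.4.7), in
     * pseudocode with hypothetical cfg_read/cfg_write helpers: firmware programs the
     * bar/offset/length fields of this capability through PCI config space, then
     * accesses pci_cfg_data, which virtioR3PciConfigWrite()/-Read() forward into the
     * selected BAR region:
     *
     *     cfg_write8 (capBase + offsetof(virtio_pci_cap, bar),    VIRTIO_REGION_PCI_CAP);
     *     cfg_write32(capBase + offsetof(virtio_pci_cap, offset), offTargetInBar);
     *     cfg_write32(capBase + offsetof(virtio_pci_cap, length), 4);
     *     uValue = cfg_read32(capBase + offsetof(virtio_pci_cfg_cap, pci_cfg_data));
     */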

    if (pVirtioCC->pbDevSpecificCfg)
    {
        /* Device-specific config capability (VirtIO 1.0, section 4.1.4.6).
         *
         * The client defines the device-specific config struct and passes its size to
         * virtioCoreR3Init() so it can be advertised here.
         */
        pCfg = (PVIRTIO_PCI_CAP_T)&pPciDev->abConfig[pCfg->uCapNext];
        pCfg->uCfgType = VIRTIO_PCI_CAP_DEVICE_CFG;
        pCfg->uCapVndr = VIRTIO_PCI_CAP_ID_VENDOR;
        pCfg->uCapLen  = sizeof(VIRTIO_PCI_CAP_T);
        pCfg->uCapNext = pVirtio->fMsiSupport ? CFG_ADDR_2_IDX(pCfg) + pCfg->uCapLen : 0;
        pCfg->uBar     = VIRTIO_REGION_PCI_CAP;
        pCfg->uOffset  = pVirtioCC->pIsrCap->uOffset + pVirtioCC->pIsrCap->uLength;
        pCfg->uOffset  = RT_ALIGN_32(pCfg->uOffset, 4);
        pCfg->uLength  = cbDevSpecificCfg;
        cbRegion += pCfg->uLength;
        SET_PCI_CAP_LOC(pPciDev, pCfg, pVirtio->LocDeviceCap, 4);
        pVirtioCC->pDeviceCap = pCfg;
    }
    else
        Assert(pVirtio->LocDeviceCap.cbMmio == 0 && pVirtio->LocDeviceCap.cbPci == 0);

    if (pVirtio->fMsiSupport)
    {
        PDMMSIREG aMsiReg;
        RT_ZERO(aMsiReg);
        aMsiReg.iMsixCapOffset  = pCfg->uCapNext;
        aMsiReg.iMsixNextOffset = 0;
        aMsiReg.iMsixBar        = VIRTIO_REGION_MSIX_CAP;
        aMsiReg.cMsixVectors    = VBOX_MSIX_MAX_ENTRIES;
        rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg); /* see MsixR3init() */
        if (RT_FAILURE(rc))
        {
            /* See PDMDevHlp.cpp:pdmR3DevHlp_PCIRegisterMsi */
            LogFunc(("Failed to configure MSI-X (%Rrc). Reverting to INTx\n", rc));
            pVirtio->fMsiSupport = false;
        }
        else
            Log2Func(("Using MSI-X for guest driver notification\n"));
    }
    else
        LogFunc(("MSI-X not available for VBox, using INTx notification\n"));

    /* Set offset to first capability and enable PCI dev capabilities */
    PDMPciDevSetCapabilityList(pPciDev, 0x40);
    PDMPciDevSetStatus(pPciDev, VBOX_PCI_STATUS_CAP_LIST);

    size_t cbSize = RTStrPrintf(pVirtioCC->szMmioName, sizeof(pVirtioCC->szMmioName), "%s (modern)", pcszInstance);
    if (!cbSize)
        return PDMDEV_SET_ERROR(pDevIns, VERR_BUFFER_OVERFLOW, N_("virtio: out of memory allocating string")); /* can we put params in this error? */

    cbSize = RTStrPrintf(pVirtioCC->szPortIoName, sizeof(pVirtioCC->szPortIoName), "%s (legacy)", pcszInstance);
    if (!cbSize)
        return PDMDEV_SET_ERROR(pDevIns, VERR_BUFFER_OVERFLOW, N_("virtio: out of memory allocating string")); /* can we put params in this error? */

    if (pVirtio->fOfferLegacy)
    {
        /* As a transitional device that supports legacy VirtIO drivers, this VirtIO device generic implementation presents
         * a legacy driver interface in I/O space at BAR0. The following maps the common (e.g. device-independent)
         * dev config area as well as the device-specific dev config area (whose size is passed to the init function of this
         * VirtIO generic device code) for access via Port I/O, since legacy drivers (e.g. pre VirtIO 1.0) don't use MMIO
         * callbacks. (See VirtIO 1.1, section 4.1.4.8.)
         */
        rc = PDMDevHlpPCIIORegionCreateIo(pDevIns, VIRTIO_REGION_LEGACY_IO, sizeof(VIRTIO_LEGACY_PCI_COMMON_CFG_T) + cbDevSpecificCfg,
                                          virtioLegacyIOPortOut, virtioLegacyIOPortIn, NULL /*pvUser*/, pVirtioCC->szPortIoName,
                                          NULL /*paExtDescs*/, &pVirtio->hLegacyIoPorts);
        AssertLogRelRCReturn(rc, PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register legacy config in I/O space at BAR0")));
    }

    /* Note: The Linux driver at drivers/virtio/virtio_pci_modern.c tries to map at least a page for the
     * 'unknown' device-specific capability without querying the capability to determine size, so pad with an extra page.
     */
    rc = PDMDevHlpPCIIORegionCreateMmio(pDevIns, VIRTIO_REGION_PCI_CAP, RT_ALIGN_32(cbRegion + VIRTIO_PAGE_SIZE, VIRTIO_PAGE_SIZE),
                                        PCI_ADDRESS_SPACE_MEM, virtioMmioWrite, virtioMmioRead, pVirtio,
                                        IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
                                        pVirtioCC->szMmioName,
                                        &pVirtio->hMmioPciCap);
    AssertLogRelRCReturn(rc, PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register PCI Capabilities address space")));
    return VINF_SUCCESS;
}


/**
 * Initializes the VirtIO device using the VirtIO-over-MMIO transport mode.
 *
 * @returns VBox status code.
 * @param   pDevIns           Device instance.
 * @param   pVirtio           Pointer to the shared virtio state. This
 *                            must be the first member in the shared
 *                            device instance data!
 * @param   pVirtioCC         Pointer to the ring-3 virtio state. This
 *                            must be the first member in the ring-3
 *                            device instance data!
 * @param   pcszInstance      Device instance name (format-specifier).
 * @param   cbDevSpecificCfg  Size of the virtio_pci_device_cap device-specific struct.
 * @param   GCPhysMmioBase    The physical guest address of the start of the MMIO area.
 * @param   u16Irq            The interrupt number to use for the virtio device.
 */
static int virtioR3MmioTransportInit(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, const char *pcszInstance,
                                     uint16_t cbDevSpecificCfg, RTGCPHYS GCPhysMmioBase, uint16_t u16Irq)
{
    pVirtio->uIrqMmio = u16Irq;

    size_t cbSize = RTStrPrintf(pVirtioCC->szMmioName, sizeof(pVirtioCC->szMmioName), "%s (modern)", pcszInstance);
    if (!cbSize)
        return PDMDEV_SET_ERROR(pDevIns, VERR_BUFFER_OVERFLOW, N_("virtio: out of memory allocating string")); /* can we put params in this error? */

    /*
     * Register and map the MMIO region.
     */
    int rc = PDMDevHlpMmioCreateAndMap(pDevIns, GCPhysMmioBase, RT_ALIGN_32(cbDevSpecificCfg + VIRTIO_MMIO_SIZE, 512),
                                       virtioMmioTransportWrite, virtioMmioTransportRead,
                                       IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
                                       pVirtioCC->szMmioName, &pVirtio->hMmioPciCap);
    AssertLogRelRCReturn(rc, PDMDEV_SET_ERROR(pDevIns, rc, N_("virtio: cannot register VirtIO MMIO address space")));
    return VINF_SUCCESS;
}


/** API Function: See header file */
DECLHIDDEN(int) virtioCoreR3Init(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio, PVIRTIOCORECC pVirtioCC, PVIRTIOPCIPARAMS pPciParams,
                                 const char *pcszInstance, uint64_t fDevSpecificFeatures, uint32_t fOfferLegacy,
                                 void *pvDevSpecificCfg, uint16_t cbDevSpecificCfg)
{
    /*
     * The virtio state must be the first member of the shared device instance data,
     * otherwise we cannot get our bearings in the PCI config callbacks.
     */
    AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOCORE), VERR_STATE_CHANGED);
    AssertLogRelReturn(pVirtioCC == PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC), VERR_STATE_CHANGED);

    pVirtio->pDevInsR3 = pDevIns;

    /*
     * Caller must initialize these.
     */
    AssertReturn(pVirtioCC->pfnStatusChanged, VERR_INVALID_POINTER);
    AssertReturn(pVirtioCC->pfnVirtqNotified, VERR_INVALID_POINTER);
    AssertReturn(VIRTQ_SIZE > 0 && VIRTQ_SIZE <= 32768, VERR_OUT_OF_RANGE); /* VirtIO specification-defined limit */

    PCPDMDEVHLPR3 pHlp = pDevIns->pHlpR3;

    uint16_t u16Irq = 0;
    int rc = pHlp->pfnCFGMQueryU16Def(pDevIns->pCfg, "Irq", &u16Irq, 0);
    if (RT_FAILURE(rc))
        return PDMDEV_SET_ERROR(pDevIns, rc, N_("Configuration error: Failed to get the \"Irq\" value"));

    RTGCPHYS GCPhysMmioBase = 0;
    rc = pHlp->pfnCFGMQueryU64Def(pDevIns->pCfg, "MmioBase", &GCPhysMmioBase, NIL_RTGCPHYS);
    if (RT_FAILURE(rc))
        return PDMDEV_SET_ERROR(pDevIns, rc,
                                N_("Configuration error: Failed to get the \"MmioBase\" value"));

#if 0 /* Until the pdmR3DevHlp_PCISetIrq() implementation is fixed and the Assert that limits the vector to 0 is removed.
       * VBox legacy MSI support has not been implemented yet.
       */
# ifdef VBOX_WITH_MSI_DEVICES
    pVirtio->fMsiSupport = true;
# endif
#endif

    /*
     * Host features (presented as a buffet for the guest to select from)
     * include both dev-specific features and reserved dev-independent features (bitmask).
     */
    pVirtio->uDeviceType     = pPciParams->uDeviceType;
    pVirtio->uDeviceFeatures = VIRTIO_F_VERSION_1
                             | VIRTIO_DEV_INDEPENDENT_FEATURES_OFFERED
                             | fDevSpecificFeatures;

    pVirtio->fLegacyDriver = pVirtio->fOfferLegacy = fOfferLegacy;

    RTStrCopy(pVirtio->szInstance, sizeof(pVirtio->szInstance), pcszInstance);
    pVirtioCC->cbDevSpecificCfg = cbDevSpecificCfg;
    pVirtioCC->pbDevSpecificCfg = (uint8_t *)pvDevSpecificCfg;
    pVirtioCC->pbPrevDevSpecificCfg = (uint8_t *)RTMemDup(pvDevSpecificCfg, cbDevSpecificCfg);
    AssertLogRelReturn(pVirtioCC->pbPrevDevSpecificCfg, VERR_NO_MEMORY);

    if (GCPhysMmioBase != NIL_RTGCPHYS)
        rc = virtioR3MmioTransportInit(pDevIns, pVirtio, pVirtioCC, pcszInstance, cbDevSpecificCfg,
                                       GCPhysMmioBase, u16Irq);
    else
        rc = virtioR3PciTransportInit(pDevIns, pVirtio, pVirtioCC, pPciParams, pcszInstance, cbDevSpecificCfg);
    AssertLogRelRCReturn(rc, rc);

    /*
     * Statistics.
     */
# ifdef VBOX_WITH_STATISTICS
    PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsAllocated, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                           "Total number of allocated descriptor chains", "DescChainsAllocated");
    PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsFreed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                           "Total number of freed descriptor chains", "DescChainsFreed");
    PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsSegsIn, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                           "Total number of inbound segments", "DescChainsSegsIn");
    PDMDevHlpSTAMRegisterF(pDevIns, &pVirtio->StatDescChainsSegsOut, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                           "Total number of outbound segments", "DescChainsSegsOut");
    PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatReadR3, STAMTYPE_PROFILE, "IO/ReadR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3");
    PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatReadR0, STAMTYPE_PROFILE, "IO/ReadR0", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R0");
    PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatReadRC, STAMTYPE_PROFILE, "IO/ReadRC", STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RC");
    PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatWriteR3, STAMTYPE_PROFILE, "IO/WriteR3", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3");
    PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatWriteR0, STAMTYPE_PROFILE, "IO/WriteR0", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R0");
    PDMDevHlpSTAMRegister(pDevIns, &pVirtio->StatWriteRC, STAMTYPE_PROFILE, "IO/WriteRC", STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RC");
# endif /* VBOX_WITH_STATISTICS */

#ifdef VIRTIO_REL_INFO_DUMP
    pVirtio->fRecovering   = false;
    pVirtio->fTestRecovery = false;
    size_t cbBlock = VIRTIO_CORE_TRACE_BUF_SIZE;
    rc = RTTraceBufCarve(&pVirtio->hTraceBuf, 0 /*cEntries*/, 0 /*cbEntry*/, 0 /*fFlags*/, pVirtio->aTraceBuf, &cbBlock);
    AssertRC(rc);
    if (RT_FAILURE(rc))
        LogRel(("virtioCore: Failed to initialize trace buffer (rc=%d)\n", rc));
#endif /* VIRTIO_REL_INFO_DUMP */
    return VINF_SUCCESS;
}
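
/*
 * Constructor-side sketch (all exampleXxx names and parameter values are hypothetical):
 * a device's pfnConstruct fills in the two mandatory callbacks asserted above and the
 * PCI parameters before handing control to virtioCoreR3Init():
 */
#if 0 /* illustrative sketch only */
static int exampleR3ConstructVirtioBits(PPDMDEVINS pDevIns, EXAMPLEDEVCFG *pDevCfg /* hypothetical config struct */)
{
    PVIRTIOCORE     pVirtio   = PDMINS_2_DATA(pDevIns, PVIRTIOCORE);
    PVIRTIOCORECC   pVirtioCC = PDMINS_2_DATA_CC(pDevIns, PVIRTIOCORECC);
    VIRTIOPCIPARAMS PciParams;
    RT_ZERO(PciParams);
    PciParams.uDeviceId         = 0x1041;               /* hypothetical: modern virtio-net */
    pVirtioCC->pfnStatusChanged = exampleStatusChanged; /* hypothetical, mandatory */
    pVirtioCC->pfnVirtqNotified = exampleVirtqNotified; /* hypothetical, mandatory */
    return virtioCoreR3Init(pDevIns, pVirtio, pVirtioCC, &PciParams, "example-0",
                            0 /*fDevSpecificFeatures*/, 1 /*fOfferLegacy*/,
                            pDevCfg, sizeof(*pDevCfg));
}
#endif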

#else /* !IN_RING3 */

/**
 * Sets up the core ring-0/raw-mode virtio bits.
 *
 * @returns VBox status code.
 * @param   pDevIns  The device instance.
 * @param   pVirtio  Pointer to the shared virtio state. This must be the first
 *                   member in the shared device instance data!
 */
DECLHIDDEN(int) virtioCoreRZInit(PPDMDEVINS pDevIns, PVIRTIOCORE pVirtio)
{
    AssertLogRelReturn(pVirtio == PDMINS_2_DATA(pDevIns, PVIRTIOCORE), VERR_STATE_CHANGED);
    int rc;
#ifdef FUTURE_OPTIMIZATION
    rc = PDMDevHlpSetDeviceCritSect(pDevIns, PDMDevHlpCritSectGetNop(pDevIns));
    AssertRCReturn(rc, rc);
#endif

    if (pVirtio->uIrqMmio != 0)
    {
        rc = PDMDevHlpMmioSetUpContext(pDevIns, pVirtio->hMmioPciCap, virtioMmioTransportWrite, virtioMmioTransportRead, pVirtio);
        AssertRCReturn(rc, rc);
    }
    else
    {
        rc = PDMDevHlpMmioSetUpContext(pDevIns, pVirtio->hMmioPciCap, virtioMmioWrite, virtioMmioRead, pVirtio);
        AssertRCReturn(rc, rc);

        if (pVirtio->fOfferLegacy)
        {
            rc = PDMDevHlpIoPortSetUpContext(pDevIns, pVirtio->hLegacyIoPorts, virtioLegacyIOPortOut, virtioLegacyIOPortIn, NULL /*pvUser*/);
            AssertRCReturn(rc, rc);
        }
    }
    return rc;
}

#endif /* !IN_RING3 */
