VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/GICAll.cpp@ 108994

Last change on this file since 108994 was 108976, checked in by vboxsync, 4 weeks ago

VMM/GIC: bugref:10877 Minus 1 bites us again and LPI configuration table work-in-progress.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 143.9 KB
1/* $Id: GICAll.cpp 108976 2025-04-15 12:30:07Z vboxsync $ */
2/** @file
3 * GIC - Generic Interrupt Controller Architecture (GIC) - All Contexts.
4 */
5
6/*
7 * Copyright (C) 2023-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.215389.xyz.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28/** @page pg_gic GIC - Generic Interrupt Controller
29 *
30 * The GIC is an interrupt controller device that lives in VMM but also registers
31 * itself with PDM, similar to the APIC. The reason for this is that it needs to
32 * access per-VCPU data and is an integral part of any ARMv8 VM.
33 *
34 * The GIC is made up of 3 main components:
35 * - Distributor
36 * - Redistributor
37 * - Interrupt Translation Service (ITS)
38 *
39 * The distributor is per-VM while the redistributors are per-VCPU. PEs (Processing
40 * Elements) and CIs (CPU Interfaces) correspond to VCPUs. The distributor and
41 * redistributor each have their own memory-mapped I/O regions. The redistributor is
42 * accessible via CPU system registers as well. The distributor and redistributor
43 * code lives in GICAll.cpp and GICR3.cpp.
44 *
45 * The ITS is the interrupt translation service component of the GIC and its
46 * presence is optional. It provides MSI support along with routing interrupt
47 * sources to specific PEs. The ITS is only accessible via its memory-mapped I/O
48 * region. When the MMIO handle for the ITS region is NIL_IOMMMIOHANDLE, the ITS is
49 * considered to be disabled for the VM. Most of the ITS code lives in GITSAll.cpp.
50 *
51 * This implementation targets only GICv3. It supports neither dual security
52 * states nor the higher exception levels (EL2, EL3). Earlier GIC versions are
53 * considered legacy and not important enough to be emulated.
54 * GICv4 primarily adds support for virtualizing the GIC and its necessity will be
55 * evaluated in the future if/when there is support for nested virtualization on
56 * ARMv8 hosts.
57 */
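/*
 * Rough interrupt delivery flow as implemented in this file (illustrative only):
 *
 *   device asserts an SPI
 *     -> the distributor marks it pending (GICDEV::bmIntrPending)
 *     -> GICD_IROUTERn routing (GICDEV::au32IntrRouting) selects the target VCPU
 *     -> gicDistUpdateIrqState() / gicReDistUpdateIrqState() re-evaluate priorities
 *     -> VMCPU_FF_INTERRUPT_IRQ (group 1) or VMCPU_FF_INTERRUPT_FIQ (group 0) is set
 *        on the target VCPU and the EMT is poked if necessary.
 */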
58
59
60/*********************************************************************************************************************************
61* Header Files *
62*********************************************************************************************************************************/
63#define LOG_GROUP LOG_GROUP_DEV_GIC
64#include "GICInternal.h"
65#include <VBox/vmm/pdmgic.h>
66#include <VBox/vmm/pdmdev.h>
67#include <VBox/vmm/pdmapi.h>
68#include <VBox/vmm/vmcc.h>
69#include <VBox/vmm/vmm.h>
70#include <VBox/vmm/vmcpuset.h>
71
72
73/*********************************************************************************************************************************
74* Defined Constants And Macros *
75*********************************************************************************************************************************/
76#define GIC_IDLE_PRIORITY 0xff
77#define GIC_IS_INTR_SGI(a_uIntId) (a_uIntId - GIC_INTID_RANGE_SGI_START < GIC_INTID_SGI_RANGE_SIZE)
78#define GIC_IS_INTR_PPI(a_uIntId) (a_uIntId - GIC_INTID_RANGE_PPI_START < GIC_INTID_PPI_RANGE_SIZE)
79#define GIC_IS_INTR_SGI_OR_PPI(a_uIntId) (a_uIntId - GIC_INTID_RANGE_SGI_START < GIC_INTID_PPI_RANGE_SIZE)
80#define GIC_IS_INTR_SPI(a_uIntId) (a_uIntId - GIC_INTID_RANGE_SPI_START < GIC_INTID_SPI_RANGE_SIZE)
81#define GIC_IS_INTR_SPECIAL(a_uIntId) (a_uIntId - GIC_INTID_RANGE_SPECIAL_START < GIC_INTID_EXT_PPI_RANGE_SIZE)
82#define GIC_IS_INTR_EXT_PPI(a_uIntId) (a_uIntId - GIC_INTID_RANGE_EXT_PPI_START < GIC_INTID_EXT_PPI_RANGE_SIZE)
83#define GIC_IS_INTR_EXT_SPI(a_uIntId) (a_uIntId - GIC_INTID_RANGE_EXT_SPI_START < GIC_INTID_EXT_SPI_RANGE_SIZE)
84#define GIC_IS_REG_IN_RANGE(a_offReg, a_offFirst, a_cbRegion) ((uint32_t)(a_offReg) - (a_offFirst) < (a_cbRegion))
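/*
 * Note: GIC_IS_REG_IN_RANGE relies on unsigned wrap-around so that a single compare
 * covers both the lower and upper bound. Illustrative example (not part of the build):
 *
 *   GIC_IS_REG_IN_RANGE(0x0084, 0x0080, 0x80)  ->  0x0084 - 0x0080 == 4          -> in range
 *   GIC_IS_REG_IN_RANGE(0x007c, 0x0080, 0x80)  ->  0x007c - 0x0080 wraps to
 *                                                  0xfffffffc (>= 0x80)          -> out of range
 *
 * The GIC_IS_INTR_* checks above use the same idiom, provided the subtraction is
 * performed in unsigned arithmetic.
 */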
85
86
87#ifdef LOG_ENABLED
88/**
89 * Gets the description of a CPU interface register.
90 *
91 * @returns The description.
92 * @param u32Reg The CPU interface register offset.
93 */
94static const char *gicIccGetRegDescription(uint32_t u32Reg)
95{
96 switch (u32Reg)
97 {
98#define GIC_ICC_REG_CASE(a_Reg) case ARMV8_AARCH64_SYSREG_ ## a_Reg: return #a_Reg
99 GIC_ICC_REG_CASE(ICC_PMR_EL1);
100 GIC_ICC_REG_CASE(ICC_IAR0_EL1);
101 GIC_ICC_REG_CASE(ICC_EOIR0_EL1);
102 GIC_ICC_REG_CASE(ICC_HPPIR0_EL1);
103 GIC_ICC_REG_CASE(ICC_BPR0_EL1);
104 GIC_ICC_REG_CASE(ICC_AP0R0_EL1);
105 GIC_ICC_REG_CASE(ICC_AP0R1_EL1);
106 GIC_ICC_REG_CASE(ICC_AP0R2_EL1);
107 GIC_ICC_REG_CASE(ICC_AP0R3_EL1);
108 GIC_ICC_REG_CASE(ICC_AP1R0_EL1);
109 GIC_ICC_REG_CASE(ICC_AP1R1_EL1);
110 GIC_ICC_REG_CASE(ICC_AP1R2_EL1);
111 GIC_ICC_REG_CASE(ICC_AP1R3_EL1);
112 GIC_ICC_REG_CASE(ICC_DIR_EL1);
113 GIC_ICC_REG_CASE(ICC_RPR_EL1);
114 GIC_ICC_REG_CASE(ICC_SGI1R_EL1);
115 GIC_ICC_REG_CASE(ICC_ASGI1R_EL1);
116 GIC_ICC_REG_CASE(ICC_SGI0R_EL1);
117 GIC_ICC_REG_CASE(ICC_IAR1_EL1);
118 GIC_ICC_REG_CASE(ICC_EOIR1_EL1);
119 GIC_ICC_REG_CASE(ICC_HPPIR1_EL1);
120 GIC_ICC_REG_CASE(ICC_BPR1_EL1);
121 GIC_ICC_REG_CASE(ICC_CTLR_EL1);
122 GIC_ICC_REG_CASE(ICC_SRE_EL1);
123 GIC_ICC_REG_CASE(ICC_IGRPEN0_EL1);
124 GIC_ICC_REG_CASE(ICC_IGRPEN1_EL1);
125#undef GIC_ICC_REG_CASE
126 default:
127 return "<UNKNOWN>";
128 }
129}
130
131
132/**
133 * Gets the description of a distributor register given its register offset.
134 *
135 * @returns The register description.
136 * @param offReg The distributor register offset.
137 */
138static const char *gicDistGetRegDescription(uint16_t offReg)
139{
140 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRn_OFF_START, GIC_DIST_REG_IGROUPRn_RANGE_SIZE)) return "GICD_IGROUPRn";
141 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRnE_OFF_START, GIC_DIST_REG_IGROUPRnE_RANGE_SIZE)) return "GICD_IGROUPRnE";
142 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERn_OFF_START, GIC_DIST_REG_IROUTERn_RANGE_SIZE)) return "GICD_IROUTERn";
143 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERnE_OFF_START, GIC_DIST_REG_IROUTERnE_RANGE_SIZE)) return "GICD_IROUTERnE";
144 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERn_OFF_START, GIC_DIST_REG_ISENABLERn_RANGE_SIZE)) return "GICD_ISENABLERn";
145 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERnE_OFF_START, GIC_DIST_REG_ISENABLERnE_RANGE_SIZE)) return "GICD_ISENABLERnE";
146 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERn_OFF_START, GIC_DIST_REG_ICENABLERn_RANGE_SIZE)) return "GICD_ICENABLERn";
147 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERnE_OFF_START, GIC_DIST_REG_ICENABLERnE_RANGE_SIZE)) return "GICD_ICENABLERnE";
148 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERn_OFF_START, GIC_DIST_REG_ISACTIVERn_RANGE_SIZE)) return "GICD_ISACTIVERn";
149 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERnE_OFF_START, GIC_DIST_REG_ISACTIVERnE_RANGE_SIZE)) return "GICD_ISACTIVERnE";
150 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERn_OFF_START, GIC_DIST_REG_ICACTIVERn_RANGE_SIZE)) return "GICD_ICACTIVERn";
151 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERnE_OFF_START, GIC_DIST_REG_ICACTIVERnE_RANGE_SIZE)) return "GICD_ICACTIVERnE";
152 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRn_OFF_START, GIC_DIST_REG_IPRIORITYRn_RANGE_SIZE)) return "GICD_IPRIORITYRn";
153 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRnE_OFF_START, GIC_DIST_REG_IPRIORITYRnE_RANGE_SIZE)) return "GICD_IPRIORITYRnE";
154 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRn_OFF_START, GIC_DIST_REG_ISPENDRn_RANGE_SIZE)) return "GICD_ISPENDRn";
155 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRnE_OFF_START, GIC_DIST_REG_ISPENDRnE_RANGE_SIZE)) return "GICD_ISPENDRnE";
156 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRn_OFF_START, GIC_DIST_REG_ICPENDRn_RANGE_SIZE)) return "GICD_ICPENDRn";
157 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRnE_OFF_START, GIC_DIST_REG_ICPENDRnE_RANGE_SIZE)) return "GICD_ICPENDRnE";
158 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRn_OFF_START, GIC_DIST_REG_ICFGRn_RANGE_SIZE)) return "GICD_ICFGRn";
159 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRnE_OFF_START, GIC_DIST_REG_ICFGRnE_RANGE_SIZE)) return "GICD_ICFGRnE";
160 switch (offReg)
161 {
162 case GIC_DIST_REG_CTLR_OFF: return "GICD_CTLR";
163 case GIC_DIST_REG_TYPER_OFF: return "GICD_TYPER";
164 case GIC_DIST_REG_STATUSR_OFF: return "GICD_STATUSR";
165 case GIC_DIST_REG_ITARGETSRn_OFF_START: return "GICD_ITARGETSRn";
166 case GIC_DIST_REG_IGRPMODRn_OFF_START: return "GICD_IGRPMODRn";
167 case GIC_DIST_REG_NSACRn_OFF_START: return "GICD_NSACRn";
168 case GIC_DIST_REG_SGIR_OFF: return "GICD_SGIR";
169 case GIC_DIST_REG_CPENDSGIRn_OFF_START: return "GICD_CPENDSGIRn";
170 case GIC_DIST_REG_SPENDSGIRn_OFF_START: return "GICD_SPENDSGIRn";
171 case GIC_DIST_REG_INMIn_OFF_START: return "GICD_INMIn";
172 case GIC_DIST_REG_PIDR2_OFF: return "GICD_PIDR2";
173 case GIC_DIST_REG_IIDR_OFF: return "GICD_IIDR";
174 case GIC_DIST_REG_TYPER2_OFF: return "GICD_TYPER2";
175 default:
176 return "<UNKNOWN>";
177 }
178}
179#endif /* LOG_ENABLED */
180
181
182/**
183 * Gets the description of a redistributor register given its register offset.
184 *
185 * @returns The register description.
186 * @param offReg The redistributor register offset.
187 */
188static const char *gicReDistGetRegDescription(uint16_t offReg)
189{
190 switch (offReg)
191 {
192 case GIC_REDIST_REG_CTLR_OFF: return "GICR_CTLR";
193 case GIC_REDIST_REG_IIDR_OFF: return "GICR_IIDR";
194 case GIC_REDIST_REG_TYPER_OFF: return "GICR_TYPER";
195 case GIC_REDIST_REG_TYPER_AFFINITY_OFF: return "GICR_TYPER_AFF";
196 case GIC_REDIST_REG_STATUSR_OFF: return "GICR_STATUSR";
197 case GIC_REDIST_REG_WAKER_OFF: return "GICR_WAKER";
198 case GIC_REDIST_REG_MPAMIDR_OFF: return "GICR_MPAMIDR";
199 case GIC_REDIST_REG_PARTIDR_OFF: return "GICR_PARTIDR";
200 case GIC_REDIST_REG_SETLPIR_OFF: return "GICR_SETLPIR";
201 case GIC_REDIST_REG_CLRLPIR_OFF: return "GICR_CLRLPIR";
202 case GIC_REDIST_REG_PROPBASER_OFF: return "GICR_PROPBASER";
203 case GIC_REDIST_REG_PENDBASER_OFF: return "GICR_PENDBASER";
204 case GIC_REDIST_REG_INVLPIR_OFF: return "GICR_INVLPIR";
205 case GIC_REDIST_REG_INVALLR_OFF: return "GICR_INVALLR";
206 case GIC_REDIST_REG_SYNCR_OFF: return "GICR_SYNCR";
207 case GIC_REDIST_REG_PIDR2_OFF: return "GICR_PIDR2";
208 default:
209 return "<UNKNOWN>";
210 }
211}
212
213
214/**
215 * Gets the description of an SGI/PPI redistributor register given its register
216 * offset.
217 *
218 * @returns The register description.
219 * @param offReg The redistributor register offset.
220 */
221static const char *gicReDistGetSgiPpiRegDescription(uint16_t offReg)
222{
223 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF, GIC_REDIST_SGI_PPI_REG_IGROUPRnE_RANGE_SIZE)) return "GICR_IGROUPRn";
224 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ISENABLERnE_RANGE_SIZE)) return "GICR_ISENABLERn";
225 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ICENABLERnE_RANGE_SIZE)) return "GICR_ICENABLERn";
226 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ISACTIVERnE_RANGE_SIZE)) return "GICR_ISACTIVERn";
227 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ICACTIVERnE_RANGE_SIZE)) return "GICR_ICACTIVERn";
228 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ISPENDRnE_RANGE_SIZE)) return "GICR_ISPENDRn";
229 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ICPENDRnE_RANGE_SIZE)) return "GICR_ICPENDRn";
230 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START, GIC_REDIST_SGI_PPI_REG_IPRIORITYRnE_RANGE_SIZE)) return "GICR_IPRIORITYRn";
231 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF, GIC_REDIST_SGI_PPI_REG_ICFGRnE_RANGE_SIZE)) return "GICR_ICFGRn";
232 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_INMIR0_OFF, GIC_REDIST_SGI_PPI_REG_INMIRnE_RANGE_SIZE)) return "GICR_INMIRn";
233 switch (offReg)
234 {
235 case GIC_REDIST_SGI_PPI_REG_NSACR_OFF: return "GICR_NSACR";
236 case GIC_REDIST_SGI_PPI_REG_IGRPMODR0_OFF: return "GICR_IGRPMODR0";
237 case GIC_REDIST_SGI_PPI_REG_IGRPMODR1E_OFF: return "GICR_IGRPMODR1E";
238 case GIC_REDIST_SGI_PPI_REG_IGRPMODR2E_OFF: return "GICR_IGRPMODR2E";
239 default:
240 return "<UNKNOWN>";
241 }
242}
243
244
245/**
246 * Gets the interrupt ID given a distributor interrupt index.
247 *
248 * @returns The interrupt ID.
249 * @param idxIntr The distributor interrupt index.
250 * @remarks A distributor interrupt is an interrupt type that belongs to the
251 * distributor (e.g. SPIs, extended SPIs).
252 */
253DECLHIDDEN(uint16_t) gicDistGetIntIdFromIndex(uint16_t idxIntr)
254{
255 /*
256 * Distributor interrupts bits to interrupt ID mapping:
257 * +--------------------------------------------------------+
258 * | Range (incl) | SGI | PPI | SPI | Ext SPI |
259 * |--------------+--------+--------+----------+------------|
260 * | Bit | 0..15 | 16..31 | 32..1023 | 1024..2047 |
261 * | Int Id | 0..15 | 16..31 | 32..1023 | 4096..5119 |
262 * +--------------------------------------------------------+
263 */
264 uint16_t uIntId;
265 /* SGIs, PPIs, SPIs and specials. */
266 if (idxIntr < 1024)
267 uIntId = idxIntr;
268 /* Extended SPIs. */
269 else if (idxIntr < 2048)
270 uIntId = GIC_INTID_RANGE_EXT_SPI_START + idxIntr - 1024;
271 else
272 {
273 uIntId = 0;
274 AssertReleaseMsgFailed(("idxIntr=%u\n", idxIntr));
275 }
276 Assert( GIC_IS_INTR_SGI_OR_PPI(uIntId)
277 || GIC_IS_INTR_SPI(uIntId)
278 || GIC_IS_INTR_SPECIAL(uIntId)
279 || GIC_IS_INTR_EXT_SPI(uIntId));
280 return uIntId;
281}
282
283
284/**
285 * Gets the distributor interrupt index given an interrupt ID.
286 *
287 * @returns The distributor interrupt index.
288 * @param uIntId The interrupt ID.
289 * @remarks A distributor interrupt is an interrupt type that belongs to the
290 * distributor (e.g. SPIs, extended SPIs).
291 */
292static uint16_t gicDistGetIndexFromIntId(uint16_t uIntId)
293{
294 uint16_t idxIntr;
295 /* SGIs, PPIs, SPIs and specials. */
296 if (uIntId <= GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT)
297 idxIntr = uIntId;
298 /* Extended SPIs. */
299 else if (uIntId - GIC_INTID_RANGE_EXT_SPI_START < GIC_INTID_EXT_SPI_RANGE_SIZE)
300 idxIntr = 1024 + uIntId - GIC_INTID_RANGE_EXT_SPI_START;
301 else
302 {
303 idxIntr = 0;
304 AssertReleaseMsgFailed(("uIntId=%u\n", uIntId));
305 }
306 Assert(idxIntr < sizeof(GICDEV::bmIntrPending) * 8);
307 return idxIntr;
308}
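/*
 * Worked example (illustrative): SGIs, PPIs, SPIs and the special INTIDs (all below 1024)
 * map to themselves, while extended SPIs are offset by GIC_INTID_RANGE_EXT_SPI_START:
 *
 *   gicDistGetIndexFromIntId(4096) == 1024 + (4096 - 4096) == 1024
 *   gicDistGetIntIdFromIndex(1024) == 4096 + (1024 - 1024) == 4096
 *
 * (4096 being the first extended SPI INTID per the table above.)
 */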
309
310
311/**
312 * Gets the interrupt ID given a redistributor interrupt index.
313 *
314 * @returns The interrupt ID.
315 * @param idxIntr The redistributor interrupt index.
316 * @remarks A redistributor interrupt is an interrupt type that belongs to the
317 * redistributor (e.g. SGIs, PPIs, extended PPIs).
318 */
319DECLHIDDEN(uint16_t) gicReDistGetIntIdFromIndex(uint16_t idxIntr)
320{
321 /*
322 * Redistributor interrupts bits to interrupt ID mapping:
323 * +---------------------------------------------+
324 * | Range (incl) | SGI | PPI | Ext PPI |
325 * +---------------------------------------------+
326 * | Bit | 0..15 | 16..31 | 32..95 |
327 * | Int Id | 0..15 | 16..31 | 1056..1119 |
328 * +---------------------------------------------+
329 */
330 uint16_t uIntId;
331 /* SGIs and PPIs. */
332 if (idxIntr < 32)
333 uIntId = idxIntr;
334 /* Extended PPIs. */
335 else if (idxIntr < 96)
336 uIntId = GIC_INTID_RANGE_EXT_PPI_START + idxIntr - 32;
337 else
338 {
339 uIntId = 0;
340 AssertReleaseMsgFailed(("idxIntr=%u\n", idxIntr));
341 }
342 Assert(GIC_IS_INTR_SGI_OR_PPI(uIntId) || GIC_IS_INTR_EXT_PPI(uIntId));
343 return uIntId;
344}
345
346
347/**
348 * Gets the redistributor interrupt index given an interrupt ID.
349 *
350 * @returns The redistributor interrupt index.
351 * @param uIntId The interrupt ID.
352 * @remarks A redistributor interrupt is an interrupt type that belongs to the
353 * redistributor (e.g. SGIs, PPIs, extended PPIs).
354 */
355static uint16_t gicReDistGetIndexFromIntId(uint16_t uIntId)
356{
357 /* SGIs and PPIs. */
358 uint16_t idxIntr;
359 if (uIntId <= GIC_INTID_RANGE_PPI_LAST)
360 idxIntr = uIntId;
361 /* Extended PPIs. */
362 else if (uIntId - GIC_INTID_RANGE_EXT_PPI_START < GIC_INTID_EXT_PPI_RANGE_SIZE)
363 idxIntr = 32 + uIntId - GIC_INTID_RANGE_EXT_PPI_START;
364 else
365 {
366 idxIntr = 0;
367 AssertReleaseMsgFailed(("uIntId=%u\n", uIntId));
368 }
369 Assert(idxIntr < sizeof(GICCPU::bmIntrPending) * 8);
370 return idxIntr;
371}
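/*
 * Worked example (illustrative): SGIs and PPIs (INTIDs 0..31) map to themselves, while
 * extended PPIs start at bit 32:
 *
 *   gicReDistGetIndexFromIntId(1056) == 32 + (1056 - 1056) == 32
 *   gicReDistGetIntIdFromIndex(32)   == 1056 + (32 - 32)   == 1056
 *
 * (1056 being the first extended PPI INTID per the table above.)
 */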
372
373
374/**
375 * Sets the interrupt pending force-flag and pokes the EMT if required.
376 *
377 * @param pVCpu The cross context virtual CPU structure.
378 * @param fIrq Flag whether to assert the IRQ line or leave it alone.
379 * @param fFiq Flag whether to assert the FIQ line or leave it alone.
380 */
381static void gicSetInterruptFF(PVMCPUCC pVCpu, bool fIrq, bool fFiq)
382{
383 Assert(fIrq || fFiq);
384 LogFlowFunc(("pVCpu=%p{.idCpu=%u} fIrq=%RTbool fFiq=%RTbool\n", pVCpu, pVCpu->idCpu, fIrq, fFiq));
385
386#ifdef IN_RING3
387 /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
388 Assert(pVCpu->pVMR3->enmVMState != VMSTATE_LOADING || PDMR3HasLoadedState(pVCpu->pVMR3));
389#endif
390
391 if (fIrq)
392 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_IRQ);
393 if (fFiq)
394 VMCPU_FF_SET(pVCpu, VMCPU_FF_INTERRUPT_FIQ);
395
396 /*
397 * We need to wake up the target CPU if we're not on EMT.
398 */
399 /** @todo We could just use RTThreadNativeSelf() here, couldn't we? */
400#if defined(IN_RING0)
401# error "Implement me!"
402#elif defined(IN_RING3)
403 PVMCC pVM = pVCpu->CTX_SUFF(pVM);
404 VMCPUID idCpu = pVCpu->idCpu;
405 if (VMMGetCpuId(pVM) != idCpu)
406 {
407 Log7Func(("idCpu=%u enmState=%d\n", idCpu, pVCpu->enmState));
408 VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
409 }
410#endif
411}
412
413
414/**
415 * Clears the interrupt pending force-flag.
416 *
417 * @param pVCpu The cross context virtual CPU structure.
418 * @param fIrq Flag whether to clear the IRQ flag.
419 * @param fFiq Flag whether to clear the FIQ flag.
420 */
421DECLINLINE(void) gicClearInterruptFF(PVMCPUCC pVCpu, bool fIrq, bool fFiq)
422{
423 Assert(fIrq || fFiq);
424 LogFlowFunc(("pVCpu=%p{.idCpu=%u} fIrq=%RTbool fFiq=%RTbool\n", pVCpu, pVCpu->idCpu, fIrq, fFiq));
425
426#ifdef IN_RING3
427 /* IRQ state should be loaded as-is by "LoadExec". Changes can be made from LoadDone. */
428 Assert(pVCpu->pVMR3->enmVMState != VMSTATE_LOADING || PDMR3HasLoadedState(pVCpu->pVMR3));
429#endif
430
431 if (fIrq)
432 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_IRQ);
433 if (fFiq)
434 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_FIQ);
435}
436
437
438/**
439 * Updates the interrupt force-flag.
440 *
441 * @param pVCpu The cross context virtual CPU structure.
442 * @param fIrq Flag whether the IRQ force-flag should be set (true) or cleared (false).
443 * @param fFiq Flag whether the FIQ force-flag should be set (true) or cleared (false).
444 */
445DECLINLINE(void) gicUpdateInterruptFF(PVMCPUCC pVCpu, bool fIrq, bool fFiq)
446{
447 LogFlowFunc(("pVCpu=%p{.idCpu=%u} fIrq=%RTbool fFiq=%RTbool\n", pVCpu, pVCpu->idCpu, fIrq, fFiq));
448
449 if (fIrq || fFiq)
450 gicSetInterruptFF(pVCpu, fIrq, fFiq);
451
452 if (!fIrq || !fFiq)
453 gicClearInterruptFF(pVCpu, !fIrq, !fFiq);
454}
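/*
 * In effect gicUpdateInterruptFF() mirrors the requested line states onto the
 * force-flags (summary of the code above):
 *
 *   fIrq=1, fFiq=0  ->  set VMCPU_FF_INTERRUPT_IRQ,   clear VMCPU_FF_INTERRUPT_FIQ
 *   fIrq=0, fFiq=1  ->  clear VMCPU_FF_INTERRUPT_IRQ, set VMCPU_FF_INTERRUPT_FIQ
 *   fIrq=1, fFiq=1  ->  set both
 *   fIrq=0, fFiq=0  ->  clear both
 */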
455
456
457/**
458 * Gets whether the redistributor has pending interrupts with sufficient priority to
459 * be signalled to the PE.
460 *
461 * @param pGicCpu The GIC redistributor and CPU interface state.
462 * @param pfIrq Where to store whether IRQs can be signalled.
463 * @param pfFiq Where to store whether FIQs can be signalled.
464 */
465static void gicReDistHasIrqPending(PCGICCPU pGicCpu, bool *pfIrq, bool *pfFiq)
466{
467 bool const fIsGroup1Enabled = pGicCpu->fIntrGroup1Enabled;
468 bool const fIsGroup0Enabled = pGicCpu->fIntrGroup0Enabled;
469 LogFlowFunc(("fIsGroup0Enabled=%RTbool fIsGroup1Enabled=%RTbool\n", fIsGroup0Enabled, fIsGroup1Enabled));
470
471# if 1
472 uint32_t bmIntrs[3];
473 for (uint8_t i = 0; i < RT_ELEMENTS(bmIntrs); i++)
474 {
475 /* Collect interrupts that are pending, enabled and inactive. */
476 bmIntrs[i] = (pGicCpu->bmIntrPending[i] & pGicCpu->bmIntrEnabled[i]) & ~pGicCpu->bmIntrActive[i];
477
478 /* Discard interrupts if the group they belong to is disabled. */
479 if (!fIsGroup1Enabled)
480 bmIntrs[i] &= ~pGicCpu->bmIntrGroup[i];
481 if (!fIsGroup0Enabled)
482 bmIntrs[i] &= pGicCpu->bmIntrGroup[i];
483 }
484
485 uint32_t const cIntrs = sizeof(bmIntrs) * 8;
486 int32_t idxIntr = ASMBitFirstSet(&bmIntrs[0], cIntrs);
487 AssertCompile(!(cIntrs % 32));
488 if (idxIntr >= 0)
489 {
490 /* Only allow interrupts with higher priority than the current configured and running one. */
491 uint8_t const bPriority = RT_MIN(pGicCpu->bIntrPriorityMask, pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority]);
492 do
493 {
494 Assert((uint32_t)idxIntr < RT_ELEMENTS(pGicCpu->abIntrPriority));
495 if (pGicCpu->abIntrPriority[idxIntr] < bPriority)
496 {
497 bool const fInGroup1 = ASMBitTest(&pGicCpu->bmIntrGroup[0], idxIntr);
498 bool const fInGroup0 = !fInGroup1;
499 *pfIrq = fInGroup1 && fIsGroup1Enabled;
500 *pfFiq = fInGroup0 && fIsGroup0Enabled;
501 return;
502 }
503 idxIntr = ASMBitNextSet(&bmIntrs[0], cIntrs, idxIntr);
504 } while (idxIntr != -1);
505 }
506#else /** @todo Measure and pick the faster version. */
507 /* Only allow interrupts with higher priority than the current configured and running one. */
508 uint8_t const bPriority = RT_MIN(pGicCpu->bIntrPriorityMask, pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority]);
509
510 for (uint8_t i = 0; i < RT_ELEMENTS(pGicCpu->bmIntrPending); i++)
511 {
512 /* Collect interrupts that are pending, enabled and inactive. */
513 uint32_t bmIntr = (pGicCpu->bmIntrPending[i] & pGicCpu->bmIntrEnabled[i]) & ~pGicCpu->bmIntrActive[i];
514
515 /* Discard interrupts if the group they belong to is disabled. */
516 if (!fIsGroup1Enabled)
517 bmIntr &= ~pGicCpu->bmIntrGroup[i];
518 if (!fIsGroup0Enabled)
519 bmIntr &= pGicCpu->bmIntrGroup[i];
520
521 /* If the interrupt is higher priority than the running interrupt, return whether to signal an IRQ, FIQ or neither. */
522 uint16_t const idxPending = ASMBitFirstSetU32(bmIntr);
523 if (idxPending > 0)
524 {
525 uint16_t const idxIntr = 32 * i + idxPending - 1;
526 AssertRelease(idxIntr < RT_ELEMENTS(pGicCpu->abIntrPriority));
527 if (pGicCpu->abIntrPriority[idxIntr] < bPriority)
528 {
529 AssertRelease(idxIntr < sizeof(pGicCpu->bmIntrGroup) * 8);
530 bool const fInGroup1 = ASMBitTest(&pGicCpu->bmIntrGroup[0], idxIntr);
531 bool const fInGroup0 = !fInGroup1;
532 *pfIrq = fInGroup1 && fIsGroup1Enabled;
533 *pfFiq = fInGroup0 && fIsGroup0Enabled;
534 return;
535 }
536 }
537 }
538#endif
539 *pfIrq = false;
540 *pfFiq = false;
541}
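/*
 * Priority note (illustrative; lower numeric value means higher priority on the GIC):
 * an interrupt is only signalled when its priority byte is strictly below both the
 * priority mask (ICC_PMR_EL1) and the currently running priority, e.g.:
 *
 *   bIntrPriorityMask = 0xf0, running priority = 0x80  =>  bPriority = 0x80
 *   pending interrupt with priority 0x60  ->  signalled  (0x60 <  0x80)
 *   pending interrupt with priority 0x90  ->  held back  (0x90 >= 0x80)
 *
 * Whether it surfaces as an IRQ or a FIQ depends on its group: group 1 -> IRQ, group 0 -> FIQ.
 */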
542
543
544/**
545 * Gets whether the distributor has pending interrupts with sufficient priority to
546 * be signalled to the PE.
547 *
548 * @param pGicDev The GIC distributor state.
549 * @param pVCpu The cross context virtual CPU structure.
550 * @param idCpu The ID of the virtual CPU.
551 * @param pfIrq Where to store whether IRQs can be signalled.
552 * @param pfFiq Where to store whether FIQs can be signalled.
553 */
554static void gicDistHasIrqPendingForVCpu(PCGICDEV pGicDev, PCVMCPUCC pVCpu, VMCPUID idCpu, bool *pfIrq, bool *pfFiq)
555{
556 bool const fIsGroup1Enabled = pGicDev->fIntrGroup1Enabled;
557 bool const fIsGroup0Enabled = pGicDev->fIntrGroup0Enabled;
558 LogFlowFunc(("fIsGroup1Enabled=%RTbool fIsGroup0Enabled=%RTbool\n", fIsGroup1Enabled, fIsGroup0Enabled));
559
560#if 1
561 uint32_t bmIntrs[64];
562 for (uint8_t i = 0; i < RT_ELEMENTS(bmIntrs); i++)
563 {
564 /* Collect interrupts that are pending, enabled and inactive. */
565 bmIntrs[i] = (pGicDev->bmIntrPending[i] & pGicDev->bmIntrEnabled[i]) & ~pGicDev->bmIntrActive[i];
566
567 /* Discard interrupts if the group they belong to is disabled. */
568 if (!fIsGroup1Enabled)
569 bmIntrs[i] &= ~pGicDev->bmIntrGroup[i];
570 if (!fIsGroup0Enabled)
571 bmIntrs[i] &= pGicDev->bmIntrGroup[i];
572 }
573
574 /*
575 * The distributor's interrupt pending/enabled/active bitmaps have 2048 bits which map
576 * SGIs (16), PPIs (16), SPIs (988), reserved SPIs (4) and extended SPIs (1024).
577 * Of these, the first 32 bits corresponding to SGIs and PPIs are RAZ/WI when affinity
578 * routing is enabled (which is currently always the case in our implementation).
579 */
580 Assert(pGicDev->fAffRoutingEnabled);
581 Assert(bmIntrs[0] == 0);
582 uint32_t const cIntrs = sizeof(bmIntrs) * 8;
583 int32_t idxIntr = ASMBitFirstSet(&bmIntrs[0], cIntrs);
584 AssertCompile(!(cIntrs % 32));
585 if (idxIntr >= 0)
586 {
587 /* Only allow interrupts with higher priority than the current configured and running one. */
588 PCGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
589 uint8_t const bPriority = RT_MIN(pGicCpu->bIntrPriorityMask, pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority]);
590 do
591 {
592 AssertCompile(RT_ELEMENTS(pGicDev->abIntrPriority) == RT_ELEMENTS(pGicDev->au32IntrRouting));
593 Assert((uint32_t)idxIntr < RT_ELEMENTS(pGicDev->abIntrPriority));
594 Assert(idxIntr < GIC_INTID_RANGE_SPECIAL_START || idxIntr > GIC_INTID_RANGE_SPECIAL_LAST);
595 if ( pGicDev->abIntrPriority[idxIntr] < bPriority
596 && pGicDev->au32IntrRouting[idxIntr] == idCpu)
597 {
598 bool const fInGroup1 = ASMBitTest(&pGicDev->bmIntrGroup[0], idxIntr);
599 bool const fInGroup0 = !fInGroup1;
600 *pfFiq = fInGroup0 && fIsGroup0Enabled;
601 *pfIrq = fInGroup1 && fIsGroup1Enabled;
602 return;
603 }
604 idxIntr = ASMBitNextSet(&bmIntrs[0], cIntrs, idxIntr);
605 } while (idxIntr != -1);
606 }
607#else /** @todo Measure and pick the faster version. */
608 /* Only allow interrupts with higher priority than the running one. */
609 PCGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
610 uint8_t const bPriority = RT_MIN(pGicCpu->bIntrPriorityMask, pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority]);
611
612 for (uint8_t i = 0; i < RT_ELEMENTS(pGicDev->bmIntrPending); i += 2)
613 {
614 /* Collect interrupts that are pending, enabled and inactive. */
615 uint32_t uLo = (pGicDev->bmIntrPending[i] & pGicDev->bmIntrEnabled[i]) & ~pGicDev->bmIntrActive[i];
616 uint32_t uHi = (pGicDev->bmIntrPending[i + 1] & pGicDev->bmIntrEnabled[i + 1]) & ~pGicDev->bmIntrActive[i + 1];
617
618 /* Discard interrupts if the group they belong to is disabled. */
619 if (!fIsGroup1Enabled)
620 {
621 uLo &= ~pGicDev->bmIntrGroup[i];
622 uHi &= ~pGicDev->bmIntrGroup[i + 1];
623 }
624 if (!fIsGroup0Enabled)
625 {
626 uLo &= pGicDev->bmIntrGroup[i];
627 uHi &= pGicDev->bmIntrGroup[i + 1];
628 }
629
630 /* If the interrupt is higher priority than the running interrupt, return whether to signal an IRQ, FIQ or neither. */
631 Assert(pGicDev->fAffRoutingEnabled);
632 uint64_t const bmIntrPending = RT_MAKE_U64(uLo, uHi);
633 uint16_t const idxPending = ASMBitFirstSetU64(bmIntrPending);
634 if (idxPending > 0)
635 {
636 /*
637 * The distributor's interrupt pending/enabled/active bitmaps have 2048 bits which map
638 * SGIs (16), PPIs (16), SPIs (988), reserved SPIs (4) and extended SPIs (1024).
639 * Of these, the first 32 bits corresponding to SGIs and PPIs are RAZ/WI when affinity
640 * routing is enabled (which it always is in our implementation).
641 */
642 uint32_t const idxIntr = 64 * i + idxPending - 1;
643 AssertRelease(idxIntr < RT_ELEMENTS(pGicDev->abIntrPriority));
644 if ( pGicDev->abIntrPriority[idxIntr] < bPriority
645 && pGicDev->au32IntrRouting[idxIntr] == idCpu)
646 {
647 Assert(idxIntr > GIC_INTID_RANGE_PPI_LAST);
648 AssertRelease(idxIntr < sizeof(pGicDev->bmIntrGroup) * 8);
649 bool const fInGroup1 = ASMBitTest(&pGicDev->bmIntrGroup[0], idxIntr);
650 bool const fInGroup0 = !fInGroup1;
651 *pfFiq = fInGroup0 && fIsGroup0Enabled;
652 *pfIrq = fInGroup1 && fIsGroup1Enabled;
653 return;
654 }
655 }
656 }
657#endif
658 *pfIrq = false;
659 *pfFiq = false;
660}
661
662
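/**
 * Reads the LPI configuration table from guest memory into the distributor's
 * internal cache.
 *
 * @param pDevIns The device instance.
 * @param pGicDev The GIC distributor state.
 */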
663static void gicDistReadLpiConfigTableFromMemory(PPDMDEVINS pDevIns, PGICDEV pGicDev)
664{
665 Assert(pGicDev->fEnableLpis);
666 LogFlowFunc(("\n"));
667
668 /* Check if the guest is disabling LPIs by setting GICR_PROPBASER.IDBits < 13. */
669 uint8_t const cIdBits = RT_BF_GET(pGicDev->uLpiConfigBaseReg.u, GIC_BF_REDIST_REG_PROPBASER_ID_BITS) + 1;
670 if (cIdBits < GIC_LPI_ID_BITS_MIN)
671 return;
672
673 /* Copy the LPI config table from guest memory to our internal cache. */
674 Assert(UINT32_C(2) << pGicDev->uMaxLpi <= RT_ELEMENTS(pGicDev->abLpiConfig));
675 RTGCPHYS const GCPhysLpiConfigTable = pGicDev->uLpiConfigBaseReg.u & GIC_BF_REDIST_REG_PROPBASER_PHYS_ADDR_MASK;
676 uint32_t const cbLpiConfigTable = sizeof(pGicDev->abLpiConfig);
677
678 /** @todo Try releasing and re-acquiring the device critical section here.
679 * Probably safe, but haven't verified this... */
680 int const rc = PDMDevHlpPhysReadMeta(pDevIns, GCPhysLpiConfigTable, (void *)&pGicDev->abLpiConfig[0], cbLpiConfigTable);
681 AssertRC(rc);
682}
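/*
 * Note on the cache read above: the LPI configuration table holds one byte per LPI and
 * LPI INTIDs start at 8192 in the GICv3 architecture. Assuming abLpiConfig[] is indexed
 * from the first LPI (an assumption for illustration), the configuration byte of LPI
 * INTID 8195 would be looked up as:
 *
 *   uint8_t const bLpiCfg = pGicDev->abLpiConfig[8195 - 8192];
 */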
683
684
685/**
686 * Updates the internal IRQ state and sets or clears the appropriate force action
687 * flags.
688 *
689 * @returns Strict VBox status code.
690 * @param pGicDev The GIC distributor state.
691 * @param pVCpu The cross context virtual CPU structure.
692 */
693static VBOXSTRICTRC gicReDistUpdateIrqState(PCGICDEV pGicDev, PVMCPUCC pVCpu)
694{
695 LogFlowFunc(("\n"));
696 bool fIrq;
697 bool fFiq;
698 gicReDistHasIrqPending(VMCPU_TO_GICCPU(pVCpu), &fIrq, &fFiq);
699
700 bool fIrqDist;
701 bool fFiqDist;
702 gicDistHasIrqPendingForVCpu(pGicDev, pVCpu, pVCpu->idCpu, &fIrqDist, &fFiqDist);
703 LogFlowFunc(("fIrq=%RTbool fFiq=%RTbool fIrqDist=%RTbool fFiqDist=%RTbool\n", fIrq, fFiq, fIrqDist, fFiqDist));
704
705 fIrq |= fIrqDist;
706 fFiq |= fFiqDist;
707 gicUpdateInterruptFF(pVCpu, fIrq, fFiq);
708 return VINF_SUCCESS;
709}
710
711
712/**
713 * Updates the internal IRQ state of the distributor and sets or clears the appropriate force action flags.
714 *
715 * @returns Strict VBox status code.
716 * @param pVM The cross context VM state.
717 * @param pGicDev The GIC distributor state.
718 */
719static VBOXSTRICTRC gicDistUpdateIrqState(PCVMCC pVM, PCGICDEV pGicDev)
720{
721 LogFlowFunc(("\n"));
722 for (uint32_t i = 0; i < pVM->cCpus; i++)
723 {
724 PVMCPUCC pVCpu = pVM->CTX_SUFF(apCpus)[i];
725 PCGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
726
727 bool fIrq, fFiq;
728 gicReDistHasIrqPending(pGicCpu, &fIrq, &fFiq);
729
730 bool fIrqDist, fFiqDist;
731 gicDistHasIrqPendingForVCpu(pGicDev, pVCpu, i, &fIrqDist, &fFiqDist);
732 fIrq |= fIrqDist;
733 fFiq |= fFiqDist;
734
735 gicUpdateInterruptFF(pVCpu, fIrq, fFiq);
736 }
737 return VINF_SUCCESS;
738}
739
740
741/**
742 * Reads the distributor's interrupt routing register (GICD_IROUTER).
743 *
744 * @returns Strict VBox status code.
745 * @param pGicDev The GIC distributor state.
746 * @param idxReg The index of the register in the GICD_IROUTER range.
747 * @param puValue Where to store the register's value.
748 */
749static VBOXSTRICTRC gicDistReadIntrRoutingReg(PCGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
750{
751 /* When affinity routing is disabled, reads return 0. */
752 Assert(pGicDev->fAffRoutingEnabled);
753
754 /* Hardware does not map the first 32 registers (corresponding to SGIs and PPIs). */
755 idxReg += GIC_INTID_RANGE_SPI_START;
756 AssertReturn(idxReg < RT_ELEMENTS(pGicDev->au32IntrRouting), VERR_BUFFER_OVERFLOW);
757 Assert(idxReg < sizeof(pGicDev->bmIntrRoutingMode) * 8);
758 if (!(idxReg % 2))
759 {
760 /* Lower 32-bits. */
761 uint8_t const fIrm = ASMBitTest(&pGicDev->bmIntrRoutingMode[0], idxReg);
762 *puValue = GIC_DIST_REG_IROUTERn_SET(fIrm, pGicDev->au32IntrRouting[idxReg]);
763 }
764 else
765 {
766 /* Upper 32-bits. */
767 *puValue = pGicDev->au32IntrRouting[idxReg] >> 24;
768 }
769
770 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
771 return VINF_SUCCESS;
772}
773
774
775/**
776 * Writes the distributor's interrupt routing register (GICD_IROUTER).
777 *
778 * @returns Strict VBox status code.
779 * @param pGicDev The GIC distributor state.
780 * @param idxReg The index of the register in the GICD_IROUTER range.
781 * @param uValue The value to write to the register.
782 */
783static VBOXSTRICTRC gicDistWriteIntrRoutingReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
784{
785 /* When affinity routing is disabled, writes are ignored. */
786 Assert(pGicDev->fAffRoutingEnabled);
787
788 AssertMsgReturn(idxReg < RT_ELEMENTS(pGicDev->au32IntrRouting), ("idxReg=%u\n", idxReg), VERR_BUFFER_OVERFLOW);
789 Assert(idxReg < sizeof(pGicDev->bmIntrRoutingMode) * 8);
790 if (!(idxReg % 2))
791 {
792 /* Lower 32-bits. */
793 bool const fIrm = GIC_DIST_REG_IROUTERn_IRM_GET(uValue);
794 if (fIrm)
795 ASMBitSet(&pGicDev->bmIntrRoutingMode[0], idxReg);
796 else
797 ASMBitClear(&pGicDev->bmIntrRoutingMode[0], idxReg);
798 uint32_t const fAff3 = pGicDev->au32IntrRouting[idxReg] & 0xff000000;
799 pGicDev->au32IntrRouting[idxReg] = fAff3 | (uValue & 0x00ffffff);
800 }
801 else
802 {
803 /* Upper 32-bits. */
804 uint32_t const fAffOthers = pGicDev->au32IntrRouting[idxReg] & 0x00ffffff;
805 pGicDev->au32IntrRouting[idxReg] = (uValue << 24) | fAffOthers;
806 }
807
808 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->au32IntrRouting[idxReg]));
809 return VINF_SUCCESS;
810}
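/*
 * Layout note (illustrative, based on the masks used above): each GICD_IROUTERn register
 * is 64 bits wide and is accessed here as two 32-bit halves:
 *
 *   low  half: Aff2.Aff1.Aff0 in bits 0..23, Interrupt Routing Mode (IRM) in bit 31
 *   high half: Aff3 in bits 0..7, kept internally shifted up into bits 24..31
 */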
811
812
813/**
814 * Reads the distributor's interrupt (set/clear) enable register (GICD_ISENABLER and
815 * GICD_ICENABLER).
816 *
817 * @returns Strict VBox status code.
818 * @param pGicDev The GIC distributor state.
819 * @param idxReg The index of the register in the GICD_ISENABLER and
820 * GICD_ICENABLER range.
821 * @param puValue Where to store the register's value.
822 */
823static VBOXSTRICTRC gicDistReadIntrEnableReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
824{
825 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrEnabled));
826 *puValue = pGicDev->bmIntrEnabled[idxReg];
827 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicDev->bmIntrEnabled[idxReg]));
828 return VINF_SUCCESS;
829}
830
831
832/**
833 * Writes the distributor's interrupt set-enable register (GICD_ISENABLER).
834 *
835 * @returns Strict VBox status code.
836 * @param pVM The cross context VM structure.
837 * @param pGicDev The GIC distributor state.
838 * @param idxReg The index of the register in the GICD_ISENABLER range.
839 * @param uValue The value to write to the register.
840 */
841static VBOXSTRICTRC gicDistWriteIntrSetEnableReg(PVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
842{
843 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
844 Assert(pGicDev->fAffRoutingEnabled);
845 if (idxReg > 0)
846 {
847 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrEnabled));
848 pGicDev->bmIntrEnabled[idxReg] |= uValue;
849 return gicDistUpdateIrqState(pVM, pGicDev);
850 }
851 else
852 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
853 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrEnabled[idxReg]));
854 return VINF_SUCCESS;
855}
856
857
858/**
859 * Writes the distributor's interrupt clear-enable register (GICD_ICENABLER).
860 *
861 * @returns Strict VBox status code.
862 * @param pVM The cross context VM structure.
863 * @param pGicDev The GIC distributor state.
864 * @param idxReg The index of the register in the GICD_ICENABLER range.
865 * @param uValue The value to write to the register.
866 */
867static VBOXSTRICTRC gicDistWriteIntrClearEnableReg(PVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
868{
869 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
870 Assert(pGicDev->fAffRoutingEnabled);
871 if (idxReg > 0)
872 {
873 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrEnabled));
874 pGicDev->bmIntrEnabled[idxReg] &= ~uValue;
875 return gicDistUpdateIrqState(pVM, pGicDev);
876 }
877 else
878 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
879 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrEnabled[idxReg]));
880 return VINF_SUCCESS;
881}
882
883
884/**
885 * Reads the distributor's interrupt active register (GICD_ISACTIVER and
886 * GICD_ICACTIVER).
887 *
888 * @returns Strict VBox status code.
889 * @param pGicDev The GIC distributor state.
890 * @param idxReg The index of the register in the GICD_ISACTIVER and
891 * GICD_ICACTIVER range.
892 * @param puValue Where to store the register's value.
893 */
894static VBOXSTRICTRC gicDistReadIntrActiveReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
895{
896 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrActive));
897 *puValue = pGicDev->bmIntrActive[idxReg];
898 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicDev->bmIntrActive[idxReg]));
899 return VINF_SUCCESS;
900}
901
902
903/**
904 * Writes the distributor's interrupt set-active register (GICD_ISACTIVER).
905 *
906 * @returns Strict VBox status code.
907 * @param pVM The cross context VM structure.
908 * @param pGicDev The GIC distributor state.
909 * @param idxReg The index of the register in the GICD_ISACTIVER range.
910 * @param uValue The value to write to the register.
911 */
912static VBOXSTRICTRC gicDistWriteIntrSetActiveReg(PVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
913{
914 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
915 Assert(pGicDev->fAffRoutingEnabled);
916 if (idxReg > 0)
917 {
918 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrActive));
919 pGicDev->bmIntrActive[idxReg] |= uValue;
920 return gicDistUpdateIrqState(pVM, pGicDev);
921 }
922 else
923 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
924 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrActive[idxReg]));
925 return VINF_SUCCESS;
926}
927
928
929/**
930 * Writes the distributor's interrupt clear-active register (GICD_ICACTIVER).
931 *
932 * @returns Strict VBox status code.
933 * @param pVM The cross context VM structure.
934 * @param pGicDev The GIC distributor state.
935 * @param idxReg The index of the register in the GICD_ICACTIVER range.
936 * @param uValue The value to write to the register.
937 */
938static VBOXSTRICTRC gicDistWriteIntrClearActiveReg(PVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
939{
940 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
941 Assert(pGicDev->fAffRoutingEnabled);
942 if (idxReg > 0)
943 {
944 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrActive));
945 pGicDev->bmIntrActive[idxReg] &= ~uValue;
946 return gicDistUpdateIrqState(pVM, pGicDev);
947 }
948 else
949 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
950 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrActive[idxReg]));
951 return VINF_SUCCESS;
952}
953
954
955/**
956 * Reads the distributor's interrupt priority register (GICD_IPRIORITYR).
957 *
958 * @returns Strict VBox status code.
959 * @param pGicDev The GIC distributor state.
960 * @param idxReg The index of the register in the GICD_IPRIORITY range.
961 * @param puValue Where to store the register's value.
962 */
963static VBOXSTRICTRC gicDistReadIntrPriorityReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
964{
965 /* When affinity routing is enabled, reads to registers 0..7 (pertaining to SGIs and PPIs) return 0. */
966 Assert(pGicDev->fAffRoutingEnabled);
967 Assert(idxReg < RT_ELEMENTS(pGicDev->abIntrPriority) / sizeof(uint32_t));
968 Assert(idxReg != 255);
969 if (idxReg > 7)
970 {
971 uint16_t const idxPriority = idxReg * sizeof(uint32_t);
972 AssertReturn(idxPriority <= RT_ELEMENTS(pGicDev->abIntrPriority) - sizeof(uint32_t), VERR_BUFFER_OVERFLOW);
973 AssertCompile(sizeof(*puValue) == sizeof(uint32_t));
974 *puValue = *(uint32_t *)&pGicDev->abIntrPriority[idxPriority];
975 }
976 else
977 {
978 AssertReleaseMsgFailed(("Unexpected (but not illegal) read to SGI/PPI register in distributor\n"));
979 *puValue = 0;
980 }
981 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
982 return VINF_SUCCESS;
983}
984
985
986/**
987 * Writes the distributor's interrupt priority register (GICD_IPRIORITYR).
988 *
989 * @returns Strict VBox status code.
990 * @param pGicDev The GIC distributor state.
991 * @param idxReg The index of the register in the GICD_IPRIORITY range.
992 * @param uValue The value to write to the register.
993 */
994static VBOXSTRICTRC gicDistWriteIntrPriorityReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
995{
996 /* When affinity routing is enabled, writes to registers 0..7 are ignored. */
997 Assert(pGicDev->fAffRoutingEnabled);
998 Assert(idxReg < RT_ELEMENTS(pGicDev->abIntrPriority) / sizeof(uint32_t));
999 Assert(idxReg != 255);
1000 if (idxReg > 7)
1001 {
1002 uint16_t const idxPriority = idxReg * sizeof(uint32_t);
1003 AssertReturn(idxPriority <= RT_ELEMENTS(pGicDev->abIntrPriority) - sizeof(uint32_t), VERR_BUFFER_OVERFLOW);
1004 AssertCompile(sizeof(uValue) == sizeof(uint32_t));
1005 *(uint32_t *)&pGicDev->abIntrPriority[idxPriority] = uValue;
1006 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, *(uint32_t *)&pGicDev->abIntrPriority[idxPriority]));
1007 }
1008 else
1009 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1010 return VINF_SUCCESS;
1011}
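/*
 * Addressing example (illustrative): each GICD_IPRIORITYRn register holds four one-byte
 * priority fields, so register index 8 covers INTIDs 32..35:
 *
 *   idxPriority = 8 * sizeof(uint32_t) = 32
 *   abIntrPriority[32..35]  <-  the four priority bytes of the written 32-bit value
 */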
1012
1013
1014/**
1015 * Reads the distributor's interrupt pending register (GICD_ISPENDR and
1016 * GICD_ICPENDR).
1017 *
1018 * @returns Strict VBox status code.
1019 * @param pGicDev The GIC distributor state.
1020 * @param idxReg The index of the register in the GICD_ISPENDR and
1021 * GICD_ICPENDR range.
1022 * @param puValue Where to store the register's value.
1023 */
1024static VBOXSTRICTRC gicDistReadIntrPendingReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
1025{
1026 /* When affinity routing is enabled, reads for SGIs and PPIs return 0. */
1027 Assert(pGicDev->fAffRoutingEnabled);
1028 if (idxReg > 0)
1029 {
1030 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrPending));
1031 *puValue = pGicDev->bmIntrPending[idxReg];
1032 }
1033 else
1034 {
1035 AssertReleaseMsgFailed(("Unexpected (but not illegal) read to SGI/PPI register in distributor\n"));
1036 *puValue = 0;
1037 }
1038 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicDev->bmIntrPending[idxReg]));
1039 return VINF_SUCCESS;
1040}
1041
1042
1043/**
1044 * Writes the distributor's interrupt set-pending register (GICD_ISPENDR).
1045 *
1046 * @returns Strict VBox status code.
1047 * @param pVM The cross context VM structure.
1048 * @param pGicDev The GIC distributor state.
1049 * @param idxReg The index of the register in the GICD_ISPENDR range.
1050 * @param uValue The value to write to the register.
1051 */
1052static VBOXSTRICTRC gicDistWriteIntrSetPendingReg(PVMCC pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
1053{
1054 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
1055 Assert(pGicDev->fAffRoutingEnabled);
1056 if (idxReg > 0)
1057 {
1058 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrPending));
1059 pGicDev->bmIntrPending[idxReg] |= uValue;
1060 return gicDistUpdateIrqState(pVM, pGicDev);
1061 }
1062 else
1063 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1064 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrPending[idxReg]));
1065 return VINF_SUCCESS;
1066}
1067
1068
1069/**
1070 * Writes the distributor's interrupt clear-pending register (GICD_ICPENDR).
1071 *
1072 * @returns Strict VBox status code.
1073 * @param pVM The cross context VM structure.
1074 * @param pGicDev The GIC distributor state.
1075 * @param idxReg The index of the register in the GICD_ICPENDR range.
1076 * @param uValue The value to write to the register.
1077 */
1078static VBOXSTRICTRC gicDistWriteIntrClearPendingReg(PVMCC pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
1079{
1080 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
1081 Assert(pGicDev->fAffRoutingEnabled);
1082 if (idxReg > 0)
1083 {
1084 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrPending));
1085 pGicDev->bmIntrPending[idxReg] &= ~uValue;
1086 return gicDistUpdateIrqState(pVM, pGicDev);
1087 }
1088 else
1089 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1090 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrPending[idxReg]));
1091 return VINF_SUCCESS;
1092}
1093
1094
1095/**
1096 * Reads the distributor's interrupt config register (GICD_ICFGR).
1097 *
1098 * @returns Strict VBox status code.
1099 * @param pGicDev The GIC distributor state.
1100 * @param idxReg The index of the register in the GICD_ICFGR range.
1101 * @param puValue Where to store the register's value.
1102 */
1103static VBOXSTRICTRC gicDistReadIntrConfigReg(PCGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
1104{
1105 /* When affinity routing is enabled, reads to SGIs and PPIs return 0. */
1106 Assert(pGicDev->fAffRoutingEnabled);
1107 if (idxReg >= 2)
1108 {
1109 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrConfig));
1110 *puValue = pGicDev->bmIntrConfig[idxReg];
1111 }
1112 else
1113 AssertReleaseMsgFailed(("Unexpected (but not illegal) read to SGI/PPI register in distributor\n"));
1114 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicDev->bmIntrConfig[idxReg]));
1115 return VINF_SUCCESS;
1116}
1117
1118
1119/**
1120 * Writes the distributor's interrupt config register (GICD_ICFGR).
1121 *
1122 * @returns Strict VBox status code.
1123 * @param pGicDev The GIC distributor state.
1124 * @param idxReg The index of the register in the GICD_ICFGR range.
1125 * @param uValue The value to write to the register.
1126 */
1127static VBOXSTRICTRC gicDistWriteIntrConfigReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
1128{
1129 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
1130 Assert(pGicDev->fAffRoutingEnabled);
1131 if (idxReg >= 2)
1132 {
1133 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrConfig));
1134 pGicDev->bmIntrConfig[idxReg] = uValue & 0xaaaaaaaa;
1135 }
1136 else
1137 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1138 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrConfig[idxReg]));
1139 return VINF_SUCCESS;
1140}
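/*
 * Config field note (illustrative): GICD_ICFGRn allots two bits per interrupt and only
 * the odd (upper) bit is programmable here; 1 = edge-triggered, 0 = level-sensitive.
 * Masking with 0xaaaaaaaa therefore keeps exactly those odd bits. For register index 2
 * (covering INTIDs 32..47):
 *
 *   write 0xffffffff  ->  stored as 0xaaaaaaaa, i.e. INTIDs 32..47 all edge-triggered
 */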
1141
1142
1143/**
1144 * Reads the distributor's interrupt group register (GICD_IGROUPR).
1145 *
1146 * @returns Strict VBox status code.
1147 * @param pGicDev The GIC distributor state.
1148 * @param idxReg The index of the register in the GICD_IGROUPR range.
1149 * @param puValue Where to store the register's value.
1150 */
1151static VBOXSTRICTRC gicDistReadIntrGroupReg(PGICDEV pGicDev, uint16_t idxReg, uint32_t *puValue)
1152{
1153 /* When affinity routing is enabled, reads to SGIs and PPIs return 0. */
1154 Assert(pGicDev->fAffRoutingEnabled);
1155 if (idxReg > 0)
1156 {
1157 Assert(idxReg < RT_ELEMENTS(pGicDev->bmIntrGroup));
1158 *puValue = pGicDev->bmIntrGroup[idxReg];
1159 }
1160 else
1161 AssertReleaseMsgFailed(("Unexpected (but not illegal) read to SGI/PPI register in distributor\n"));
1162 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
1163 return VINF_SUCCESS;
1164}
1165
1166
1167/**
1168 * Writes the distributor's interrupt group register (GICD_IGROUPR).
1169 *
1170 * @returns Strict VBox status code.
1171 * @param pVM The cross context VM structure.
1172 * @param pGicDev The GIC distributor state.
1173 * @param idxReg The index of the register in the GICD_IGROUPR range.
1174 * @param uValue The value to write to the register.
1175 */
1176static VBOXSTRICTRC gicDistWriteIntrGroupReg(PCVM pVM, PGICDEV pGicDev, uint16_t idxReg, uint32_t uValue)
1177{
1178 /* When affinity routing is enabled, writes to SGIs and PPIs are ignored. */
1179 Assert(pGicDev->fAffRoutingEnabled);
1180 if (idxReg > 0)
1181 {
1182 pGicDev->bmIntrGroup[idxReg] = uValue;
1183 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicDev->bmIntrGroup[idxReg]));
1184 }
1185 else
1186 AssertReleaseMsgFailed(("Unexpected (but not illegal) write to SGI/PPI register in distributor\n"));
1187 return gicDistUpdateIrqState(pVM, pGicDev);
1188}
1189
1190
1191/**
1192 * Reads the redistributor's interrupt priority register (GICR_IPRIORITYR).
1193 *
1194 * @returns Strict VBox status code.
1195 * @param pGicDev The GIC distributor state.
1196 * @param pGicCpu The GIC redistributor and CPU interface state.
1197 * @param idxReg The index of the register in the GICR_IPRIORITY range.
1198 * @param puValue Where to store the register's value.
1199 */
1200static VBOXSTRICTRC gicReDistReadIntrPriorityReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1201{
1202 /* When affinity routing is disabled, reads return 0. */
1203 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1204 uint16_t const idxPriority = idxReg * sizeof(uint32_t);
1205 AssertReturn(idxPriority <= RT_ELEMENTS(pGicCpu->abIntrPriority) - sizeof(uint32_t), VERR_BUFFER_OVERFLOW);
1206 AssertCompile(sizeof(*puValue) == sizeof(uint32_t));
1207 *puValue = *(uint32_t *)&pGicCpu->abIntrPriority[idxPriority];
1208 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
1209 return VINF_SUCCESS;
1210}
1211
1212
1213/**
1214 * Writes the redistributor's interrupt priority register (GICR_IPRIORITYR).
1215 *
1216 * @returns Strict VBox status code.
1217 * @param pGicDev The GIC distributor state.
1218 * @param pVCpu The cross context virtual CPU structure.
1219 * @param idxReg The index of the register in the GICR_IPRIORITY range.
1220 * @param uValue The value to write to the register.
1221 */
1222static VBOXSTRICTRC gicReDistWriteIntrPriorityReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1223{
1224 /* When affinity routing is disabled, writes are ignored. */
1225 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1226 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1227 uint16_t const idxPriority = idxReg * sizeof(uint32_t);
1228 AssertReturn(idxPriority <= RT_ELEMENTS(pGicCpu->abIntrPriority) - sizeof(uint32_t), VERR_BUFFER_OVERFLOW);
1229 AssertCompile(sizeof(uValue) == sizeof(uint32_t));
1230 *(uint32_t *)&pGicCpu->abIntrPriority[idxPriority] = uValue;
1231 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, *(uint32_t *)&pGicCpu->abIntrPriority[idxPriority]));
1232 return VINF_SUCCESS;
1233}
1234
1235
1236/**
1237 * Reads the redistributor's interrupt pending register (GICR_ISPENDR and
1238 * GICR_ICPENDR).
1239 *
1240 * @returns Strict VBox status code.
1241 * @param pGicDev The GIC distributor state.
1242 * @param pGicCpu The GIC redistributor and CPU interface state.
1243 * @param idxReg The index of the register in the GICR_ISPENDR and
1244 * GICR_ICPENDR range.
1245 * @param puValue Where to store the register's value.
1246 */
1247static VBOXSTRICTRC gicReDistReadIntrPendingReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1248{
1249 /* When affinity routing is disabled, reads return 0. */
1250 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1251 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrPending));
1252 *puValue = pGicCpu->bmIntrPending[idxReg];
1253 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicCpu->bmIntrPending[idxReg]));
1254 return VINF_SUCCESS;
1255}
1256
1257
1258/**
1259 * Writes the redistributor's interrupt set-pending register (GICR_ISPENDR).
1260 *
1261 * @returns Strict VBox status code.
1262 * @param pGicDev The GIC distributor state.
1263 * @param pVCpu The cross context virtual CPU structure.
1264 * @param idxReg The index of the register in the GICR_ISPENDR range.
1265 * @param uValue The value to write to the register.
1266 */
1267static VBOXSTRICTRC gicReDistWriteIntrSetPendingReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1268{
1269 /* When affinity routing is disabled, writes are ignored. */
1270 Assert(pGicDev->fAffRoutingEnabled);
1271 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1272 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrPending));
1273 pGicCpu->bmIntrPending[idxReg] |= uValue;
1274 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrPending[idxReg]));
1275 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1276}
1277
1278
1279/**
1280 * Writes the redistributor's interrupt clear-pending register (GICR_ICPENDR).
1281 *
1282 * @returns Strict VBox status code.
1283 * @param pGicDev The GIC distributor state.
1284 * @param pVCpu The cross context virtual CPU structure.
1285 * @param idxReg The index of the register in the GICR_ICPENDR range.
1286 * @param uValue The value to write to the register.
1287 */
1288static VBOXSTRICTRC gicReDistWriteIntrClearPendingReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1289{
1290 /* When affinity routing is disabled, writes are ignored. */
1291 Assert(pGicDev->fAffRoutingEnabled);
1292 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1293 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrPending));
1294 pGicCpu->bmIntrPending[idxReg] &= ~uValue;
1295 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrPending[idxReg]));
1296 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1297}
1298
1299
1300/**
1301 * Reads the redistributor's interrupt enable register (GICR_ISENABLER and
1302 * GICR_ICENABLER).
1303 *
1304 * @returns Strict VBox status code.
1305 * @param pGicDev The GIC distributor state.
1306 * @param pGicCpu The GIC redistributor and CPU interface state.
1307 * @param idxReg The index of the register in the GICR_ISENABLER and
1308 * GICR_ICENABLER range.
1309 * @param puValue Where to store the register's value.
1310 */
1311static VBOXSTRICTRC gicReDistReadIntrEnableReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1312{
1313 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1314 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrEnabled));
1315 *puValue = pGicCpu->bmIntrEnabled[idxReg];
1316 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicCpu->bmIntrEnabled[idxReg]));
1317 return VINF_SUCCESS;
1318}
1319
1320
1321/**
1322 * Writes the redistributor's interrupt set-enable register (GICR_ISENABLER).
1323 *
1324 * @returns Strict VBox status code.
1325 * @param pGicDev The GIC distributor state.
1326 * @param pVCpu The cross context virtual CPU structure.
1327 * @param idxReg The index of the register in the GICR_ISENABLER range.
1328 * @param uValue The value to write to the register.
1329 */
1330static VBOXSTRICTRC gicReDistWriteIntrSetEnableReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1331{
1332 Assert(pGicDev->fAffRoutingEnabled);
1333 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1334 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrEnabled));
1335 pGicCpu->bmIntrEnabled[idxReg] |= uValue;
1336 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrEnabled[idxReg]));
1337 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1338}
1339
1340
1341/**
1342 * Writes the redistributor's interrupt clear-enable register (GICR_ICENABLER).
1343 *
1344 * @returns Strict VBox status code.
1345 * @param pGicDev The GIC distributor state.
1346 * @param pVCpu The cross context virtual CPU structure.
1347 * @param idxReg The index of the register in the GICR_ICENABLER range.
1348 * @param uValue The value to write to the register.
1349 */
1350static VBOXSTRICTRC gicReDistWriteIntrClearEnableReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1351{
1352 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1353 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrEnabled));
1354 pGicCpu->bmIntrEnabled[idxReg] &= ~uValue;
1355 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrEnabled[idxReg]));
1356 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1357}
1358
1359
1360/**
1361 * Reads the redistributor's interrupt active register (GICR_ISACTIVER and
1362 * GICR_ICACTIVER).
1363 *
1364 * @returns Strict VBox status code.
1365 * @param pGicCpu The GIC redistributor and CPU interface state.
1366 * @param idxReg The index of the register in the GICR_ISACTIVER and
1367 * GICR_ICACTIVER range.
1368 * @param puValue Where to store the register's value.
1369 */
1370static VBOXSTRICTRC gicReDistReadIntrActiveReg(PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1371{
1372 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrActive));
1373 *puValue = pGicCpu->bmIntrActive[idxReg];
1374 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicCpu->bmIntrActive[idxReg]));
1375 return VINF_SUCCESS;
1376}
1377
1378
1379/**
1380 * Writes the redistributor's interrupt set-active register (GICR_ISACTIVER).
1381 *
1382 * @returns Strict VBox status code.
1383 * @param pGicDev The GIC distributor state.
1384 * @param pVCpu The cross context virtual CPU structure.
1385 * @param idxReg The index of the register in the GICR_ISACTIVER range.
1386 * @param uValue The value to write to the register.
1387 */
1388static VBOXSTRICTRC gicReDistWriteIntrSetActiveReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1389{
1390 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1391 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrActive));
1392 pGicCpu->bmIntrActive[idxReg] |= uValue;
1393 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrActive[idxReg]));
1394 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1395}
1396
1397
1398/**
1399 * Writes the redistributor's interrupt clear-active register (GICR_ICACTIVER).
1400 *
1401 * @returns Strict VBox status code.
1402 * @param pGicDev The GIC distributor state.
1403 * @param pVCpu The cross context virtual CPU structure.
1404 * @param idxReg The index of the register in the GICR_ICACTIVER range.
1405 * @param uValue The value to write to the register.
1406 */
1407static VBOXSTRICTRC gicReDistWriteIntrClearActiveReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1408{
1409 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1410 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrActive));
1411 pGicCpu->bmIntrActive[idxReg] &= ~uValue;
1412 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrActive[idxReg]));
1413 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1414}
1415
1416
1417/**
1418 * Reads the redistributor's interrupt config register (GICR_ICFGR).
1419 *
1420 * @returns Strict VBox status code.
1421 * @param pGicDev The GIC distributor state.
1422 * @param pGicCpu The GIC redistributor and CPU interface state.
1423 * @param idxReg The index of the register in the GICR_ICFGR range.
1424 * @param puValue Where to store the register's value.
1425 */
1426static VBOXSTRICTRC gicReDistReadIntrConfigReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1427{
1428 /* When affinity routing is disabled, reads return 0. */
1429 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1430 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrConfig));
1431 *puValue = pGicCpu->bmIntrConfig[idxReg];
1432 /* Ensure SGIs are read-only and remain configured as edge-triggered. */
1433 Assert(idxReg > 0 || *puValue == 0xaaaaaaaa);
1434 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, *puValue));
1435 return VINF_SUCCESS;
1436}
1437
1438
1439/**
1440 * Writes the redistributor's interrupt config register (GICR_ICFGR).
1441 *
1442 * @returns Strict VBox status code.
1443 * @param pGicDev The GIC distributor state.
1444 * @param pVCpu The cross context virtual CPU structure.
1445 * @param idxReg The index of the register in the GICR_ICFGR range.
1446 * @param uValue The value to write to the register.
1447 */
1448static VBOXSTRICTRC gicReDistWriteIntrConfigReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1449{
1450 /* When affinity routing is disabled, writes are ignored. */
1451 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1452 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1453 if (idxReg > 0)
1454 {
1455 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrConfig));
1456 pGicCpu->bmIntrConfig[idxReg] = uValue & 0xaaaaaaaa;
1457 }
1458 else
1459 {
1460        /* SGIs are always edge-triggered; writes are ignored. Windows 11 (24H2) arm64 guests write these. */
1461 Assert(uValue == 0xaaaaaaaa);
1462 Assert(pGicCpu->bmIntrConfig[0] == uValue);
1463 }
1464 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrConfig[idxReg]));
1465 return VINF_SUCCESS;
1466}
1467
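/*
 * Illustrative sketch, not part of the device code: each GICR_ICFGR register packs 16
 * interrupts at 2 bits apiece, and bit[1] of each field selects edge-triggered (1) versus
 * level-sensitive (0) behaviour. That is why the 0xaaaaaaaa mask above marks every field
 * as edge-triggered and why the acknowledge path later tests bit (2 * idxIntr + 1). A
 * hypothetical helper (name and parameters invented here) decoding a single interrupt:
 *
 *     static bool gicIsIntrEdgeTriggeredExample(uint32_t const *pbmIntrConfig, uint16_t idxIntr)
 *     {
 *         uint32_t const uReg   = pbmIntrConfig[idxIntr / 16];            // 16 two-bit fields per register
 *         uint8_t  const uField = (uReg >> ((idxIntr % 16) * 2)) & 0x3;
 *         return RT_BOOL(uField & 0x2);                                   // bit[1] set => edge-triggered
 *     }
 */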
1468
1469/**
1470 * Reads the redistributor's interrupt group register (GICR_IGROUPR).
1471 *
1472 * @returns Strict VBox status code.
1473 * @param pGicDev The GIC distributor state.
1474 * @param pGicCpu The GIC redistributor and CPU interface state.
1475 * @param idxReg The index of the register in the GICR_IGROUPR range.
1476 * @param puValue Where to store the register's value.
1477 */
1478static VBOXSTRICTRC gicReDistReadIntrGroupReg(PCGICDEV pGicDev, PGICCPU pGicCpu, uint16_t idxReg, uint32_t *puValue)
1479{
1480 /* When affinity routing is disabled, reads return 0. */
1481 Assert(pGicDev->fAffRoutingEnabled); RT_NOREF(pGicDev);
1482 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrGroup));
1483 *puValue = pGicCpu->bmIntrGroup[idxReg];
1484 LogFlowFunc(("idxReg=%#x read %#x\n", idxReg, pGicCpu->bmIntrGroup[idxReg]));
1485 return VINF_SUCCESS;
1486}
1487
1488
1489/**
1490 * Writes the redistributor's interrupt group register (GICR_IGROUPR).
1491 *
1492 * @returns Strict VBox status code.
1493 * @param pGicDev The GIC distributor state.
1494 * @param pVCpu The cross context virtual CPU structure.
1495 * @param idxReg The index of the register in the GICR_IGROUPR range.
1496 * @param uValue The value to write to the register.
1497 */
1498static VBOXSTRICTRC gicReDistWriteIntrGroupReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint16_t idxReg, uint32_t uValue)
1499{
1500 /* When affinity routing is disabled, writes are ignored. */
1501 Assert(pGicDev->fAffRoutingEnabled);
1502 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1503 Assert(idxReg < RT_ELEMENTS(pGicCpu->bmIntrGroup));
1504 pGicCpu->bmIntrGroup[idxReg] = uValue;
1505 LogFlowFunc(("idxReg=%#x written %#x\n", idxReg, pGicCpu->bmIntrGroup[idxReg]));
1506 return gicReDistUpdateIrqState(pGicDev, pVCpu);
1507}
1508
1509
1510/**
1511 * Gets the virtual CPUID given the affinity values.
1512 *
1513 * @returns The virtual CPUID.
1514 * @param idCpuInterface The virtual CPUID within the PE cluster (0..15).
1515 * @param uAff1 The affinity 1 value.
1516 * @param uAff2 The affinity 2 value.
1517 * @param uAff3 The affinity 3 value.
1518 */
1519DECL_FORCE_INLINE(VMCPUID) gicGetCpuIdFromAffinity(uint8_t idCpuInterface, uint8_t uAff1, uint8_t uAff2, uint8_t uAff3)
1520{
1521 AssertReturn(idCpuInterface < 16, 0);
1522 return (uAff3 * 1048576) + (uAff2 * 4096) + (uAff1 * 16) + idCpuInterface;
1523}
1524
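/*
 * Illustrative sketch, not part of the device code: the mapping above linearises the
 * Aff3.Aff2.Aff1.Aff0 hierarchy into a flat VCPU ID using 4 bits for Aff0 (the CPU
 * interface within its cluster) and 8 bits each for Aff1..Aff3:
 *
 *     idCpu = (uAff3 << 20) | (uAff2 << 12) | (uAff1 << 4) | idCpuInterface;
 *
 * For example, affinity Aff3=0, Aff2=0, Aff1=2, Aff0=5 maps to VCPU 2 * 16 + 5 = 37.
 * The corresponding inverse decomposition would be:
 *
 *     uint8_t const uAff0 = idCpu         & 0xf;
 *     uint8_t const uAff1 = (idCpu >>  4) & 0xff;
 *     uint8_t const uAff2 = (idCpu >> 12) & 0xff;
 *     uint8_t const uAff3 = (idCpu >> 20) & 0xff;
 */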
1525
1526/**
1527 * Gets the highest priority pending interrupt that can be signalled to the PE.
1528 *
1529 * @returns The interrupt ID or GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT if no interrupt
1530 * is pending or not in a state to be signalled to the PE.
1531 * @param pGicDev The GIC distributor state.
1532 * @param pGicCpu The GIC redistributor and CPU interface state.
1533 * @param fGroup0 Whether to consider group 0 interrupts.
1534 * @param fGroup1 Whether to consider group 1 interrupts.
1535 * @param pidxIntr Where to store the distributor interrupt index for the
1536 * returned interrupt ID. UINT16_MAX if this function returns
1537 * GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT. Optional, can be
1538 * NULL.
1539 * @param pbPriority Where to store the priority of the returned interrupt ID.
1540 * GIC_IDLE_PRIORITY if this function returns
1541 * GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT.
1542 */
1543static uint16_t gicGetHighestPriorityPendingIntr(PCGICDEV pGicDev, PCGICCPU pGicCpu, bool fGroup0, bool fGroup1,
1544 uint16_t *pidxIntr, uint8_t *pbPriority)
1545{
1546#if 1
1547 uint16_t idxIntr = UINT16_MAX;
1548 uint16_t uIntId = GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT;
1549 uint8_t uPriority = GIC_IDLE_PRIORITY;
1550
1551 /* Redistributor. */
1552 {
1553 uint32_t bmReDistIntrs[RT_ELEMENTS(pGicCpu->bmIntrPending)];
1554 AssertCompile(sizeof(pGicCpu->bmIntrPending) == sizeof(bmReDistIntrs));
1555 for (uint16_t i = 0; i < RT_ELEMENTS(bmReDistIntrs); i++)
1556 {
1557 /* Collect interrupts that are pending, enabled and inactive. */
1558 bmReDistIntrs[i] = (pGicCpu->bmIntrPending[i] & pGicCpu->bmIntrEnabled[i]) & ~pGicCpu->bmIntrActive[i];
1559 /* Discard interrupts if the group they belong to is disabled. */
1560 if (!fGroup1)
1561 bmReDistIntrs[i] &= ~pGicCpu->bmIntrGroup[i];
1562 if (!fGroup0)
1563 bmReDistIntrs[i] &= pGicCpu->bmIntrGroup[i];
1564 }
1565 /* Among the collected interrupts, pick the one with the highest, non-idle priority. */
1566 uint16_t idxHighest = UINT16_MAX;
1567 const void *pvIntrs = &bmReDistIntrs[0];
1568 uint32_t const cIntrs = sizeof(bmReDistIntrs) * 8; AssertCompile(!(cIntrs % 32));
1569 int16_t idxPending = ASMBitFirstSet(pvIntrs, cIntrs);
1570 if (idxPending >= 0)
1571 {
1572 do
1573 {
1574 if (pGicCpu->abIntrPriority[idxPending] < uPriority)
1575 {
1576 idxHighest = (uint16_t)idxPending;
1577 uPriority = pGicCpu->abIntrPriority[idxPending];
1578 }
1579 idxPending = ASMBitNextSet(pvIntrs, cIntrs, idxPending);
1580 } while (idxPending != -1);
1581 if (idxHighest != UINT16_MAX)
1582 {
1583 uIntId = gicReDistGetIntIdFromIndex(idxHighest);
1584 idxIntr = idxHighest;
1585 Assert( GIC_IS_INTR_SGI_OR_PPI(uIntId)
1586 || GIC_IS_INTR_EXT_PPI(uIntId));
1587 }
1588 }
1589 }
1590
1591 /* Distributor */
1592 {
1593 uint32_t bmDistIntrs[RT_ELEMENTS(pGicDev->bmIntrPending)];
1594 AssertCompile(sizeof(pGicDev->bmIntrPending) == sizeof(bmDistIntrs));
1595 for (uint16_t i = 0; i < RT_ELEMENTS(bmDistIntrs); i++)
1596 {
1597 /* Collect interrupts that are pending, enabled and inactive. */
1598 bmDistIntrs[i] = (pGicDev->bmIntrPending[i] & pGicDev->bmIntrEnabled[i]) & ~pGicDev->bmIntrActive[i];
1599 /* Discard interrupts if the group they belong to is disabled. */
1600 if (!fGroup1)
1601 bmDistIntrs[i] &= ~pGicDev->bmIntrGroup[i];
1602 if (!fGroup0)
1603 bmDistIntrs[i] &= pGicDev->bmIntrGroup[i];
1604 }
1605 /* Among the collected interrupts, pick one with priority higher than what we picked from the redistributor. */
1606 {
1607 uint16_t idxHighest = UINT16_MAX;
1608 const void *pvIntrs = &bmDistIntrs[0];
1609 uint32_t const cIntrs = sizeof(bmDistIntrs) * 8; AssertCompile(!(cIntrs % 32));
1610 int16_t idxPending = ASMBitFirstSet(pvIntrs, cIntrs);
1611 if (idxPending >= 0)
1612 {
1613 do
1614 {
1615 if (pGicDev->abIntrPriority[idxPending] < uPriority)
1616 {
1617 idxHighest = (uint16_t)idxPending;
1618 uPriority = pGicDev->abIntrPriority[idxPending];
1619 }
1620 idxPending = ASMBitNextSet(pvIntrs, cIntrs, idxPending);
1621 } while (idxPending != -1);
1622 if (idxHighest != UINT16_MAX)
1623 {
1624 uIntId = gicDistGetIntIdFromIndex(idxHighest);
1625 idxIntr = idxHighest;
1626 Assert( GIC_IS_INTR_SPI(uIntId)
1627 || GIC_IS_INTR_EXT_SPI(uIntId));
1628 }
1629 }
1630 }
1631 }
1632#else /** @todo Measure and pick the faster version. */
1633 /*
1634 * Collect interrupts that are pending, enabled and inactive.
1635 * Discard interrupts if the group they belong to is disabled.
1636 * While collecting the interrupts, pick the one with the highest, non-idle priority.
1637 */
1638 uint16_t uIntId = GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT;
1639 uint16_t idxIntr = UINT16_MAX;
1640 uint8_t uPriority = GIC_IDLE_PRIORITY;
1641
1642 /* Redistributor. */
1643 {
1644 uint16_t idxHighest = UINT16_MAX;
1645 for (uint16_t i = 0; i < RT_ELEMENTS(pGicCpu->bmIntrPending); i++)
1646 {
1647 uint32_t uIntrPending = (pGicCpu->bmIntrPending[i] & pGicCpu->bmIntrEnabled[i]) & ~pGicCpu->bmIntrActive[i];
1648 if (!fGroup1)
1649 uIntrPending &= ~pGicCpu->bmIntrGroup[i];
1650 if (!fGroup0)
1651 uIntrPending &= pGicCpu->bmIntrGroup[i];
1652
1653 uint16_t const idxPending = ASMBitFirstSetU32(uIntrPending);
1654 if (idxPending > 0)
1655 {
1656 uint32_t const idxPriority = 32 * i + idxPending - 1;
1657 Assert(idxPriority < RT_ELEMENTS(pGicCpu->abIntrPriority));
1658 if (pGicCpu->abIntrPriority[idxPriority] < uPriority)
1659 {
1660 idxHighest = idxPriority;
1661 uPriority = pGicCpu->abIntrPriority[idxPriority];
1662 }
1663 }
1664 }
1665 if (idxHighest != UINT16_MAX)
1666 {
1667 idxIntr = idxHighest;
1668 uIntId = gicReDistGetIntIdFromIndex(idxHighest);
1669 Assert( GIC_IS_INTR_SGI_OR_PPI(uIntId)
1670 || GIC_IS_INTR_EXT_PPI(uIntId));
1671 Assert(uPriority != GIC_IDLE_PRIORITY);
1672 }
1673 }
1674
1675 /* Distributor. */
1676 {
1677 uint16_t idxHighest = UINT16_MAX;
1678 for (uint16_t i = 0; i < RT_ELEMENTS(pGicDev->bmIntrPending); i += 2)
1679 {
1680 uint32_t uLo = (pGicDev->bmIntrPending[i] & pGicDev->bmIntrEnabled[i]) & ~pGicDev->bmIntrActive[i];
1681 uint32_t uHi = (pGicDev->bmIntrPending[i + 1] & pGicDev->bmIntrEnabled[i + 1]) & ~pGicDev->bmIntrActive[i + 1];
1682 if (!fGroup1)
1683 {
1684 uLo &= ~pGicDev->bmIntrGroup[i];
1685 uHi &= ~pGicDev->bmIntrGroup[i + 1];
1686 }
1687 if (!fGroup0)
1688 {
1689 uLo &= pGicDev->bmIntrGroup[i];
1690 uHi &= pGicDev->bmIntrGroup[i + 1];
1691 }
1692
1693 uint64_t const uIntrPending = RT_MAKE_U64(uLo, uHi);
1694 uint16_t const idxPending = ASMBitFirstSetU64(uIntrPending);
1695 if (idxPending > 0)
1696 {
1697                uint32_t const idxPriority = 32 * i + idxPending - 1;
1698 if (pGicDev->abIntrPriority[idxPriority] < uPriority)
1699 {
1700 idxHighest = idxPriority;
1701 uPriority = pGicDev->abIntrPriority[idxPriority];
1702 }
1703 }
1704 }
1705 if (idxHighest != UINT16_MAX)
1706 {
1707 idxIntr = idxHighest;
1708 uIntId = gicDistGetIntIdFromIndex(idxHighest);
1709 Assert( GIC_IS_INTR_SPI(uIntId)
1710 || GIC_IS_INTR_EXT_SPI(uIntId));
1711 Assert(uPriority != GIC_IDLE_PRIORITY);
1712 }
1713 }
1714#endif
1715
1716 /* Ensure that if no interrupt is pending, the idle priority is returned. */
1717 Assert(uIntId != GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT || uPriority == GIC_IDLE_PRIORITY);
1718 if (pbPriority)
1719 *pbPriority = uPriority;
1720 if (pidxIntr)
1721 *pidxIntr = idxIntr;
1722
1723 LogFlowFunc(("uIntId=%u [idxIntr=%u uPriority=%u]\n", uIntId, idxIntr, uPriority));
1724 return uIntId;
1725}
1726
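/*
 * Illustrative sketch, not part of the device code: per 32-bit word, the candidate set
 * computed above is "pending AND enabled AND NOT active", filtered by the group enables
 * (group 1 interrupts have their bmIntrGroup bit set, group 0 interrupts have it clear):
 *
 *     uint32_t uCandidates = (bmPending & bmEnabled) & ~bmActive;
 *     if (!fGroup1)
 *         uCandidates &= ~bmGroup;    // drop group 1 interrupts
 *     if (!fGroup0)
 *         uCandidates &= bmGroup;     // drop group 0 interrupts
 *
 * The scan then keeps the candidate with the numerically lowest priority byte, since on
 * the GIC lower priority values mean higher priority.
 */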
1727
1728/**
1729 * Gets and acknowledges the interrupt ID of a signalled interrupt.
1730 *
1731 * @returns The interrupt ID or GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT if no interrupt
1732 *          is pending or not in a state to be signalled.
1733 * @param pGicDev The GIC distributor state.
1734 * @param pVCpu The cross context virtual CPU structure.
1735 * @param fGroup0 Whether to consider group 0 interrupts.
1736 * @param fGroup1 Whether to consider group 1 interrupts.
1737 */
1738static uint16_t gicAckHighestPriorityPendingIntr(PGICDEV pGicDev, PVMCPUCC pVCpu, bool fGroup0, bool fGroup1)
1739{
1740 Assert(fGroup0 || fGroup1);
1741 LogFlowFunc(("fGroup0=%RTbool fGroup1=%RTbool\n", fGroup0, fGroup1));
1742
1743 /*
1744 * Get the pending interrupt with the highest priority for the given group.
1745 */
1746 uint8_t bIntrPriority;
1747 uint16_t idxIntr;
1748 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
1749 STAM_PROFILE_START(&pGicCpu->StatProfIntrAck, x);
1750 uint16_t const uIntId = gicGetHighestPriorityPendingIntr(pGicDev, pGicCpu, fGroup0, fGroup1, &idxIntr, &bIntrPriority);
1751 if (uIntId != GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT)
1752 {
1753 /*
1754 * The interrupt priority must be higher than the priority mask of the CPU interface for the
1755 * interrupt to be signalled/acknowledged. Here, we must NOT use priority grouping when comparing
1756 * the priority of a pending interrupt with this priority mask (threshold).
1757 *
1758 * See ARM GIC spec. 4.8.6 "Priority masking".
1759 */
1760 if (bIntrPriority >= pGicCpu->bIntrPriorityMask)
1761 {
1762 STAM_PROFILE_STOP(&pGicCpu->StatProfIntrAck, x);
1763 return GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT;
1764 }
1765
1766 /*
1767 * The group priority of the pending interrupt must be higher than that of the running priority.
1768         * The number of bits for the group priority depends on the binary point registers.
1769 * We mask the sub-priority bits and only compare the group priority.
1770 *
1771 * When the binary point registers indicates no preemption, we must allow interrupts that have
1772 * a higher priority than idle. Hence, the use of two different masks below.
1773 *
1774 * See ARM GIC spec. 4.8.3 "Priority grouping".
1775 * See ARM GIC spec. 4.8.5 "Preemption".
1776 */
1777 static uint8_t const s_afGroupPriorityMasks[8] = { 0xfe, 0xfc, 0xf8, 0xf0, 0xe0, 0xc0, 0x80, 0x00 };
1778 static uint8_t const s_afRunningPriorityMasks[8] = { 0xfe, 0xfc, 0xf8, 0xf0, 0xe0, 0xc0, 0x80, 0xff };
1779 uint8_t const idxPriorityMask = (fGroup0 || (pGicCpu->uIccCtlr & ARMV8_ICC_CTLR_EL1_AARCH64_CBPR))
1780 ? pGicCpu->bBinaryPtGroup0 & 7
1781 : pGicCpu->bBinaryPtGroup1 & 7;
1782 uint8_t const bRunningPriority = pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority];
1783 uint8_t const bRunningGroupPriority = bRunningPriority & s_afRunningPriorityMasks[idxPriorityMask];
1784 uint8_t const bIntrGroupPriority = bIntrPriority & s_afGroupPriorityMasks[idxPriorityMask];
1785 if (bIntrGroupPriority >= bRunningGroupPriority)
1786 {
1787 STAM_PROFILE_STOP(&pGicCpu->StatProfIntrAck, x);
1788 return GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT;
1789 }
1790
1791 /*
1792 * Acknowledge the interrupt.
1793 */
1794 bool const fIsRedistIntId = GIC_IS_INTR_SGI_OR_PPI(uIntId) || GIC_IS_INTR_EXT_PPI(uIntId);
1795 if (fIsRedistIntId)
1796 {
1797 /* Mark the interrupt as active. */
1798 AssertMsg(idxIntr < sizeof(pGicCpu->bmIntrActive) * 8, ("idxIntr=%u\n", idxIntr));
1799 ASMBitSet(&pGicCpu->bmIntrActive[0], idxIntr);
1800
1801 /** @todo Duplicate block Id=E5ED12D2-088D-4525-9609-8325C02846C3 (start). */
1802 /* Update the active priorities bitmap. */
1803 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup0) * 8 >= 128);
1804 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup1) * 8 >= 128);
1805 uint8_t const idxPreemptionLevel = bIntrPriority >> 1;
1806 if (fGroup0)
1807 ASMBitSet(&pGicCpu->bmActivePriorityGroup0[0], idxPreemptionLevel);
1808 if (fGroup1)
1809 ASMBitSet(&pGicCpu->bmActivePriorityGroup1[0], idxPreemptionLevel);
1810
1811 /* Drop priority. */
1812 if (RT_LIKELY(pGicCpu->idxRunningPriority < RT_ELEMENTS(pGicCpu->abRunningPriorities) - 1))
1813 {
1814 LogFlowFunc(("Dropping interrupt priority from %u -> %u (idxRunningPriority: %u -> %u)\n",
1815 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority],
1816 bIntrPriority,
1817 pGicCpu->idxRunningPriority, pGicCpu->idxRunningPriority + 1));
1818 ++pGicCpu->idxRunningPriority;
1819 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority] = bIntrPriority;
1820 }
1821 else
1822 AssertReleaseMsgFailed(("Index of running-interrupt priority out-of-bounds %u\n", pGicCpu->idxRunningPriority));
1823 /** @todo Duplicate block Id=E5ED12D2-088D-4525-9609-8325C02846C3 (end). */
1824
1825 /* If it is an edge-triggered interrupt, mark it as no longer pending. */
1826 AssertRelease(UINT32_C(2) * idxIntr + 1 < sizeof(pGicCpu->bmIntrConfig) * 8);
1827 bool const fEdgeTriggered = ASMBitTest(&pGicCpu->bmIntrConfig[0], 2 * idxIntr + 1);
1828 if (fEdgeTriggered)
1829 ASMBitClear(&pGicCpu->bmIntrPending[0], idxIntr);
1830
1831 /* Update the redistributor IRQ state to reflect change to the active interrupt. */
1832 gicReDistUpdateIrqState(pGicDev, pVCpu);
1833 }
1834 else
1835 {
1836 /* Sanity check if the interrupt ID belongs to the distributor. */
1837 Assert(GIC_IS_INTR_SPI(uIntId) || GIC_IS_INTR_EXT_SPI(uIntId));
1838
1839 /* Mark the interrupt as active. */
1840 Assert(idxIntr < sizeof(pGicDev->bmIntrActive) * 8);
1841 ASMBitSet(&pGicDev->bmIntrActive[0], idxIntr);
1842
1843 /** @todo Duplicate block Id=E5ED12D2-088D-4525-9609-8325C02846C3 (start). */
1844 /* Update the active priorities bitmap. */
1845 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup0) * 8 >= 128);
1846 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup1) * 8 >= 128);
1847 uint8_t const idxPreemptionLevel = bIntrPriority >> 1;
1848 if (fGroup0)
1849 ASMBitSet(&pGicCpu->bmActivePriorityGroup0[0], idxPreemptionLevel);
1850 if (fGroup1)
1851 ASMBitSet(&pGicCpu->bmActivePriorityGroup1[0], idxPreemptionLevel);
1852
1853 /* Drop priority. */
1854 if (RT_LIKELY(pGicCpu->idxRunningPriority < RT_ELEMENTS(pGicCpu->abRunningPriorities) - 1))
1855 {
1856 LogFlowFunc(("Dropping interrupt priority from %u -> %u (idxRunningPriority: %u -> %u)\n",
1857 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority],
1858 bIntrPriority,
1859 pGicCpu->idxRunningPriority, pGicCpu->idxRunningPriority + 1));
1860 ++pGicCpu->idxRunningPriority;
1861 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority] = bIntrPriority;
1862 }
1863 else
1864 AssertReleaseMsgFailed(("Index of running-interrupt priority out-of-bounds %u\n", pGicCpu->idxRunningPriority));
1865 /** @todo Duplicate block Id=E5ED12D2-088D-4525-9609-8325C02846C3 (end). */
1866
1867 /* If it is an edge-triggered interrupt, mark it as no longer pending. */
1868 AssertRelease(UINT32_C(2) * idxIntr + 1 < sizeof(pGicDev->bmIntrConfig) * 8);
1869 bool const fEdgeTriggered = ASMBitTest(&pGicDev->bmIntrConfig[0], 2 * idxIntr + 1);
1870 if (fEdgeTriggered)
1871 ASMBitClear(&pGicDev->bmIntrPending[0], idxIntr);
1872
1873 /* Update the distributor IRQ state to reflect change to the active interrupt. */
1874 gicDistUpdateIrqState(pVCpu->CTX_SUFF(pVM), pGicDev);
1875 }
1876 }
1877 else
1878 Assert(bIntrPriority == GIC_IDLE_PRIORITY);
1879
1880 LogFlowFunc(("uIntId=%u\n", uIntId));
1881 STAM_PROFILE_STOP(&pGicCpu->StatProfIntrAck, x);
1882 return uIntId;
1883}
1884
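/*
 * Illustrative worked example, not part of the device code, for the preemption check in
 * gicAckHighestPriorityPendingIntr(): a binary point value of 3 selects the 0xf0 group
 * mask, so a pending interrupt of priority 0x45 has group priority 0x40. With a running
 * priority of 0x50 (group priority 0x50), 0x40 < 0x50 and the interrupt is acknowledged;
 * a pending priority of 0x52 (group priority 0x50) is not, because equal group priorities
 * never preempt. A binary point of 7 selects the 0x00 group mask for the pending interrupt
 * but keeps the full running priority (0xff mask), so interrupts can still be signalled
 * while the running priority is the idle priority.
 */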
1885
1886/**
1887 * Reads a distributor register.
1888 *
1889 * @returns VBox status code.
1890 * @param pDevIns The device instance.
1891 * @param pVCpu The cross context virtual CPU structure.
1892 * @param offReg The offset of the register being read.
1893 * @param puValue Where to store the register value.
1894 */
1895DECLINLINE(VBOXSTRICTRC) gicDistReadRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t *puValue)
1896{
1897 VMCPU_ASSERT_EMT(pVCpu); RT_NOREF(pVCpu);
1898 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
1899
1900 /*
1901 * 64-bit registers.
1902 */
1903 {
1904 /*
1905 * GICD_IROUTER<n> and GICD_IROUTER<n>E.
1906 */
1907 uint16_t const cbReg = sizeof(uint64_t);
1908 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERn_OFF_START, GIC_DIST_REG_IROUTERn_RANGE_SIZE))
1909 {
1910 /* Hardware does not map the first 32 registers (corresponding to SGIs and PPIs). */
1911 uint16_t const idxExt = GIC_INTID_RANGE_SPI_START;
1912 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IROUTERn_OFF_START) / cbReg;
1913 return gicDistReadIntrRoutingReg(pGicDev, idxReg, puValue);
1914 }
1915 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERnE_OFF_START, GIC_DIST_REG_IROUTERnE_RANGE_SIZE))
1916 {
1917 uint16_t const idxExt = RT_ELEMENTS(pGicDev->au32IntrRouting) / 2;
1918 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IROUTERnE_OFF_START) / cbReg;
1919 return gicDistReadIntrRoutingReg(pGicDev, idxReg, puValue);
1920 }
1921 }
1922
1923 /*
1924 * 32-bit registers.
1925 */
1926 {
1927 /*
1928 * GICD_IGROUPR<n> and GICD_IGROUPR<n>E.
1929 */
1930 uint16_t const cbReg = sizeof(uint32_t);
1931 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRn_OFF_START, GIC_DIST_REG_IGROUPRn_RANGE_SIZE))
1932 {
1933 uint16_t const idxReg = (offReg - GIC_DIST_REG_IGROUPRn_OFF_START) / cbReg;
1934 return gicDistReadIntrGroupReg(pGicDev, idxReg, puValue);
1935 }
1936 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRnE_OFF_START, GIC_DIST_REG_IGROUPRnE_RANGE_SIZE))
1937 {
1938 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrGroup) / 2;
1939 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IGROUPRnE_OFF_START) / cbReg;
1940 return gicDistReadIntrGroupReg(pGicDev, idxReg, puValue);
1941 }
1942
1943 /*
1944 * GICD_ISENABLER<n> and GICD_ISENABLER<n>E.
1945 * GICD_ICENABLER<n> and GICD_ICENABLER<n>E.
1946 */
1947 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERn_OFF_START, GIC_DIST_REG_ISENABLERn_RANGE_SIZE))
1948 {
1949 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISENABLERn_OFF_START) / cbReg;
1950 return gicDistReadIntrEnableReg(pGicDev, idxReg, puValue);
1951 }
1952 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERnE_OFF_START, GIC_DIST_REG_ISENABLERnE_RANGE_SIZE))
1953 {
1954 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrEnabled) / 2;
1955 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISENABLERnE_OFF_START) / cbReg;
1956 return gicDistReadIntrEnableReg(pGicDev, idxReg, puValue);
1957 }
1958 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERn_OFF_START, GIC_DIST_REG_ICENABLERn_RANGE_SIZE))
1959 {
1960 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICENABLERn_OFF_START) / cbReg;
1961 return gicDistReadIntrEnableReg(pGicDev, idxReg, puValue);
1962 }
1963 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERnE_OFF_START, GIC_DIST_REG_ICENABLERnE_RANGE_SIZE))
1964 {
1965 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrEnabled) / 2;
1966 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICENABLERnE_OFF_START) / cbReg;
1967 return gicDistReadIntrEnableReg(pGicDev, idxReg, puValue);
1968 }
1969
1970 /*
1971 * GICD_ISACTIVER<n> and GICD_ISACTIVER<n>E.
1972 * GICD_ICACTIVER<n> and GICD_ICACTIVER<n>E.
1973 */
1974 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERn_OFF_START, GIC_DIST_REG_ISACTIVERn_RANGE_SIZE))
1975 {
1976 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISACTIVERn_OFF_START) / cbReg;
1977 return gicDistReadIntrActiveReg(pGicDev, idxReg, puValue);
1978 }
1979 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERnE_OFF_START, GIC_DIST_REG_ISACTIVERnE_RANGE_SIZE))
1980 {
1981 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrActive) / 2;
1982 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISACTIVERnE_OFF_START) / cbReg;
1983 return gicDistReadIntrActiveReg(pGicDev, idxReg, puValue);
1984 }
1985 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERn_OFF_START, GIC_DIST_REG_ICACTIVERn_RANGE_SIZE))
1986 {
1987            uint16_t const idxReg = (offReg - GIC_DIST_REG_ICACTIVERn_OFF_START) / cbReg;
1988 return gicDistReadIntrActiveReg(pGicDev, idxReg, puValue);
1989 }
1990 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERnE_OFF_START, GIC_DIST_REG_ICACTIVERnE_RANGE_SIZE))
1991 {
1992 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrActive) / 2;
1993 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICACTIVERnE_OFF_START) / cbReg;
1994 return gicDistReadIntrActiveReg(pGicDev, idxReg, puValue);
1995 }
1996
1997 /*
1998 * GICD_IPRIORITYR<n> and GICD_IPRIORITYR<n>E.
1999 */
2000 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRn_OFF_START, GIC_DIST_REG_IPRIORITYRn_RANGE_SIZE))
2001 {
2002 uint16_t const idxReg = (offReg - GIC_DIST_REG_IPRIORITYRn_OFF_START) / cbReg;
2003 return gicDistReadIntrPriorityReg(pGicDev, idxReg, puValue);
2004 }
2005 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRnE_OFF_START, GIC_DIST_REG_IPRIORITYRnE_RANGE_SIZE))
2006 {
2007 uint16_t const idxExt = RT_ELEMENTS(pGicDev->abIntrPriority) / (2 * sizeof(uint32_t));
2008 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IPRIORITYRnE_OFF_START) / cbReg;
2009 return gicDistReadIntrPriorityReg(pGicDev, idxReg, puValue);
2010 }
2011
2012 /*
2013 * GICD_ISPENDR<n> and GICD_ISPENDR<n>E.
2014 * GICD_ICPENDR<n> and GICD_ICPENDR<n>E.
2015 */
2016 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRn_OFF_START, GIC_DIST_REG_ISPENDRn_RANGE_SIZE))
2017 {
2018 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISPENDRn_OFF_START) / cbReg;
2019 return gicDistReadIntrPendingReg(pGicDev, idxReg, puValue);
2020 }
2021 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRnE_OFF_START, GIC_DIST_REG_ISPENDRnE_RANGE_SIZE))
2022 {
2023 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrPending) / 2;
2024 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISPENDRnE_OFF_START) / cbReg;
2025 return gicDistReadIntrPendingReg(pGicDev, idxReg, puValue);
2026 }
2027 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRn_OFF_START, GIC_DIST_REG_ICPENDRn_RANGE_SIZE))
2028 {
2029 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICPENDRn_OFF_START) / cbReg;
2030 return gicDistReadIntrPendingReg(pGicDev, idxReg, puValue);
2031 }
2032 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRnE_OFF_START, GIC_DIST_REG_ICPENDRnE_RANGE_SIZE))
2033 {
2034 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrPending) / 2;
2035 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICPENDRnE_OFF_START) / cbReg;
2036 return gicDistReadIntrPendingReg(pGicDev, idxReg, puValue);
2037 }
2038
2039 /*
2040 * GICD_ICFGR<n> and GICD_ICFGR<n>E.
2041 */
2042 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRn_OFF_START, GIC_DIST_REG_ICFGRn_RANGE_SIZE))
2043 {
2044 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICFGRn_OFF_START) / cbReg;
2045 return gicDistReadIntrConfigReg(pGicDev, idxReg, puValue);
2046 }
2047 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRnE_OFF_START, GIC_DIST_REG_ICFGRnE_RANGE_SIZE))
2048 {
2049 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrConfig) / 2;
2050 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICFGRnE_OFF_START) / cbReg;
2051 return gicDistReadIntrConfigReg(pGicDev, idxReg, puValue);
2052 }
2053 }
2054
2055 switch (offReg)
2056 {
2057 case GIC_DIST_REG_CTLR_OFF:
2058 Assert(pGicDev->fAffRoutingEnabled);
2059 *puValue = (pGicDev->fIntrGroup0Enabled ? GIC_DIST_REG_CTRL_ENABLE_GRP0 : 0)
2060 | (pGicDev->fIntrGroup1Enabled ? GIC_DIST_REG_CTRL_ENABLE_GRP1_NS : 0)
2061 | GIC_DIST_REG_CTRL_DS /* We don't support multiple security states. */
2062 | GIC_DIST_REG_CTRL_ARE_S; /* We don't support GICv2 backwards compatibility, ARE is always enabled. */
2063 break;
2064 case GIC_DIST_REG_TYPER_OFF:
2065 {
2066 Assert(pGicDev->uMaxSpi > 0 && pGicDev->uMaxSpi <= GIC_DIST_REG_TYPER_NUM_ITLINES);
2067 Assert(pGicDev->fAffRoutingEnabled);
2068 *puValue = GIC_DIST_REG_TYPER_NUM_ITLINES_SET(pGicDev->uMaxSpi)
2069 | GIC_DIST_REG_TYPER_NUM_PES_SET(0) /* Affinity routing is always enabled, hence this MBZ. */
2070 /*| GIC_DIST_REG_TYPER_NMI*/ /** @todo Support non-maskable interrupts */
2071 /*| GIC_DIST_REG_TYPER_SECURITY_EXTN*/ /** @todo Support dual security states. */
2072 | (pGicDev->fMbi ? GIC_DIST_REG_TYPER_MBIS : 0)
2073 | (pGicDev->fRangeSel ? GIC_DIST_REG_TYPER_RSS : 0)
2074 | GIC_DIST_REG_TYPER_IDBITS_SET(15) /* We only support 16-bit interrupt IDs. */
2075 | (pGicDev->fAff3Levels ? GIC_DIST_REG_TYPER_A3V : 0);
2076 if (pGicDev->fExtSpi)
2077 *puValue |= GIC_DIST_REG_TYPER_ESPI
2078 | GIC_DIST_REG_TYPER_ESPI_RANGE_SET(pGicDev->uMaxExtSpi);
2079 if (pGicDev->fLpi)
2080 {
2081 Assert(pGicDev->uMaxLpi - 2 < 13);
2082 Assert(GIC_INTID_RANGE_LPI_START + (UINT32_C(2) << pGicDev->uMaxLpi) <= UINT16_MAX);
2083 *puValue |= GIC_DIST_REG_TYPER_LPIS
2084 | GIC_DIST_REG_TYPER_NUM_LPIS_SET(pGicDev->uMaxLpi);
2085 }
2086 break;
2087 }
2088 case GIC_DIST_REG_PIDR2_OFF:
2089 Assert(pGicDev->uArchRev <= GIC_DIST_REG_PIDR2_ARCHREV_GICV4);
2090 *puValue = GIC_DIST_REG_PIDR2_ARCHREV_SET(pGicDev->uArchRev);
2091 break;
2092 case GIC_DIST_REG_IIDR_OFF:
2093 *puValue = GIC_DIST_REG_IIDR_IMPL_SET(GIC_JEDEC_JEP106_IDENTIFICATION_CODE, GIC_JEDEC_JEP106_CONTINUATION_CODE);
2094 break;
2095 case GIC_DIST_REG_TYPER2_OFF:
2096 *puValue = 0;
2097 break;
2098 default:
2099 AssertReleaseMsgFailed(("offReg=%#x\n", offReg));
2100 *puValue = 0;
2101 break;
2102 }
2103 return VINF_SUCCESS;
2104}
2105
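/*
 * Illustrative sketch, not part of the device code: two GICD_TYPER fields returned above
 * are encodings rather than literal counts. ITLinesNumber (set from uMaxSpi) implies
 * support for SPI INTIDs up to 32 * (ITLinesNumber + 1) - 1, and num_LPIs (set from
 * uMaxLpi) implies 2^(num_LPIs + 1) LPIs starting at INTID 8192, which is what the
 * assertion on uMaxLpi checks against the 16-bit INTID limit:
 *
 *     uint16_t const uMaxSpiIntId = 32 * (uItLinesNumber + 1) - 1;    // e.g. 1 -> 63
 *     uint32_t const cLpis        = UINT32_C(2) << uNumLpis;          // 2^(n+1), e.g. 1 -> 4
 *     uint32_t const uMaxLpiIntId = 8192 + cLpis - 1;                 // e.g. 8195
 */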
2106
2107/**
2108 * Writes a distributor register.
2109 *
2110 * @returns Strict VBox status code.
2111 * @param pDevIns The device instance.
2112 * @param pVCpu The cross context virtual CPU structure.
2113 * @param offReg The offset of the register being written.
2114 * @param uValue The register value.
2115 */
2116DECLINLINE(VBOXSTRICTRC) gicDistWriteRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t uValue)
2117{
2118 VMCPU_ASSERT_EMT(pVCpu); RT_NOREF(pVCpu);
2119 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2120 PVMCC pVM = PDMDevHlpGetVM(pDevIns);
2121
2122 /*
2123 * 64-bit registers.
2124 */
2125 {
2126 /*
2127 * GICD_IROUTER<n> and GICD_IROUTER<n>E.
2128 */
2129 uint16_t const cbReg = sizeof(uint64_t);
2130 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERn_OFF_START, GIC_DIST_REG_IROUTERn_RANGE_SIZE))
2131 {
2132 /* Hardware does not map the first 32 registers (corresponding to SGIs and PPIs). */
2133 uint16_t const idxExt = GIC_INTID_RANGE_SPI_START;
2134 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IROUTERn_OFF_START) / cbReg;
2135 return gicDistWriteIntrRoutingReg(pGicDev, idxReg, uValue);
2136 }
2137 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IROUTERnE_OFF_START, GIC_DIST_REG_IROUTERnE_RANGE_SIZE))
2138 {
2139 uint16_t const idxExt = RT_ELEMENTS(pGicDev->au32IntrRouting) / 2;
2140 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IROUTERnE_OFF_START) / cbReg;
2141 return gicDistWriteIntrRoutingReg(pGicDev, idxReg, uValue);
2142 }
2143
2144 }
2145
2146 /*
2147 * 32-bit registers.
2148 */
2149 {
2150 /*
2151 * GICD_IGROUPR<n> and GICD_IGROUPR<n>E.
2152 */
2153 uint16_t const cbReg = sizeof(uint32_t);
2154 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRn_OFF_START, GIC_DIST_REG_IGROUPRn_RANGE_SIZE))
2155 {
2156 uint16_t const idxReg = (offReg - GIC_DIST_REG_IGROUPRn_OFF_START) / cbReg;
2157 return gicDistWriteIntrGroupReg(pVM, pGicDev, idxReg, uValue);
2158 }
2159 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IGROUPRnE_OFF_START, GIC_DIST_REG_IGROUPRnE_RANGE_SIZE))
2160 {
2161 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrGroup) / 2;
2162 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IGROUPRnE_OFF_START) / cbReg;
2163 return gicDistWriteIntrGroupReg(pVM, pGicDev, idxReg, uValue);
2164 }
2165
2166 /*
2167 * GICD_ISENABLER<n> and GICD_ISENABLER<n>E.
2168 * GICD_ICENABLER<n> and GICD_ICENABLER<n>E.
2169 */
2170 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERn_OFF_START, GIC_DIST_REG_ISENABLERn_RANGE_SIZE))
2171 {
2172 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISENABLERn_OFF_START) / cbReg;
2173 return gicDistWriteIntrSetEnableReg(pVM, pGicDev, idxReg, uValue);
2174 }
2175 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISENABLERnE_OFF_START, GIC_DIST_REG_ISENABLERnE_RANGE_SIZE))
2176 {
2177 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrEnabled) / 2;
2178 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISENABLERnE_OFF_START) / cbReg;
2179 return gicDistWriteIntrSetEnableReg(pVM, pGicDev, idxReg, uValue);
2180 }
2181 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERn_OFF_START, GIC_DIST_REG_ICENABLERn_RANGE_SIZE))
2182 {
2183 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICENABLERn_OFF_START) / cbReg;
2184 return gicDistWriteIntrClearEnableReg(pVM, pGicDev, idxReg, uValue);
2185 }
2186 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICENABLERnE_OFF_START, GIC_DIST_REG_ICENABLERnE_RANGE_SIZE))
2187 {
2188 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrEnabled) / 2;
2189 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICENABLERnE_OFF_START) / cbReg;
2190 return gicDistWriteIntrClearEnableReg(pVM, pGicDev, idxReg, uValue);
2191 }
2192
2193 /*
2194 * GICD_ISACTIVER<n> and GICD_ISACTIVER<n>E.
2195 * GICD_ICACTIVER<n> and GICD_ICACTIVER<n>E.
2196 */
2197 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERn_OFF_START, GIC_DIST_REG_ISACTIVERn_RANGE_SIZE))
2198 {
2199 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISACTIVERn_OFF_START) / cbReg;
2200 return gicDistWriteIntrSetActiveReg(pVM, pGicDev, idxReg, uValue);
2201 }
2202 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISACTIVERnE_OFF_START, GIC_DIST_REG_ISACTIVERnE_RANGE_SIZE))
2203 {
2204 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrActive) / 2;
2205 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISACTIVERnE_OFF_START) / cbReg;
2206 return gicDistWriteIntrSetActiveReg(pVM, pGicDev, idxReg, uValue);
2207 }
2208 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERn_OFF_START, GIC_DIST_REG_ICACTIVERn_RANGE_SIZE))
2209 {
2210 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICACTIVERn_OFF_START) / cbReg;
2211 return gicDistWriteIntrClearActiveReg(pVM, pGicDev, idxReg, uValue);
2212 }
2213 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICACTIVERnE_OFF_START, GIC_DIST_REG_ICACTIVERnE_RANGE_SIZE))
2214 {
2215 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrActive) / 2;
2216 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICACTIVERnE_OFF_START) / cbReg;
2217 return gicDistWriteIntrClearActiveReg(pVM, pGicDev, idxReg, uValue);
2218 }
2219
2220 /*
2221 * GICD_IPRIORITYR<n> and GICD_IPRIORITYR<n>E.
2222 */
2223 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRn_OFF_START, GIC_DIST_REG_IPRIORITYRn_RANGE_SIZE))
2224 {
2225 uint16_t const idxReg = (offReg - GIC_DIST_REG_IPRIORITYRn_OFF_START) / cbReg;
2226 return gicDistWriteIntrPriorityReg(pGicDev, idxReg, uValue);
2227 }
2228 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_IPRIORITYRnE_OFF_START, GIC_DIST_REG_IPRIORITYRnE_RANGE_SIZE))
2229 {
2230 uint16_t const idxExt = RT_ELEMENTS(pGicDev->abIntrPriority) / (2 * sizeof(uint32_t));
2231 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_IPRIORITYRnE_OFF_START) / cbReg;
2232 return gicDistWriteIntrPriorityReg(pGicDev, idxReg, uValue);
2233 }
2234
2235 /*
2236 * GICD_ISPENDR<n> and GICD_ISPENDR<n>E.
2237 * GICD_ICPENDR<n> and GICD_ICPENDR<n>E.
2238 */
2239 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRn_OFF_START, GIC_DIST_REG_ISPENDRn_RANGE_SIZE))
2240 {
2241 uint16_t const idxReg = (offReg - GIC_DIST_REG_ISPENDRn_OFF_START) / cbReg;
2242 return gicDistWriteIntrSetPendingReg(pVM, pGicDev, idxReg, uValue);
2243 }
2244 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ISPENDRnE_OFF_START, GIC_DIST_REG_ISPENDRnE_RANGE_SIZE))
2245 {
2246 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrPending) / 2;
2247 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ISPENDRnE_OFF_START) / cbReg;
2248 return gicDistWriteIntrSetPendingReg(pVM, pGicDev, idxReg, uValue);
2249 }
2250 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRn_OFF_START, GIC_DIST_REG_ICPENDRn_RANGE_SIZE))
2251 {
2252 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICPENDRn_OFF_START) / cbReg;
2253 return gicDistWriteIntrClearPendingReg(pVM, pGicDev, idxReg, uValue);
2254 }
2255 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICPENDRnE_OFF_START, GIC_DIST_REG_ICPENDRnE_RANGE_SIZE))
2256 {
2257 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrPending) / 2;
2258 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICPENDRnE_OFF_START) / cbReg;
2259 return gicDistWriteIntrClearPendingReg(pVM, pGicDev, idxReg, uValue);
2260 }
2261
2262 /*
2263 * GICD_ICFGR<n> and GICD_ICFGR<n>E.
2264 */
2265 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRn_OFF_START, GIC_DIST_REG_ICFGRn_RANGE_SIZE))
2266 {
2267 uint16_t const idxReg = (offReg - GIC_DIST_REG_ICFGRn_OFF_START) / cbReg;
2268 return gicDistWriteIntrConfigReg(pGicDev, idxReg, uValue);
2269 }
2270 if (GIC_IS_REG_IN_RANGE(offReg, GIC_DIST_REG_ICFGRnE_OFF_START, GIC_DIST_REG_ICFGRnE_RANGE_SIZE))
2271 {
2272 uint16_t const idxExt = RT_ELEMENTS(pGicDev->bmIntrConfig) / 2;
2273 uint16_t const idxReg = idxExt + (offReg - GIC_DIST_REG_ICFGRnE_OFF_START) / cbReg;
2274 return gicDistWriteIntrConfigReg(pGicDev, idxReg, uValue);
2275 }
2276 }
2277
2278 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2279 switch (offReg)
2280 {
2281 case GIC_DIST_REG_CTLR_OFF:
2282 Assert(!(uValue & GIC_DIST_REG_CTRL_ARE_NS));
2283 pGicDev->fIntrGroup0Enabled = RT_BOOL(uValue & GIC_DIST_REG_CTRL_ENABLE_GRP0);
2284 pGicDev->fIntrGroup1Enabled = RT_BOOL(uValue & GIC_DIST_REG_CTRL_ENABLE_GRP1_NS);
2285 rcStrict = gicDistUpdateIrqState(pVM, pGicDev);
2286 break;
2287 default:
2288 {
2289 /* Windows 11 arm64 (24H2) writes zeroes into these reserved registers. We ignore them. */
2290 if (offReg >= 0x7fe0 && offReg <= 0x7ffc)
2291 LogFlowFunc(("Bad guest writing to reserved GIC distributor register space [0x7fe0..0x7ffc] -- ignoring!"));
2292 else
2293 AssertReleaseMsgFailed(("offReg=%#x uValue=%#RX32\n", offReg, uValue));
2294 break;
2295 }
2296 }
2297
2298 return rcStrict;
2299}
2300
2301
2302/**
2303 * Reads a GIC redistributor register.
2304 *
2305 * @returns VBox status code.
2306 * @param pDevIns The device instance.
2307 * @param pVCpu The cross context virtual CPU structure.
2308 * @param idRedist The redistributor ID.
2309 * @param offReg The offset of the register being read.
2310 * @param puValue Where to store the register value.
2311 */
2312DECLINLINE(VBOXSTRICTRC) gicReDistReadRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint32_t idRedist, uint16_t offReg, uint32_t *puValue)
2313{
2314 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
2315 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2316 Assert(idRedist == pVCpu->idCpu);
2317
2318 switch (offReg)
2319 {
2320 case GIC_REDIST_REG_TYPER_OFF:
2321 *puValue = (pVCpu->idCpu == pVM->cCpus - 1 ? GIC_REDIST_REG_TYPER_LAST : 0)
2322 | GIC_REDIST_REG_TYPER_CPU_NUMBER_SET(idRedist)
2323 | GIC_REDIST_REG_TYPER_CMN_LPI_AFF_SET(GIC_REDIST_REG_TYPER_CMN_LPI_AFF_ALL)
2324 | (pGicDev->fExtPpi ? GIC_REDIST_REG_TYPER_PPI_NUM_SET(pGicDev->uMaxExtPpi) : 0)
2325 | (pGicDev->fLpi ? GIC_REDIST_REG_TYPER_PLPIS : 0);
2326 Assert(!pGicDev->fExtPpi || pGicDev->uMaxExtPpi > 0);
2327 break;
2328 case GIC_REDIST_REG_WAKER_OFF:
2329 *puValue = 0;
2330 break;
2331 case GIC_REDIST_REG_IIDR_OFF:
2332 *puValue = GIC_REDIST_REG_IIDR_IMPL_SET(GIC_JEDEC_JEP106_IDENTIFICATION_CODE, GIC_JEDEC_JEP106_CONTINUATION_CODE);
2333 break;
2334 case GIC_REDIST_REG_TYPER_AFFINITY_OFF:
2335 *puValue = idRedist;
2336 break;
2337 case GIC_REDIST_REG_PIDR2_OFF:
2338 Assert(pGicDev->uArchRev <= GIC_DIST_REG_PIDR2_ARCHREV_GICV4);
2339 *puValue = GIC_REDIST_REG_PIDR2_ARCHREV_SET(pGicDev->uArchRev);
2340 break;
2341 case GIC_REDIST_REG_CTLR_OFF:
2342 *puValue = (pGicDev->fEnableLpis ? GIC_REDIST_REG_CTLR_ENABLE_LPI : 0)
2343 | GIC_REDIST_REG_CTLR_CES_SET(1);
2344 break;
2345 case GIC_REDIST_REG_PROPBASER_OFF:
2346 *puValue = pGicDev->uLpiConfigBaseReg.s.Lo;
2347 break;
2348 case GIC_REDIST_REG_PROPBASER_OFF + 4:
2349 *puValue = pGicDev->uLpiConfigBaseReg.s.Hi;
2350 break;
2351 case GIC_REDIST_REG_PENDBASER_OFF:
2352 *puValue = pGicDev->uLpiPendingBaseReg.s.Lo;
2353 break;
2354 case GIC_REDIST_REG_PENDBASER_OFF + 4:
2355 *puValue = pGicDev->uLpiPendingBaseReg.s.Hi;
2356 break;
2357 default:
2358 AssertReleaseMsgFailed(("offReg=%#x\n", offReg));
2359 *puValue = 0;
2360 break;
2361 }
2362 return VINF_SUCCESS;
2363}
2364
2365
2366/**
2367 * Reads a GIC redistributor SGI/PPI frame register.
2368 *
2369 * @returns VBox status code.
2370 * @param pDevIns The device instance.
2371 * @param pVCpu The cross context virtual CPU structure.
2372 * @param offReg The offset of the register being read.
2373 * @param puValue Where to store the register value.
2374 */
2375DECLINLINE(VBOXSTRICTRC) gicReDistReadSgiPpiRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t *puValue)
2376{
2377 VMCPU_ASSERT_EMT(pVCpu);
2378 RT_NOREF(pDevIns);
2379
2380 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2381 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2382 uint16_t const cbReg = sizeof(uint32_t);
2383
2384 /*
2385 * GICR_IGROUPR0 and GICR_IGROUPR<n>E.
2386 */
2387 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF, GIC_REDIST_SGI_PPI_REG_IGROUPRnE_RANGE_SIZE))
2388 {
2389 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF) / cbReg;
2390 return gicReDistReadIntrGroupReg(pGicDev, pGicCpu, idxReg, puValue);
2391 }
2392
2393 /*
2394 * GICR_ISENABLER0 and GICR_ISENABLER<n>E.
2395 * GICR_ICENABLER0 and GICR_ICENABLER<n>E.
2396 */
2397 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ISENABLERnE_RANGE_SIZE))
2398 {
2399 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF) / cbReg;
2400 return gicReDistReadIntrEnableReg(pGicDev, pGicCpu, idxReg, puValue);
2401 }
2402 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ICENABLERnE_RANGE_SIZE))
2403 {
2404        uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF) / cbReg;
2405 return gicReDistReadIntrEnableReg(pGicDev, pGicCpu, idxReg, puValue);
2406 }
2407
2408 /*
2409 * GICR_ISACTIVER0 and GICR_ISACTIVER<n>E.
2410 * GICR_ICACTIVER0 and GICR_ICACTIVER<n>E.
2411 */
2412 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ISACTIVERnE_RANGE_SIZE))
2413 {
2414 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF) / cbReg;
2415 return gicReDistReadIntrActiveReg(pGicCpu, idxReg, puValue);
2416 }
2417 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ICACTIVERnE_RANGE_SIZE))
2418 {
2419 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF) / cbReg;
2420 return gicReDistReadIntrActiveReg(pGicCpu, idxReg, puValue);
2421 }
2422
2423 /*
2424 * GICR_ISPENDR0 and GICR_ISPENDR<n>E.
2425 * GICR_ICPENDR0 and GICR_ICPENDR<n>E.
2426 */
2427 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ISPENDRnE_RANGE_SIZE))
2428 {
2429 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF) / cbReg;
2430 return gicReDistReadIntrPendingReg(pGicDev, pGicCpu, idxReg, puValue);
2431 }
2432 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ICPENDRnE_RANGE_SIZE))
2433 {
2434 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF) / cbReg;
2435 return gicReDistReadIntrPendingReg(pGicDev, pGicCpu, idxReg, puValue);
2436 }
2437
2438 /*
2439 * GICR_IPRIORITYR<n> and GICR_IPRIORITYR<n>E.
2440 */
2441 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START, GIC_REDIST_SGI_PPI_REG_IPRIORITYRnE_RANGE_SIZE))
2442 {
2443 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START) / cbReg;
2444 return gicReDistReadIntrPriorityReg(pGicDev, pGicCpu, idxReg, puValue);
2445 }
2446
2447 /*
2448 * GICR_ICFGR0, GICR_ICFGR1 and GICR_ICFGR<n>E.
2449 */
2450 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF, GIC_REDIST_SGI_PPI_REG_ICFGRnE_RANGE_SIZE))
2451 {
2452 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF) / cbReg;
2453 return gicReDistReadIntrConfigReg(pGicDev, pGicCpu, idxReg, puValue);
2454 }
2455
2456 AssertReleaseMsgFailed(("offReg=%#x (%s)\n", offReg, gicReDistGetSgiPpiRegDescription(offReg)));
2457 *puValue = 0;
2458 return VINF_SUCCESS;
2459}
2460
2461
2462/**
2463 * Writes a GIC redistributor frame register.
2464 *
2465 * @returns Strict VBox status code.
2466 * @param pDevIns The device instance.
2467 * @param pVCpu The cross context virtual CPU structure.
2468 * @param offReg The offset of the register being written.
2469 * @param uValue The register value.
2470 */
2471DECLINLINE(VBOXSTRICTRC) gicReDistWriteRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t uValue)
2472{
2473 VMCPU_ASSERT_EMT(pVCpu);
2474    RT_NOREF(pVCpu);
2475
2476 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2477 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2478 switch (offReg)
2479 {
2480 case GIC_REDIST_REG_WAKER_OFF:
2481 Assert(uValue == 0);
2482 break;
2483 case GIC_REDIST_REG_CTLR_OFF:
2484 pGicDev->fEnableLpis = RT_BOOL(uValue & GIC_REDIST_REG_CTLR_ENABLE_LPI);
2485 if (pGicDev->fEnableLpis)
2486 gicDistReadLpiConfigTableFromMemory(pDevIns, pGicDev);
2487 break;
2488 case GIC_REDIST_REG_PROPBASER_OFF:
2489 pGicDev->uLpiConfigBaseReg.s.Lo = uValue & RT_LO_U32(GIC_REDIST_REG_PROPBASER_RW_MASK);
2490 break;
2491 case GIC_REDIST_REG_PROPBASER_OFF + 4:
2492 pGicDev->uLpiConfigBaseReg.s.Hi = uValue & RT_HI_U32(GIC_REDIST_REG_PROPBASER_RW_MASK);
2493 break;
2494 case GIC_REDIST_REG_PENDBASER_OFF:
2495 pGicDev->uLpiPendingBaseReg.s.Lo = uValue & RT_LO_U32(GIC_REDIST_REG_PENDBASER_RW_MASK);
2496 break;
2497 case GIC_REDIST_REG_PENDBASER_OFF + 4:
2498 pGicDev->uLpiPendingBaseReg.s.Hi = uValue & RT_HI_U32(GIC_REDIST_REG_PENDBASER_RW_MASK);
2499 break;
2500 default:
2501 AssertReleaseMsgFailed(("offReg=%#x (%s) uValue=%#RX32\n", offReg, gicReDistGetRegDescription(offReg), uValue));
2502 break;
2503 }
2504
2505 return rcStrict;
2506}
2507
2508
2509/**
2510 * Writes a GIC redistributor SGI/PPI frame register.
2511 *
2512 * @returns Strict VBox status code.
2513 * @param pDevIns The device instance.
2514 * @param pVCpu The cross context virtual CPU structure.
2515 * @param offReg The offset of the register being written.
2516 * @param uValue The register value.
2517 */
2518DECLINLINE(VBOXSTRICTRC) gicReDistWriteSgiPpiRegister(PPDMDEVINS pDevIns, PVMCPUCC pVCpu, uint16_t offReg, uint32_t uValue)
2519{
2520 VMCPU_ASSERT_EMT(pVCpu);
2521 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PCGICDEV);
2522 uint16_t const cbReg = sizeof(uint32_t);
2523
2524 /*
2525 * GICR_IGROUPR0 and GICR_IGROUPR<n>E.
2526 */
2527 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF, GIC_REDIST_SGI_PPI_REG_IGROUPRnE_RANGE_SIZE))
2528 {
2529 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_IGROUPR0_OFF) / cbReg;
2530 return gicReDistWriteIntrGroupReg(pGicDev, pVCpu, idxReg, uValue);
2531 }
2532
2533 /*
2534 * GICR_ISENABLER0 and GICR_ISENABLER<n>E.
2535 * GICR_ICENABLER0 and GICR_ICENABLER<n>E.
2536 */
2537 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ISENABLERnE_RANGE_SIZE))
2538 {
2539 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISENABLER0_OFF) / cbReg;
2540 return gicReDistWriteIntrSetEnableReg(pGicDev, pVCpu, idxReg, uValue);
2541 }
2542 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF, GIC_REDIST_SGI_PPI_REG_ICENABLERnE_RANGE_SIZE))
2543 {
2544 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICENABLER0_OFF) / cbReg;
2545 return gicReDistWriteIntrClearEnableReg(pGicDev, pVCpu, idxReg, uValue);
2546 }
2547
2548 /*
2549 * GICR_ISACTIVER0 and GICR_ISACTIVER<n>E.
2550 * GICR_ICACTIVER0 and GICR_ICACTIVER<n>E.
2551 */
2552 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ISACTIVERnE_RANGE_SIZE))
2553 {
2554 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISACTIVER0_OFF) / cbReg;
2555 return gicReDistWriteIntrSetActiveReg(pGicDev, pVCpu, idxReg, uValue);
2556 }
2557 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF, GIC_REDIST_SGI_PPI_REG_ICACTIVERnE_RANGE_SIZE))
2558 {
2559 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICACTIVER0_OFF) / cbReg;
2560 return gicReDistWriteIntrClearActiveReg(pGicDev, pVCpu, idxReg, uValue);
2561 }
2562
2563 /*
2564 * GICR_ISPENDR0 and GICR_ISPENDR<n>E.
2565 * GICR_ICPENDR0 and GICR_ICPENDR<n>E.
2566 */
2567 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ISPENDRnE_RANGE_SIZE))
2568 {
2569 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ISPENDR0_OFF) / cbReg;
2570 return gicReDistWriteIntrSetPendingReg(pGicDev, pVCpu, idxReg, uValue);
2571 }
2572 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF, GIC_REDIST_SGI_PPI_REG_ICPENDRnE_RANGE_SIZE))
2573 {
2574 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICPENDR0_OFF) / cbReg;
2575 return gicReDistWriteIntrClearPendingReg(pGicDev, pVCpu, idxReg, uValue);
2576 }
2577
2578 /*
2579 * GICR_IPRIORITYR<n> and GICR_IPRIORITYR<n>E.
2580 */
2581 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START, GIC_REDIST_SGI_PPI_REG_IPRIORITYRnE_RANGE_SIZE))
2582 {
2583 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_IPRIORITYRn_OFF_START) / cbReg;
2584 return gicReDistWriteIntrPriorityReg(pGicDev, pVCpu, idxReg, uValue);
2585 }
2586
2587 /*
2588     * GICR_ICFGR0, GICR_ICFGR1 and GICR_ICFGR<n>E.
2589 */
2590 if (GIC_IS_REG_IN_RANGE(offReg, GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF, GIC_REDIST_SGI_PPI_REG_ICFGRnE_RANGE_SIZE))
2591 {
2592 uint16_t const idxReg = (offReg - GIC_REDIST_SGI_PPI_REG_ICFGR0_OFF) / cbReg;
2593 return gicReDistWriteIntrConfigReg(pGicDev, pVCpu, idxReg, uValue);
2594 }
2595
2596 AssertReleaseMsgFailed(("offReg=%#RX16 (%s)\n", offReg, gicReDistGetSgiPpiRegDescription(offReg)));
2597 return VERR_INTERNAL_ERROR_2;
2598}
2599
2600
2601/**
2602 * @interface_method_impl{PDMGICBACKEND,pfnSetSpi}
2603 */
2604static DECLCALLBACK(int) gicSetSpi(PVMCC pVM, uint32_t uSpiIntId, bool fAsserted)
2605{
2606 LogFlowFunc(("pVM=%p uSpiIntId=%u fAsserted=%RTbool\n",
2607 pVM, uSpiIntId, fAsserted));
2608
2609 PGIC pGic = VM_TO_GIC(pVM);
2610 PPDMDEVINS pDevIns = pGic->CTX_SUFF(pDevIns);
2611 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2612
2613#ifdef VBOX_WITH_STATISTICS
2614 PVMCPU pVCpu = VMMGetCpuById(pVM, 0);
2615 STAM_COUNTER_INC(&pVCpu->gic.s.StatSetSpi);
2616 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2617#endif
2618 STAM_PROFILE_START(&pGicCpu->StatProfSetSpi, a);
2619
2620 uint16_t const uIntId = GIC_INTID_RANGE_SPI_START + uSpiIntId;
2621 uint16_t const idxIntr = gicDistGetIndexFromIntId(uIntId);
2622
2623 Assert(idxIntr >= GIC_INTID_RANGE_SPI_START);
2624 AssertMsgReturn(idxIntr < sizeof(pGicDev->bmIntrPending) * 8,
2625 ("out-of-range SPI interrupt ID %RU32 (%RU32)\n", uIntId, uSpiIntId),
2626 VERR_INVALID_PARAMETER);
2627
2628 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, pDevIns->pCritSectRoR3, VERR_IGNORED);
2629 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, pDevIns->pCritSectRoR3, rcLock);
2630
2631 /* Update the interrupt pending state. */
2632 if (fAsserted)
2633 ASMBitSet(&pGicDev->bmIntrPending[0], idxIntr);
2634 else
2635 ASMBitClear(&pGicDev->bmIntrPending[0], idxIntr);
2636
2637 int const rc = VBOXSTRICTRC_VAL(gicDistUpdateIrqState(pVM, pGicDev));
2638 STAM_PROFILE_STOP(&pGicCpu->StatProfSetSpi, a);
2639
2640 PDMDevHlpCritSectLeave(pDevIns, pDevIns->pCritSectRoR3);
2641 return rc;
2642}
2643
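/*
 * Illustrative usage sketch, not part of the device code: a device model asserting SPI 5
 * through the PDM GIC backend ends up here with uSpiIntId=5, i.e. INTID 32 + 5 = 37; for
 * non-extended SPIs the distributor bitmap index is expected to equal the INTID (hence the
 * assertion above that idxIntr is at least GIC_INTID_RANGE_SPI_START):
 *
 *     gicSetSpi(pVM, 5, true);     // mark INTID 37 pending and re-evaluate the IRQ state
 *     gicSetSpi(pVM, 5, false);    // deassert it again (level-sensitive usage)
 */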
2644
2645/**
2646 * @interface_method_impl{PDMGICBACKEND,pfnSetPpi}
2647 */
2648static DECLCALLBACK(int) gicSetPpi(PVMCPUCC pVCpu, uint32_t uPpiIntId, bool fAsserted)
2649{
2650 LogFlowFunc(("pVCpu=%p{.idCpu=%u} uPpiIntId=%u fAsserted=%RTbool\n", pVCpu, pVCpu->idCpu, uPpiIntId, fAsserted));
2651
2652 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
2653 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PCGICDEV);
2654 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2655
2656 STAM_COUNTER_INC(&pVCpu->gic.s.StatSetPpi);
2657 STAM_PROFILE_START(&pGicCpu->StatProfSetPpi, b);
2658
2659 uint32_t const uIntId = GIC_INTID_RANGE_PPI_START + uPpiIntId;
2660 uint16_t const idxIntr = gicReDistGetIndexFromIntId(uIntId);
2661
2662 Assert(idxIntr >= GIC_INTID_RANGE_PPI_START);
2663 AssertMsgReturn(idxIntr < sizeof(pGicCpu->bmIntrPending) * 8,
2664 ("out-of-range PPI interrupt ID %RU32 (%RU32)\n", uIntId, uPpiIntId),
2665 VERR_INVALID_PARAMETER);
2666
2667 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, pDevIns->pCritSectRoR3, VERR_IGNORED);
2668 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, pDevIns->pCritSectRoR3, rcLock);
2669
2670 /* Update the interrupt pending state. */
2671 if (fAsserted)
2672 ASMBitSet(&pGicCpu->bmIntrPending[0], idxIntr);
2673 else
2674 ASMBitClear(&pGicCpu->bmIntrPending[0], idxIntr);
2675
2676 int const rc = VBOXSTRICTRC_VAL(gicReDistUpdateIrqState(pGicDev, pVCpu));
2677 STAM_PROFILE_STOP(&pGicCpu->StatProfSetPpi, b);
2678
2679 PDMDevHlpCritSectLeave(pDevIns, pDevIns->pCritSectRoR3);
2680 return rc;
2681}
2682
2683
2684/**
2685 * Sets the specified software generated interrupt (SGI).
2686 *
2687 * @returns Strict VBox status code.
2688 * @param pGicDev The GIC distributor state.
2689 * @param pVCpu The cross context virtual CPU structure.
2690 * @param pDestCpuSet Which CPUs to deliver the SGI to.
2691 * @param uIntId The SGI interrupt ID.
2692 */
2693static VBOXSTRICTRC gicSetSgi(PCGICDEV pGicDev, PVMCPUCC pVCpu, PCVMCPUSET pDestCpuSet, uint8_t uIntId)
2694{
2695 LogFlowFunc(("pVCpu=%p{.idCpu=%u} uIntId=%u\n", pVCpu, pVCpu->idCpu, uIntId));
2696
2697 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
2698 PCVMCC pVM = pVCpu->CTX_SUFF(pVM);
2699 uint32_t const cCpus = pVM->cCpus;
2700 AssertReturn(uIntId <= GIC_INTID_RANGE_SGI_LAST, VERR_INVALID_PARAMETER);
2701 Assert(PDMDevHlpCritSectIsOwner(pDevIns, pDevIns->pCritSectRoR3)); RT_NOREF_PV(pDevIns);
2702
2703 for (VMCPUID idCpu = 0; idCpu < cCpus; idCpu++)
2704 if (VMCPUSET_IS_PRESENT(pDestCpuSet, idCpu))
2705 {
2706 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVM->CTX_SUFF(apCpus)[idCpu]);
2707 pGicCpu->bmIntrPending[0] |= RT_BIT_32(uIntId);
2708 }
2709
2710 return gicDistUpdateIrqState(pVM, pGicDev);
2711}
2712
2713
2714/**
2715 * Writes to the redistributor's SGI group 1 register (ICC_SGI1R_EL1).
2716 *
2717 * @returns Strict VBox status code.
2718 * @param pGicDev The GIC distributor state.
2719 * @param pVCpu The cross context virtual CPU structure.
2720 * @param uValue The value being written to the ICC_SGI1R_EL1 register.
2721 */
2722static VBOXSTRICTRC gicReDistWriteSgiReg(PCGICDEV pGicDev, PVMCPUCC pVCpu, uint64_t uValue)
2723{
2724#ifdef VBOX_WITH_STATISTICS
2725 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2726 STAM_COUNTER_INC(&pVCpu->gic.s.StatSetSgi);
2727 STAM_PROFILE_START(&pGicCpu->StatProfSetSgi, c);
2728#else
2729 PCGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2730#endif
2731
2732 VMCPUSET DestCpuSet;
2733 if (uValue & ARMV8_ICC_SGI1R_EL1_AARCH64_IRM)
2734 {
2735 /*
2736 * Deliver to all VCPUs but this one.
2737 */
2738 VMCPUSET_FILL(&DestCpuSet);
2739 VMCPUSET_DEL(&DestCpuSet, pVCpu->idCpu);
2740 }
2741 else
2742 {
2743 /*
2744 * Target specific VCPUs.
2745 * See ARM GICv3 and GICv4 Software Overview spec 3.3 "Affinity routing".
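         *
         * The target list is a 16-bit mask of Aff0 values within the cluster selected by
         * Aff3.Aff2.Aff1; when range selection (RSS) is in use the RS field offsets that
         * window, so the addressed Aff0 is RS * 16 plus the bit position in the list.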
2746 */
2747 VMCPUSET_EMPTY(&DestCpuSet);
2748 bool const fRangeSelSupport = RT_BOOL(pGicCpu->uIccCtlr & ARMV8_ICC_CTLR_EL1_AARCH64_RSS);
2749 uint8_t const idRangeStart = ARMV8_ICC_SGI1R_EL1_AARCH64_RS_GET(uValue) * 16;
2750 uint16_t const bmCpuInterfaces = ARMV8_ICC_SGI1R_EL1_AARCH64_TARGET_LIST_GET(uValue);
2751 uint8_t const uAff1 = ARMV8_ICC_SGI1R_EL1_AARCH64_AFF1_GET(uValue);
2752 uint8_t const uAff2 = ARMV8_ICC_SGI1R_EL1_AARCH64_AFF2_GET(uValue);
2753 uint8_t const uAff3 = (pGicCpu->uIccCtlr & ARMV8_ICC_CTLR_EL1_AARCH64_A3V)
2754 ? ARMV8_ICC_SGI1R_EL1_AARCH64_AFF3_GET(uValue)
2755 : 0;
2756 uint32_t const cCpus = pVCpu->CTX_SUFF(pVM)->cCpus;
2757 for (uint8_t idCpuInterface = 0; idCpuInterface < 16; idCpuInterface++)
2758 {
2759 if (bmCpuInterfaces & RT_BIT(idCpuInterface))
2760 {
2761 VMCPUID idCpuTarget;
2762 if (fRangeSelSupport)
2763 idCpuTarget = RT_MAKE_U32_FROM_U8(idRangeStart + idCpuInterface, uAff1, uAff2, uAff3);
2764 else
2765 idCpuTarget = gicGetCpuIdFromAffinity(idCpuInterface, uAff1, uAff2, uAff3);
2766 if (RT_LIKELY(idCpuTarget < cCpus))
2767 VMCPUSET_ADD(&DestCpuSet, idCpuTarget);
2768 else
2769 AssertReleaseMsgFailed(("VCPU ID out-of-bounds %RU32, must be < %u\n", idCpuTarget, cCpus));
2770 }
2771 }
2772 }
2773
2774 if (!VMCPUSET_IS_EMPTY(&DestCpuSet))
2775 {
2776 uint8_t const uSgiIntId = ARMV8_ICC_SGI1R_EL1_AARCH64_INTID_GET(uValue);
2777 Assert(GIC_IS_INTR_SGI(uSgiIntId));
2778 VBOXSTRICTRC const rcStrict = gicSetSgi(pGicDev, pVCpu, &DestCpuSet, uSgiIntId);
2779 Assert(RT_SUCCESS(rcStrict)); RT_NOREF_PV(rcStrict);
2780 }
2781
2782 STAM_PROFILE_STOP(&pGicCpu->StatProfSetSgi, c);
2783 return VINF_SUCCESS;
2784}
2785
2786
2787/**
2788 * @interface_method_impl{PDMGICBACKEND,pfnReadSysReg}
2789 */
2790static DECLCALLBACK(VBOXSTRICTRC) gicReadSysReg(PVMCPUCC pVCpu, uint32_t u32Reg, uint64_t *pu64Value)
2791{
2792 /*
2793 * Validate.
2794 */
2795 VMCPU_ASSERT_EMT(pVCpu);
2796 Assert(pu64Value);
2797
2798 STAM_COUNTER_INC(&pVCpu->gic.s.StatSysRegRead);
2799
2800 *pu64Value = 0;
2801 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2802 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
2803 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2804
2805 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, pDevIns->pCritSectRoR3, VERR_IGNORED);
2806 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, pDevIns->pCritSectRoR3, rcLock);
2807
2808 switch (u32Reg)
2809 {
2810 case ARMV8_AARCH64_SYSREG_ICC_PMR_EL1:
2811 *pu64Value = pGicCpu->bIntrPriorityMask;
2812 break;
2813 case ARMV8_AARCH64_SYSREG_ICC_IAR0_EL1:
2814 AssertReleaseFailed();
2815 break;
2816 case ARMV8_AARCH64_SYSREG_ICC_EOIR0_EL1:
2817 AssertReleaseFailed();
2818 break;
2819 case ARMV8_AARCH64_SYSREG_ICC_HPPIR0_EL1:
2820 AssertReleaseFailed();
2821 break;
2822 case ARMV8_AARCH64_SYSREG_ICC_BPR0_EL1:
2823 *pu64Value = ARMV8_ICC_BPR0_EL1_AARCH64_BINARYPOINT_SET(pGicCpu->bBinaryPtGroup0);
2824 break;
2825 case ARMV8_AARCH64_SYSREG_ICC_AP0R0_EL1:
2826 AssertReleaseFailed();
2827 *pu64Value = pGicCpu->bmActivePriorityGroup0[0];
2828 break;
2829 case ARMV8_AARCH64_SYSREG_ICC_AP0R1_EL1:
2830 AssertReleaseFailed();
2831 *pu64Value = pGicCpu->bmActivePriorityGroup0[1];
2832 break;
2833 case ARMV8_AARCH64_SYSREG_ICC_AP0R2_EL1:
2834 AssertReleaseFailed();
2835 *pu64Value = pGicCpu->bmActivePriorityGroup0[2];
2836 break;
2837 case ARMV8_AARCH64_SYSREG_ICC_AP0R3_EL1:
2838 AssertReleaseFailed();
2839 *pu64Value = pGicCpu->bmActivePriorityGroup0[3];
2840 break;
2841 case ARMV8_AARCH64_SYSREG_ICC_AP1R0_EL1:
2842 AssertReleaseFailed();
2843 *pu64Value = pGicCpu->bmActivePriorityGroup1[0];
2844 break;
2845 case ARMV8_AARCH64_SYSREG_ICC_AP1R1_EL1:
2846 AssertReleaseFailed();
2847 *pu64Value = pGicCpu->bmActivePriorityGroup1[1];
2848 break;
2849 case ARMV8_AARCH64_SYSREG_ICC_AP1R2_EL1:
2850 AssertReleaseFailed();
2851 *pu64Value = pGicCpu->bmActivePriorityGroup1[2];
2852 break;
2853 case ARMV8_AARCH64_SYSREG_ICC_AP1R3_EL1:
2854 AssertReleaseFailed();
2855 *pu64Value = pGicCpu->bmActivePriorityGroup1[3];
2856 break;
2857 case ARMV8_AARCH64_SYSREG_ICC_NMIAR1_EL1:
2858 AssertReleaseFailed();
2859 break;
2860 case ARMV8_AARCH64_SYSREG_ICC_DIR_EL1:
2861 AssertReleaseFailed();
2862 break;
2863 case ARMV8_AARCH64_SYSREG_ICC_RPR_EL1:
2864 *pu64Value = pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority];
2865 break;
2866 case ARMV8_AARCH64_SYSREG_ICC_SGI1R_EL1:
2867 AssertReleaseFailed();
2868 break;
2869 case ARMV8_AARCH64_SYSREG_ICC_ASGI1R_EL1:
2870 AssertReleaseFailed();
2871 break;
2872 case ARMV8_AARCH64_SYSREG_ICC_SGI0R_EL1:
2873 AssertReleaseFailed();
2874 break;
2875 case ARMV8_AARCH64_SYSREG_ICC_IAR1_EL1:
2876 *pu64Value = gicAckHighestPriorityPendingIntr(pGicDev, pVCpu, false /*fGroup0*/, true /*fGroup1*/);
2877 break;
2878 case ARMV8_AARCH64_SYSREG_ICC_EOIR1_EL1:
2879 AssertReleaseFailed();
2880 break;
2881 case ARMV8_AARCH64_SYSREG_ICC_HPPIR1_EL1:
2882 {
2883 AssertReleaseFailed();
2884 *pu64Value = gicGetHighestPriorityPendingIntr(pGicDev, pGicCpu, false /*fGroup0*/, true /*fGroup1*/,
2885 NULL /*pidxIntr*/, NULL /*pbPriority*/);
2886 break;
2887 }
2888 case ARMV8_AARCH64_SYSREG_ICC_BPR1_EL1:
2889 *pu64Value = ARMV8_ICC_BPR1_EL1_AARCH64_BINARYPOINT_SET(pGicCpu->bBinaryPtGroup1);
2890 break;
2891 case ARMV8_AARCH64_SYSREG_ICC_CTLR_EL1:
2892 *pu64Value = pGicCpu->uIccCtlr;
2893 break;
2894 case ARMV8_AARCH64_SYSREG_ICC_SRE_EL1:
2895 AssertReleaseFailed();
2896 break;
2897 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN0_EL1:
2898 *pu64Value = pGicCpu->fIntrGroup0Enabled ? ARMV8_ICC_IGRPEN0_EL1_AARCH64_ENABLE : 0;
2899 break;
2900 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN1_EL1:
2901 *pu64Value = pGicCpu->fIntrGroup1Enabled ? ARMV8_ICC_IGRPEN1_EL1_AARCH64_ENABLE : 0;
2902 break;
2903 default:
2904 AssertReleaseMsgFailed(("u32Reg=%#RX32\n", u32Reg));
2905 break;
2906 }
2907
2908 PDMDevHlpCritSectLeave(pDevIns, pDevIns->pCritSectRoR3);
2909
2910 LogFlowFunc(("pVCpu=%p u32Reg=%#x{%s} pu64Value=%RX64\n", pVCpu, u32Reg, gicIccGetRegDescription(u32Reg), *pu64Value));
2911 return VINF_SUCCESS;
2912}
2913
2914
2915/**
2916 * @interface_method_impl{PDMGICBACKEND,pfnWriteSysReg}
2917 */
2918static DECLCALLBACK(VBOXSTRICTRC) gicWriteSysReg(PVMCPUCC pVCpu, uint32_t u32Reg, uint64_t u64Value)
2919{
2920 /*
2921 * Validate.
2922 */
2923 VMCPU_ASSERT_EMT(pVCpu);
2924 LogFlowFunc(("pVCpu=%p u32Reg=%#x{%s} u64Value=%RX64\n", pVCpu, u32Reg, gicIccGetRegDescription(u32Reg), u64Value));
2925
2926 STAM_COUNTER_INC(&pVCpu->gic.s.StatSysRegWrite);
2927
2928 PPDMDEVINS pDevIns = VMCPU_TO_DEVINS(pVCpu);
2929 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
2930 PGICCPU pGicCpu = VMCPU_TO_GICCPU(pVCpu);
2931
2932 int const rcLock = PDMDevHlpCritSectEnter(pDevIns, pDevIns->pCritSectRoR3, VERR_IGNORED);
2933 PDM_CRITSECT_RELEASE_ASSERT_RC_DEV(pDevIns, pDevIns->pCritSectRoR3, rcLock);
2934
2935 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
2936 switch (u32Reg)
2937 {
2938 case ARMV8_AARCH64_SYSREG_ICC_PMR_EL1:
2939 LogFlowFunc(("ICC_PMR_EL1: Interrupt priority now %u\n", (uint8_t)u64Value));
2940 pGicCpu->bIntrPriorityMask = (uint8_t)u64Value;
2941 rcStrict = gicReDistUpdateIrqState(pGicDev, pVCpu);
2942 break;
2943 case ARMV8_AARCH64_SYSREG_ICC_IAR0_EL1:
2944 AssertReleaseFailed();
2945 break;
2946 case ARMV8_AARCH64_SYSREG_ICC_EOIR0_EL1:
2947 AssertReleaseFailed();
2948 break;
2949 case ARMV8_AARCH64_SYSREG_ICC_HPPIR0_EL1:
2950 AssertReleaseFailed();
2951 break;
2952 case ARMV8_AARCH64_SYSREG_ICC_BPR0_EL1:
2953 pGicCpu->bBinaryPtGroup0 = (uint8_t)ARMV8_ICC_BPR0_EL1_AARCH64_BINARYPOINT_GET(u64Value);
2954 break;
2955 case ARMV8_AARCH64_SYSREG_ICC_AP0R0_EL1:
2956 case ARMV8_AARCH64_SYSREG_ICC_AP0R1_EL1:
2957 case ARMV8_AARCH64_SYSREG_ICC_AP0R2_EL1:
2958 case ARMV8_AARCH64_SYSREG_ICC_AP0R3_EL1:
2959 case ARMV8_AARCH64_SYSREG_ICC_AP1R0_EL1:
2960 case ARMV8_AARCH64_SYSREG_ICC_AP1R1_EL1:
2961 case ARMV8_AARCH64_SYSREG_ICC_AP1R2_EL1:
2962 case ARMV8_AARCH64_SYSREG_ICC_AP1R3_EL1:
2963            /* Writes ignored; a well-behaved guest writes either all zeroes or the value last read from the register. */
2964 break;
2965 case ARMV8_AARCH64_SYSREG_ICC_NMIAR1_EL1:
2966 AssertReleaseFailed();
2967 break;
2968 case ARMV8_AARCH64_SYSREG_ICC_DIR_EL1:
2969 AssertReleaseFailed();
2970 break;
2971 case ARMV8_AARCH64_SYSREG_ICC_RPR_EL1:
2972 AssertReleaseFailed();
2973 break;
2974 case ARMV8_AARCH64_SYSREG_ICC_SGI1R_EL1:
2975 {
2976 gicReDistWriteSgiReg(pGicDev, pVCpu, u64Value);
2977 break;
2978 }
2979 case ARMV8_AARCH64_SYSREG_ICC_ASGI1R_EL1:
2980 AssertReleaseFailed();
2981 break;
2982 case ARMV8_AARCH64_SYSREG_ICC_SGI0R_EL1:
2983 AssertReleaseFailed();
2984 break;
2985 case ARMV8_AARCH64_SYSREG_ICC_IAR1_EL1:
2986 AssertReleaseFailed();
2987 break;
2988 case ARMV8_AARCH64_SYSREG_ICC_EOIR1_EL1:
2989 {
2990 /*
2991 * We only support priority drop + interrupt deactivation with writes to this register.
2992 * This avoids an extra access which would be required by software for deactivation.
2993 */
2994 Assert(!(pGicCpu->uIccCtlr & ARMV8_ICC_CTLR_EL1_AARCH64_EOIMODE));
2995
2996 /*
2997 * Mark the interrupt as inactive, though it might still be pending.
2998 * It is up to the guest to ensure the interrupt ID belongs to the right group as
2999 * failure to do so results in unpredictable behavior.
3000 *
3001 * See ARM GIC spec. 12.2.10 "ICC_EOIR1_EL1, Interrupt Controller End Of Interrupt Register 1".
3002             * NOTE! The order of the 'if' checks below is crucial.
3003 */
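            /*
             * The checks below walk the INTID ranges in ascending order: SGIs and PPIs live in
             * the per-CPU (redistributor) active bitmap, SPIs in the distributor bitmap, the
             * special INTIDs are ignored, and the extended PPI/SPI ranges again map to the
             * redistributor and distributor bitmaps respectively.
             */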
3004 uint16_t const uIntId = (uint16_t)u64Value;
3005 if (uIntId <= GIC_INTID_RANGE_PPI_LAST)
3006 {
3007 /* SGIs and PPIs. */
3008 AssertCompile(GIC_INTID_RANGE_PPI_LAST < 8 * sizeof(pGicDev->bmIntrActive[0]));
3009 Assert(pGicDev->fAffRoutingEnabled);
3010 pGicCpu->bmIntrActive[0] &= ~RT_BIT_32(uIntId);
3011 }
3012 else if (uIntId <= GIC_INTID_RANGE_SPI_LAST)
3013 {
3014 /* SPIs. */
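                /* Note: the helper call is left commented out and the INTID is used directly as
                   the distributor bitmap index; presumably the two coincide for SPIs. */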
3015 uint16_t const idxIntr = /*gicDistGetIndexFromIntId*/(uIntId);
3016 AssertReturn(idxIntr < sizeof(pGicDev->bmIntrActive) * 8, VERR_BUFFER_OVERFLOW);
3017 ASMBitClear(&pGicDev->bmIntrActive[0], idxIntr);
3018 }
3019 else if (uIntId <= GIC_INTID_RANGE_SPECIAL_NO_INTERRUPT)
3020 {
3021 /* Special interrupt IDs, ignored. */
3022 Log(("Ignoring write to EOI with special interrupt ID.\n"));
3023 break;
3024 }
3025 else if (uIntId <= GIC_INTID_RANGE_EXT_PPI_LAST)
3026 {
3027 /* Extended PPIs. */
3028 uint16_t const idxIntr = gicReDistGetIndexFromIntId(uIntId);
3029 AssertReturn(idxIntr < sizeof(pGicCpu->bmIntrActive) * 8, VERR_BUFFER_OVERFLOW);
3030 ASMBitClear(&pGicCpu->bmIntrActive[0], idxIntr);
3031 }
3032 else if (uIntId <= GIC_INTID_RANGE_EXT_SPI_LAST)
3033 {
3034 /* Extended SPIs. */
3035 uint16_t const idxIntr = gicDistGetIndexFromIntId(uIntId);
3036 AssertReturn(idxIntr < sizeof(pGicDev->bmIntrActive) * 8, VERR_BUFFER_OVERFLOW);
3037 ASMBitClear(&pGicDev->bmIntrActive[0], idxIntr);
3038 }
3039 else
3040 {
3041 AssertMsgFailed(("Invalid INTID %u\n", uIntId));
3042 break;
3043 }
3044
3045 /*
3046 * Drop priority by restoring previous interrupt.
3047 */
3048 if (RT_LIKELY(pGicCpu->idxRunningPriority))
3049 {
3050 LogFlowFunc(("Restoring interrupt priority from %u -> %u (idxRunningPriority: %u -> %u)\n",
3051 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority],
3052 pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority - 1],
3053 pGicCpu->idxRunningPriority, pGicCpu->idxRunningPriority - 1));
3054
3055 /*
3056 * Clear the interrupt priority from the active priorities bitmap.
3057 * It is up to the guest to ensure that writes to EOI registers are done in the exact
3058 * reverse order of the reads from the IAR registers.
3059 *
3060 * See ARM GIC spec 4.1.1 "Physical CPU interface".
3061 */
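                /* The bitmap holds one bit per preemption level; with 128 bits available the
                   8-bit priority is shifted right by one to form the index (the apparent
                   rationale, judging from the AssertCompile below). */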
3062 uint8_t const idxPreemptionLevel = pGicCpu->abRunningPriorities[pGicCpu->idxRunningPriority] >> 1;
3063 AssertCompile(sizeof(pGicCpu->bmActivePriorityGroup1) * 8 >= 128);
3064 ASMBitClear(&pGicCpu->bmActivePriorityGroup1[0], idxPreemptionLevel);
3065
3066 pGicCpu->idxRunningPriority--;
3067 Assert(pGicCpu->abRunningPriorities[0] == GIC_IDLE_PRIORITY);
3068 }
3069 else
3070 AssertReleaseMsgFailed(("Index of running-priority interrupt out-of-bounds %u\n", pGicCpu->idxRunningPriority));
3071 rcStrict = gicReDistUpdateIrqState(pGicDev, pVCpu);
3072 break;
3073 }
3074 case ARMV8_AARCH64_SYSREG_ICC_HPPIR1_EL1:
3075 AssertReleaseFailed();
3076 break;
3077 case ARMV8_AARCH64_SYSREG_ICC_BPR1_EL1:
3078 pGicCpu->bBinaryPtGroup1 = (uint8_t)ARMV8_ICC_BPR1_EL1_AARCH64_BINARYPOINT_GET(u64Value);
3079 break;
3080 case ARMV8_AARCH64_SYSREG_ICC_CTLR_EL1:
3081 pGicCpu->uIccCtlr &= ARMV8_ICC_CTLR_EL1_RW;
3082 /** @todo */
3083 break;
3084 case ARMV8_AARCH64_SYSREG_ICC_SRE_EL1:
3085 AssertReleaseFailed();
3086 break;
3087 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN0_EL1:
3088 pGicCpu->fIntrGroup0Enabled = RT_BOOL(u64Value & ARMV8_ICC_IGRPEN0_EL1_AARCH64_ENABLE);
3089 break;
3090 case ARMV8_AARCH64_SYSREG_ICC_IGRPEN1_EL1:
3091 pGicCpu->fIntrGroup1Enabled = RT_BOOL(u64Value & ARMV8_ICC_IGRPEN1_EL1_AARCH64_ENABLE);
3092 break;
3093 default:
3094 AssertReleaseMsgFailed(("u32Reg=%#RX32\n", u32Reg));
3095 break;
3096 }
3097
3098 PDMDevHlpCritSectLeave(pDevIns, pDevIns->pCritSectRoR3);
3099 return rcStrict;
3100}
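/*
 * Summary of the acknowledge/EOI flow implemented above (EOImode is fixed to 0): a guest read
 * of ICC_IAR1_EL1 acknowledges the highest-priority pending group 1 interrupt and raises the
 * running priority, and the matching ICC_EOIR1_EL1 write both drops the priority and
 * deactivates the interrupt in a single access.
 */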
3101
3102
3103/**
3104 * Initializes the GIC distributor state.
3105 *
3106 * @param pDevIns The device instance.
3107 * @remarks This is also called during VM reset, so do NOT remove values that are
3108 * cleared to zero!
3109 */
3110static void gicInit(PPDMDEVINS pDevIns)
3111{
3112 LogFlowFunc(("\n"));
3113 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
3114
3115 /* Distributor. */
3116 RT_ZERO(pGicDev->bmIntrGroup);
3117 RT_ZERO(pGicDev->bmIntrConfig);
3118 RT_ZERO(pGicDev->bmIntrEnabled);
3119 RT_ZERO(pGicDev->bmIntrPending);
3120 RT_ZERO(pGicDev->bmIntrActive);
3121 RT_ZERO(pGicDev->abIntrPriority);
3122 RT_ZERO(pGicDev->au32IntrRouting);
3123 RT_ZERO(pGicDev->bmIntrRoutingMode);
3124 pGicDev->fIntrGroup0Enabled = false;
3125 pGicDev->fIntrGroup1Enabled = false;
3126 pGicDev->fAffRoutingEnabled = true; /* GICv2 backwards compatibility is not implemented, so this is RA1/WI. */
3127
3128 /* GITS. */
3129 PGITSDEV pGitsDev = &pGicDev->Gits;
3130 gitsInit(pGitsDev);
3131
3132 /* LPIs. */
3133 RT_ZERO(pGicDev->abLpiConfig);
3134 RT_ZERO(pGicDev->bmLpiPending);
3135 pGicDev->uLpiConfigBaseReg.u = 0;
3136 pGicDev->uLpiPendingBaseReg.u = 0;
3137 pGicDev->fEnableLpis = false;
3138}
3139
3140
3141/**
3142 * Initializes the GIC redistributor and CPU interface state.
3143 *
3144 * @param pDevIns The device instance.
3145 * @param pVCpu The cross context virtual CPU structure.
3146 * @remarks This is also called during VM reset, so do NOT remove values that are
3147 * cleared to zero!
3148 */
3149static void gicInitCpu(PPDMDEVINS pDevIns, PVMCPUCC pVCpu)
3150{
3151 LogFlowFunc(("[%u]\n", pVCpu->idCpu));
3152 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
3153 PGICCPU pGicCpu = &pVCpu->gic.s;
3154
3155 RT_ZERO(pGicCpu->bmIntrGroup);
3156 RT_ZERO(pGicCpu->bmIntrConfig);
3157    /* SGIs are always edge-triggered; writes to GICR_ICFGR0 are ignored. */
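    /* GICR_ICFGR0 encodes two configuration bits per INTID with bit[1] set for edge-triggered,
       so 0xaaaaaaaa marks all 16 SGIs as edge-triggered. */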
3158 pGicCpu->bmIntrConfig[0] = 0xaaaaaaaa;
3159 RT_ZERO(pGicCpu->bmIntrEnabled);
3160 RT_ZERO(pGicCpu->bmIntrPending);
3161 RT_ZERO(pGicCpu->bmIntrActive);
3162 RT_ZERO(pGicCpu->abIntrPriority);
3163
3164 pGicCpu->uIccCtlr = ARMV8_ICC_CTLR_EL1_AARCH64_PMHE
3165 | ARMV8_ICC_CTLR_EL1_AARCH64_PRIBITS_SET(4)
3166 | ARMV8_ICC_CTLR_EL1_AARCH64_IDBITS_SET(ARMV8_ICC_CTLR_EL1_AARCH64_IDBITS_16BITS)
3167 | (pGicDev->fRangeSel ? ARMV8_ICC_CTLR_EL1_AARCH64_RSS : 0)
3168 | (pGicDev->fAff3Levels ? ARMV8_ICC_CTLR_EL1_AARCH64_A3V : 0)
3169 | (pGicDev->fExtPpi || pGicDev->fExtSpi ? ARMV8_ICC_CTLR_EL1_AARCH64_EXTRANGE : 0);
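    /* Note: per the GIC architecture the PRIbits field encodes the number of priority bits
       minus one, so PRIBITS_SET(4) advertises 5 priority bits (32 priority levels), and the
       IDbits value advertises a 16-bit INTID space. */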
3170
3171 pGicCpu->bIntrPriorityMask = 0; /* Means no interrupt gets through to the PE. */
3172 pGicCpu->idxRunningPriority = 0;
3173 memset((void *)&pGicCpu->abRunningPriorities[0], 0xff, sizeof(pGicCpu->abRunningPriorities));
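    /* 0xff is the idle priority; slot 0 always holds it, which the EOI path above asserts
       (abRunningPriorities[0] == GIC_IDLE_PRIORITY). */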
3174 RT_ZERO(pGicCpu->bmActivePriorityGroup0);
3175 RT_ZERO(pGicCpu->bmActivePriorityGroup1);
3176 pGicCpu->bBinaryPtGroup0 = 0;
3177 pGicCpu->bBinaryPtGroup1 = 0;
3178 pGicCpu->fIntrGroup0Enabled = false;
3179 pGicCpu->fIntrGroup1Enabled = false;
3180}
3181
3182
3183/**
3184 * Initializes per-VM GIC to the state following a power-up or hardware
3185 * reset.
3186 *
3187 * @param pDevIns The device instance.
3188 */
3189DECLHIDDEN(void) gicReset(PPDMDEVINS pDevIns)
3190{
3191 LogFlowFunc(("\n"));
3192 gicInit(pDevIns);
3193}
3194
3195
3196/**
3197 * Initializes per-VCPU GIC to the state following a power-up or hardware
3198 * reset.
3199 *
3200 * @param pDevIns The device instance.
3201 * @param pVCpu The cross context virtual CPU structure.
3202 */
3203DECLHIDDEN(void) gicResetCpu(PPDMDEVINS pDevIns, PVMCPUCC pVCpu)
3204{
3205 LogFlowFunc(("[%u]\n", pVCpu->idCpu));
3206 VMCPU_ASSERT_EMT_OR_NOT_RUNNING(pVCpu);
3207 gicInitCpu(pDevIns, pVCpu);
3208}
3209
3210
3211/**
3212 * @callback_method_impl{FNIOMMMIONEWREAD}
3213 */
3214DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicDistMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
3215{
3216 NOREF(pvUser);
3217 Assert(!(off & 0x3));
3218 Assert(cb == 4); RT_NOREF_PV(cb);
3219
3220 PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
3221 uint16_t offReg = off & 0xfffc;
3222 uint32_t uValue = 0;
3223
3224 STAM_COUNTER_INC(&pVCpu->gic.s.StatMmioRead);
3225
3226 VBOXSTRICTRC rc = VBOXSTRICTRC_VAL(gicDistReadRegister(pDevIns, pVCpu, offReg, &uValue));
3227 *(uint32_t *)pv = uValue;
3228
3229 LogFlowFunc(("[%u]: offReg=%#RX16 (%s) uValue=%#RX32\n", pVCpu->idCpu, offReg, gicDistGetRegDescription(offReg), uValue));
3230 return rc;
3231}
3232
3233
3234/**
3235 * @callback_method_impl{FNIOMMMIONEWWRITE}
3236 */
3237DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicDistMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
3238{
3239 NOREF(pvUser);
3240 Assert(!(off & 0x3));
3241 Assert(cb == 4); RT_NOREF_PV(cb);
3242
3243 PVMCPUCC pVCpu = PDMDevHlpGetVMCPU(pDevIns);
3244 uint16_t offReg = off & 0xfffc;
3245 uint32_t uValue = *(uint32_t *)pv;
3246
3247 STAM_COUNTER_INC(&pVCpu->gic.s.StatMmioWrite);
3248 LogFlowFunc(("[%u]: offReg=%#RX16 (%s) uValue=%#RX32\n", pVCpu->idCpu, offReg, gicDistGetRegDescription(offReg), uValue));
3249
3250 return gicDistWriteRegister(pDevIns, pVCpu, offReg, uValue);
3251}
3252
3253
3254/**
3255 * @callback_method_impl{FNIOMMMIONEWREAD}
3256 */
3257DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicReDistMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
3258{
3259 NOREF(pvUser);
3260 Assert(!(off & 0x3));
3261 Assert(cb == 4); RT_NOREF_PV(cb);
3262
3263 /*
3264 * Determine the redistributor being targeted. Each redistributor takes
3265 * GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE bytes
3266 * and the redistributors are adjacent.
3267 */
3268 uint32_t const idReDist = off / (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
3269 off %= (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
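    /* Worked example (assuming the typical 64 KiB frame sizes, i.e. 128 KiB per redistributor):
       an access at offset 0x20080 selects redistributor 1 (0x20080 / 0x20000) and lands at
       offset 0x80 of its RD frame, whereas offset 0x30080 would fall into its SGI/PPI frame. */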
3270
3271 PVMCC pVM = PDMDevHlpGetVM(pDevIns);
3272 Assert(idReDist < pVM->cCpus);
3273 PVMCPUCC pVCpu = pVM->CTX_SUFF(apCpus)[idReDist];
3274
3275 STAM_COUNTER_INC(&pVCpu->gic.s.StatMmioRead);
3276
3277 /* Redistributor or SGI/PPI frame? */
3278 uint16_t const offReg = off & 0xfffc;
3279 uint32_t uValue = 0;
3280 VBOXSTRICTRC rcStrict;
3281 if (off < GIC_REDIST_REG_FRAME_SIZE)
3282 rcStrict = gicReDistReadRegister(pDevIns, pVCpu, idReDist, offReg, &uValue);
3283 else
3284 rcStrict = gicReDistReadSgiPpiRegister(pDevIns, pVCpu, offReg, &uValue);
3285
3286 *(uint32_t *)pv = uValue;
3287 LogFlowFunc(("[%u]: off=%RGp idReDist=%u offReg=%#RX16 (%s) uValue=%#RX32 -> %Rrc\n", pVCpu->idCpu, off, idReDist, offReg,
3288 gicReDistGetRegDescription(offReg), uValue, VBOXSTRICTRC_VAL(rcStrict)));
3289 return rcStrict;
3290}
3291
3292
3293/**
3294 * @callback_method_impl{FNIOMMMIONEWWRITE}
3295 */
3296DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicReDistMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
3297{
3298 NOREF(pvUser);
3299 Assert(!(off & 0x3));
3300 Assert(cb == 4); RT_NOREF_PV(cb);
3301
3302 uint32_t uValue = *(uint32_t *)pv;
3303
3304 /*
3305 * Determine the redistributor being targeted. Each redistributor takes
3306 * GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE bytes
3307 * and the redistributors are adjacent.
3308 */
3309 uint32_t const idReDist = off / (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
3310 off %= (GIC_REDIST_REG_FRAME_SIZE + GIC_REDIST_SGI_PPI_REG_FRAME_SIZE);
3311
3312 PCVMCC pVM = PDMDevHlpGetVM(pDevIns);
3313 Assert(idReDist < pVM->cCpus);
3314 PVMCPUCC pVCpu = pVM->CTX_SUFF(apCpus)[idReDist];
3315
3316 STAM_COUNTER_INC(&pVCpu->gic.s.StatMmioWrite);
3317
3318 /* Redistributor or SGI/PPI frame? */
3319 uint16_t const offReg = off & 0xfffc;
3320 VBOXSTRICTRC rcStrict;
3321 if (off < GIC_REDIST_REG_FRAME_SIZE)
3322 rcStrict = gicReDistWriteRegister(pDevIns, pVCpu, offReg, uValue);
3323 else
3324 rcStrict = gicReDistWriteSgiPpiRegister(pDevIns, pVCpu, offReg, uValue);
3325
3326 LogFlowFunc(("[%u]: off=%RGp idReDist=%u offReg=%#RX16 (%s) uValue=%#RX32 -> %Rrc\n", pVCpu->idCpu, off, idReDist, offReg,
3327 gicReDistGetRegDescription(offReg), uValue, VBOXSTRICTRC_VAL(rcStrict)));
3328 return rcStrict;
3329}
3330
3331
3332/**
3333 * @callback_method_impl{FNIOMMMIONEWREAD}
3334 */
3335DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicItsMmioRead(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void *pv, unsigned cb)
3336{
3337 RT_NOREF_PV(pvUser);
3338 Assert(!(off & 0x3));
3339 Assert(cb == 8 || cb == 4);
3340
3341 PCGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PCGICDEV);
3342 PCGITSDEV pGitsDev = &pGicDev->Gits;
3343 uint64_t uReg;
3344 if (off < GITS_REG_FRAME_SIZE)
3345 {
3346 /* Control registers space. */
3347 uint16_t const offReg = off & 0xfffc;
3348 uReg = gitsMmioReadCtrl(pGitsDev, offReg, cb);
3349 LogFlowFunc(("offReg=%#RX16 (%s) read %#RX64\n", offReg, gitsGetCtrlRegDescription(offReg), uReg));
3350 }
3351 else
3352 {
3353 /* Translation registers space. */
3354 uint16_t const offReg = (off - GITS_REG_FRAME_SIZE) & 0xfffc;
3355 uReg = gitsMmioReadTranslate(pGitsDev, offReg, cb);
3356 LogFlowFunc(("offReg=%#RX16 (%s) read %#RX64\n", offReg, gitsGetTranslationRegDescription(offReg), uReg));
3357 }
3358
3359 if (cb == 8)
3360 *(uint64_t *)pv = uReg;
3361 else
3362 *(uint32_t *)pv = uReg;
3363 return VINF_SUCCESS;
3364}
3365
3366
3367/**
3368 * @callback_method_impl{FNIOMMMIONEWWRITE}
3369 */
3370DECL_HIDDEN_CALLBACK(VBOXSTRICTRC) gicItsMmioWrite(PPDMDEVINS pDevIns, void *pvUser, RTGCPHYS off, void const *pv, unsigned cb)
3371{
3372 RT_NOREF_PV(pvUser);
3373 Assert(!(off & 0x3));
3374 Assert(cb == 8 || cb == 4);
3375
3376 PGICDEV pGicDev = PDMDEVINS_2_DATA(pDevIns, PGICDEV);
3377 PGITSDEV pGitsDev = &pGicDev->Gits;
3378
3379 uint64_t const uValue = cb == 8 ? *(uint64_t *)pv : *(uint32_t *)pv;
3380 if (off < GITS_REG_FRAME_SIZE)
3381 {
3382 /* Control registers space. */
3383 uint16_t const offReg = off & 0xfffc;
3384 gitsMmioWriteCtrl(pDevIns, pGitsDev, offReg, uValue, cb);
3385 LogFlowFunc(("offReg=%#RX16 (%s) written %#RX64\n", offReg, gitsGetCtrlRegDescription(offReg), uValue));
3386 }
3387 else
3388 {
3389 /* Translation registers space. */
3390 uint16_t const offReg = (off - GITS_REG_FRAME_SIZE) & 0xfffc;
3391 gitsMmioWriteTranslate(pGitsDev, offReg, uValue, cb);
3392 LogFlowFunc(("offReg=%#RX16 (%s) written %#RX64\n", offReg, gitsGetTranslationRegDescription(offReg), uValue));
3393 }
3394 return VINF_SUCCESS;
3395}
3396
3397
3398/**
3399 * GIC device registration structure.
3400 */
3401const PDMDEVREG g_DeviceGIC =
3402{
3403 /* .u32Version = */ PDM_DEVREG_VERSION,
3404 /* .uReserved0 = */ 0,
3405 /* .szName = */ "gic",
3406 /* .fFlags = */ PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RZ | PDM_DEVREG_FLAGS_NEW_STYLE,
3407 /* .fClass = */ PDM_DEVREG_CLASS_PIC,
3408 /* .cMaxInstances = */ 1,
3409 /* .uSharedVersion = */ 42,
3410 /* .cbInstanceShared = */ sizeof(GICDEV),
3411 /* .cbInstanceCC = */ 0,
3412 /* .cbInstanceRC = */ 0,
3413 /* .cMaxPciDevices = */ 0,
3414 /* .cMaxMsixVectors = */ 0,
3415 /* .pszDescription = */ "Generic Interrupt Controller",
3416#if defined(IN_RING3)
3417 /* .szRCMod = */ "VMMRC.rc",
3418 /* .szR0Mod = */ "VMMR0.r0",
3419 /* .pfnConstruct = */ gicR3Construct,
3420 /* .pfnDestruct = */ gicR3Destruct,
3421 /* .pfnRelocate = */ NULL,
3422 /* .pfnMemSetup = */ NULL,
3423 /* .pfnPowerOn = */ NULL,
3424 /* .pfnReset = */ gicR3Reset,
3425 /* .pfnSuspend = */ NULL,
3426 /* .pfnResume = */ NULL,
3427 /* .pfnAttach = */ NULL,
3428 /* .pfnDetach = */ NULL,
3429 /* .pfnQueryInterface = */ NULL,
3430 /* .pfnInitComplete = */ NULL,
3431 /* .pfnPowerOff = */ NULL,
3432 /* .pfnSoftReset = */ NULL,
3433 /* .pfnReserved0 = */ NULL,
3434 /* .pfnReserved1 = */ NULL,
3435 /* .pfnReserved2 = */ NULL,
3436 /* .pfnReserved3 = */ NULL,
3437 /* .pfnReserved4 = */ NULL,
3438 /* .pfnReserved5 = */ NULL,
3439 /* .pfnReserved6 = */ NULL,
3440 /* .pfnReserved7 = */ NULL,
3441#elif defined(IN_RING0)
3442 /* .pfnEarlyConstruct = */ NULL,
3443 /* .pfnConstruct = */ NULL,
3444 /* .pfnDestruct = */ NULL,
3445 /* .pfnFinalDestruct = */ NULL,
3446 /* .pfnRequest = */ NULL,
3447 /* .pfnReserved0 = */ NULL,
3448 /* .pfnReserved1 = */ NULL,
3449 /* .pfnReserved2 = */ NULL,
3450 /* .pfnReserved3 = */ NULL,
3451 /* .pfnReserved4 = */ NULL,
3452 /* .pfnReserved5 = */ NULL,
3453 /* .pfnReserved6 = */ NULL,
3454 /* .pfnReserved7 = */ NULL,
3455#elif defined(IN_RC)
3456 /* .pfnConstruct = */ NULL,
3457 /* .pfnReserved0 = */ NULL,
3458 /* .pfnReserved1 = */ NULL,
3459 /* .pfnReserved2 = */ NULL,
3460 /* .pfnReserved3 = */ NULL,
3461 /* .pfnReserved4 = */ NULL,
3462 /* .pfnReserved5 = */ NULL,
3463 /* .pfnReserved6 = */ NULL,
3464 /* .pfnReserved7 = */ NULL,
3465#else
3466# error "Not in IN_RING3, IN_RING0 or IN_RC!"
3467#endif
3468 /* .u32VersionEnd = */ PDM_DEVREG_VERSION
3469};
3470
3471
3472/**
3473 * The VirtualBox GIC backend.
3474 */
3475const PDMGICBACKEND g_GicBackend =
3476{
3477 /* .pfnReadSysReg = */ gicReadSysReg,
3478 /* .pfnWriteSysReg = */ gicWriteSysReg,
3479 /* .pfnSetSpi = */ gicSetSpi,
3480 /* .pfnSetPpi = */ gicSetPpi,
3481 /* .pfnSendMsi = */ gitsSendMsi,
3482};
3483