VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllMemRWTmpl.cpp.h@ 102424

Last change on this file since 102424 was 102424, checked in by vboxsync, 18 months ago

VMM/IEM: Continue refactoring IEM_MC_MEM_MAP into type specific MCs using bUnmapInfo. bugref:10371

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.0 KB
Line 
1/* $Id: IEMAllMemRWTmpl.cpp.h 102424 2023-12-01 22:43:39Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - R/W Memory Functions Template.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.215389.xyz.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28
/* Check template parameters - these must be defined by the includer before
   each instantiation of this template. */
#ifndef TMPL_MEM_TYPE
# error "TMPL_MEM_TYPE is undefined"
#endif
#ifndef TMPL_MEM_TYPE_ALIGN
/* Default alignment mask: natural alignment of the type (sizeof - 1). */
# define TMPL_MEM_TYPE_ALIGN (sizeof(TMPL_MEM_TYPE) - 1)
#endif
#ifndef TMPL_MEM_FN_SUFF
# error "TMPL_MEM_FN_SUFF is undefined"
#endif
#ifndef TMPL_MEM_FMT_TYPE
# error "TMPL_MEM_FMT_TYPE is undefined"
#endif
#ifndef TMPL_MEM_FMT_DESC
# error "TMPL_MEM_FMT_DESC is undefined"
#endif
46
47/**
48 * Standard fetch function.
49 *
50 * This is used by CImpl code, so it needs to be kept even when IEM_WITH_SETJMP
51 * is defined.
52 */
53VBOXSTRICTRC RT_CONCAT(iemMemFetchData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puDst,
54 uint8_t iSegReg, RTGCPTR GCPtrMem) RT_NOEXCEPT
55{
56 /* The lazy approach for now... */
57 TMPL_MEM_TYPE const *puSrc;
58 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, sizeof(*puSrc), iSegReg, GCPtrMem,
59 IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
60 if (rc == VINF_SUCCESS)
61 {
62 *puDst = *puSrc;
63 rc = iemMemCommitAndUnmap(pVCpu, (void *)puSrc, IEM_ACCESS_DATA_R);
64 Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, *puDst));
65 }
66 return rc;
67}
68
69
#ifdef IEM_WITH_SETJMP
/**
 * Safe/fallback fetch function that longjmps on error.
 *
 * The TMPL_MEM_BY_REF variant returns the value through a caller-provided
 * buffer (for types too large to return by value); the other variant returns
 * the value directly.
 */
# ifdef TMPL_MEM_BY_REF
void
RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *pDst, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    /* Statistics: count fetches that took the safe (non-inlined) path. */
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
#  endif
    /* Map the guest memory, copy it into the caller's buffer and unmap.
       iemMemMapJmp longjmps on any access problem, so no status checking. */
    TMPL_MEM_TYPE const *pSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, sizeof(*pSrc), iSegReg, GCPtrMem,
                                                                    IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
    *pDst = *pSrc;
    iemMemCommitAndUnmapJmp(pVCpu, (void *)pSrc, IEM_ACCESS_DATA_R);
    /* NOTE(review): the pointer pDst is passed to the log format here;
       presumably TMPL_MEM_FMT_TYPE for by-ref types takes a pointer
       (e.g. %Rhxs) - confirm against the template instantiations. */
    Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pDst));
}
# else /* !TMPL_MEM_BY_REF */
TMPL_MEM_TYPE
RT_CONCAT3(iemMemFetchData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
{
#  if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
#  endif
    /* Map, read into a local, unmap, then return the value. */
    TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, sizeof(*puSrc), iSegReg, GCPtrMem,
                                                                     IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
    TMPL_MEM_TYPE const uRet = *puSrc;
    iemMemCommitAndUnmapJmp(pVCpu, (void *)puSrc, IEM_ACCESS_DATA_R);
    Log2(("IEM RD " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uRet));
    return uRet;
}
# endif /* !TMPL_MEM_BY_REF */
#endif /* IEM_WITH_SETJMP */
103
104
105
/**
 * Standard store function.
 *
 * This is used by CImpl code, so it needs to be kept even when IEM_WITH_SETJMP
 * is defined.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg     The index of the segment register to use for this
 *                      access.  The base and limits are checked.
 * @param   GCPtrMem    The address of the guest memory.
 * @param   pValue/uValue   The value to store, by reference or by value
 *                      depending on TMPL_MEM_BY_REF.
 */
VBOXSTRICTRC RT_CONCAT(iemMemStoreData,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
#ifdef TMPL_MEM_BY_REF
                                                         TMPL_MEM_TYPE const *pValue) RT_NOEXCEPT
#else
                                                         TMPL_MEM_TYPE uValue) RT_NOEXCEPT
#endif
{
    /* The lazy approach for now: map, store, commit and unmap. */
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, sizeof(*puDst), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
#ifdef TMPL_MEM_BY_REF
        *puDst = *pValue;
#else
        *puDst = uValue;
#endif
        rc = iemMemCommitAndUnmap(pVCpu, puDst, IEM_ACCESS_DATA_W);
#ifdef TMPL_MEM_BY_REF
        Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pValue));
#else
        Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue));
#endif
    }
    return rc;
}
138
139
#ifdef IEM_WITH_SETJMP
/**
 * Safe/fallback store function that longjmps on error.
 *
 * (Note: the previous doc header said "byte", but this template instantiates
 * for whatever TMPL_MEM_TYPE is.)
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   iSegReg The index of the segment register to use for
 *                  this access.  The base and limits are checked.
 * @param   GCPtrMem The address of the guest memory.
 * @param   uValue  The value to store (pValue when TMPL_MEM_BY_REF is defined).
 */
void RT_CONCAT3(iemMemStoreData,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, uint8_t iSegReg, RTGCPTR GCPtrMem,
#ifdef TMPL_MEM_BY_REF
                                                          TMPL_MEM_TYPE const *pValue) IEM_NOEXCEPT_MAY_LONGJMP
#else
                                                          TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
#endif
{
# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    /* Statistics: count stores that took the safe (non-inlined) path. */
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
# endif
#ifdef TMPL_MEM_BY_REF
    Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, pValue));
#else
    Log6(("IEM WR " TMPL_MEM_FMT_DESC " %d|%RGv: " TMPL_MEM_FMT_TYPE "\n", iSegReg, GCPtrMem, uValue));
#endif
    /* Map, store and unmap; iemMemMapJmp longjmps on any access problem. */
    TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(*puDst), iSegReg, GCPtrMem,
                                                         IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
#ifdef TMPL_MEM_BY_REF
    *puDst = *pValue;
#else
    *puDst = uValue;
#endif
    iemMemCommitAndUnmapJmp(pVCpu, puDst, IEM_ACCESS_DATA_W);
}
#endif /* IEM_WITH_SETJMP */
175
176
177#ifdef IEM_WITH_SETJMP
178
179/**
180 * Maps a data buffer for read+write direct access (or via a bounce buffer),
181 * longjmp on error.
182 *
183 * @param pVCpu The cross context virtual CPU structure of the calling thread.
184 * @param pbUnmapInfo Pointer to unmap info variable.
185 * @param iSegReg The index of the segment register to use for
186 * this access. The base and limits are checked.
187 * @param GCPtrMem The address of the guest memory.
188 */
189TMPL_MEM_TYPE *
190RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RwSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
191 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
192{
193# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
194 pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
195# endif
196 Log8(("IEM RW/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
197 *pbUnmapInfo = 1 | ((IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE) << 4); /* zero is for the TLB hit */
198 return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem, IEM_ACCESS_DATA_RW, TMPL_MEM_TYPE_ALIGN);
199}
200
201
202/**
203 * Maps a data buffer for writeonly direct access (or via a bounce buffer),
204 * longjmp on error.
205 *
206 * @param pVCpu The cross context virtual CPU structure of the calling thread.
207 * @param pbUnmapInfo Pointer to unmap info variable.
208 * @param iSegReg The index of the segment register to use for
209 * this access. The base and limits are checked.
210 * @param GCPtrMem The address of the guest memory.
211 */
212TMPL_MEM_TYPE *
213RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,WoSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
214 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
215{
216# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
217 pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
218# endif
219 Log8(("IEM WO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
220 *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_WRITE << 4); /* zero is for the TLB hit */
221 return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem, IEM_ACCESS_DATA_W, TMPL_MEM_TYPE_ALIGN);
222}
223
224
225/**
226 * Maps a data buffer for readonly direct access (or via a bounce buffer),
227 * longjmp on error.
228 *
229 * @param pVCpu The cross context virtual CPU structure of the calling thread.
230 * @param pbUnmapInfo Pointer to unmap info variable.
231 * @param iSegReg The index of the segment register to use for
232 * this access. The base and limits are checked.
233 * @param GCPtrMem The address of the guest memory.
234 */
235TMPL_MEM_TYPE const *
236RT_CONCAT3(iemMemMapData,TMPL_MEM_FN_SUFF,RoSafeJmp)(PVMCPUCC pVCpu, uint8_t *pbUnmapInfo,
237 uint8_t iSegReg, RTGCPTR GCPtrMem) IEM_NOEXCEPT_MAY_LONGJMP
238{
239# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
240 pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
241# endif
242 Log4(("IEM RO/map " TMPL_MEM_FMT_DESC " %d|%RGv\n", iSegReg, GCPtrMem));
243 *pbUnmapInfo = 1 | (IEM_ACCESS_TYPE_READ << 4); /* zero is for the TLB hit */
244 return (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), iSegReg, GCPtrMem, IEM_ACCESS_DATA_R, TMPL_MEM_TYPE_ALIGN);
245}
246
247#endif /* IEM_WITH_SETJMP */
248
249
250#ifdef TMPL_MEM_WITH_STACK
251
252/**
253 * Pushes an item onto the stack, regular version.
254 *
255 * @returns Strict VBox status code.
256 * @param pVCpu The cross context virtual CPU structure of the
257 * calling thread.
258 * @param uValue The value to push.
259 */
260VBOXSTRICTRC RT_CONCAT(iemMemStackPush,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) RT_NOEXCEPT
261{
262 /* Increment the stack pointer. */
263 uint64_t uNewRsp;
264 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
265
266 /* Write the dword the lazy way. */
267 TMPL_MEM_TYPE *puDst;
268 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
269 IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
270 if (rc == VINF_SUCCESS)
271 {
272 *puDst = uValue;
273 rc = iemMemCommitAndUnmap(pVCpu, puDst, IEM_ACCESS_STACK_W);
274
275 /* Commit the new RSP value unless we an access handler made trouble. */
276 if (rc == VINF_SUCCESS)
277 {
278 Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
279 GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
280 pVCpu->cpum.GstCtx.rsp = uNewRsp;
281 return VINF_SUCCESS;
282 }
283 }
284
285 return rc;
286}
287
288
289/**
290 * Pops an item off the stack.
291 *
292 * @returns Strict VBox status code.
293 * @param pVCpu The cross context virtual CPU structure of the
294 * calling thread.
295 * @param puValue Where to store the popped value.
296 */
297VBOXSTRICTRC RT_CONCAT(iemMemStackPop,TMPL_MEM_FN_SUFF)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puValue) RT_NOEXCEPT
298{
299 /* Increment the stack pointer. */
300 uint64_t uNewRsp;
301 RTGCPTR GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
302
303 /* Write the word the lazy way. */
304 TMPL_MEM_TYPE const *puSrc;
305 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
306 IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
307 if (rc == VINF_SUCCESS)
308 {
309 *puValue = *puSrc;
310 rc = iemMemCommitAndUnmap(pVCpu, (void *)puSrc, IEM_ACCESS_STACK_R);
311
312 /* Commit the new RSP value. */
313 if (rc == VINF_SUCCESS)
314 {
315 Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
316 GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, *puValue));
317 pVCpu->cpum.GstCtx.rsp = uNewRsp;
318 return VINF_SUCCESS;
319 }
320 }
321 return rc;
322}
323
324
/**
 * Pushes an item onto the stack, using a temporary stack pointer.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   uValue  The value to push.
 * @param   pTmpRsp Pointer to the temporary stack pointer; updated on
 *                  success, left untouched on failure.
 */
VBOXSTRICTRC RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,Ex)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue, PRTUINT64U pTmpRsp) RT_NOEXCEPT
{
    /* Advance a copy of the temporary stack pointer for the push. */
    RTUINT64U NewRsp = *pTmpRsp;
    RTGCPTR GCPtrTop = iemRegGetRspForPushEx(pVCpu, &NewRsp, sizeof(TMPL_MEM_TYPE));

    /* Write the value the lazy way: map, store, unmap. */
    TMPL_MEM_TYPE *puDst;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puDst, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        *puDst = uValue;
        rc = iemMemCommitAndUnmap(pVCpu, puDst, IEM_ACCESS_STACK_W);

        /* Commit the new RSP value unless an access handler made trouble. */
        if (rc == VINF_SUCCESS)
        {
            Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [ex]\n",
                   GCPtrTop, pTmpRsp->u, NewRsp.u, uValue));
            *pTmpRsp = NewRsp;
            return VINF_SUCCESS;
        }
    }
    return rc;
}
360
361
/**
 * Pops an item off the stack, using a temporary stack pointer.
 *
 * @returns Strict VBox status code.
 * @param   pVCpu   The cross context virtual CPU structure of the
 *                  calling thread.
 * @param   puValue Where to store the popped value.
 * @param   pTmpRsp Pointer to the temporary stack pointer; updated on
 *                  success, left untouched on failure.
 */
VBOXSTRICTRC
RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,Ex)(PVMCPUCC pVCpu, TMPL_MEM_TYPE *puValue, PRTUINT64U pTmpRsp) RT_NOEXCEPT
{
    /* Advance a copy of the temporary stack pointer for the pop. */
    RTUINT64U NewRsp = *pTmpRsp;
    RTGCPTR GCPtrTop = iemRegGetRspForPopEx(pVCpu, &NewRsp, sizeof(TMPL_MEM_TYPE));

    /* Read the value the lazy way: map, copy, unmap. */
    TMPL_MEM_TYPE const *puSrc;
    VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&puSrc, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
                                IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
    if (rc == VINF_SUCCESS)
    {
        *puValue = *puSrc;
        rc = iemMemCommitAndUnmap(pVCpu, (void *)puSrc, IEM_ACCESS_STACK_R);

        /* Commit the new RSP value on success only. */
        if (rc == VINF_SUCCESS)
        {
            Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [ex]\n",
                   GCPtrTop, pTmpRsp->u, NewRsp.u, *puValue));
            *pTmpRsp = NewRsp;
            return VINF_SUCCESS;
        }
    }
    return rc;
}
398
399
400# ifdef IEM_WITH_SETJMP
401
402/**
403 * Safe/fallback stack push function that longjmps on error.
404 */
405void RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
406{
407# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
408 pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
409# endif
410
411 /* Decrement the stack pointer (prep). */
412 uint64_t uNewRsp;
413 RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
414
415 /* Write the data. */
416 TMPL_MEM_TYPE *puDst = (TMPL_MEM_TYPE *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
417 IEM_ACCESS_STACK_W, TMPL_MEM_TYPE_ALIGN);
418 *puDst = uValue;
419 iemMemCommitAndUnmapJmp(pVCpu, puDst, IEM_ACCESS_STACK_W);
420
421 /* Commit the RSP change. */
422 Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
423 GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
424 pVCpu->cpum.GstCtx.rsp = uNewRsp;
425}
426
427
428/**
429 * Safe/fallback stack pop function that longjmps on error.
430 */
431TMPL_MEM_TYPE RT_CONCAT3(iemMemStackPop,TMPL_MEM_FN_SUFF,SafeJmp)(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
432{
433# if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
434 pVCpu->iem.s.DataTlb.cTlbSafeReadPath++;
435# endif
436
437 /* Increment the stack pointer. */
438 uint64_t uNewRsp;
439 RTGCPTR const GCPtrTop = iemRegGetRspForPop(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);
440
441 /* Read the data. */
442 TMPL_MEM_TYPE const *puSrc = (TMPL_MEM_TYPE const *)iemMemMapJmp(pVCpu, sizeof(TMPL_MEM_TYPE), X86_SREG_SS, GCPtrTop,
443 IEM_ACCESS_STACK_R, TMPL_MEM_TYPE_ALIGN);
444 TMPL_MEM_TYPE const uRet = *puSrc;
445 iemMemCommitAndUnmapJmp(pVCpu, (void *)puSrc, IEM_ACCESS_STACK_R);
446
447 /* Commit the RSP change and return the popped value. */
448 Log10(("IEM RD " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE "\n",
449 GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uRet));
450 pVCpu->cpum.GstCtx.rsp = uNewRsp;
451
452 return uRet;
453}
454
#  ifdef TMPL_WITH_PUSH_SREG
/**
 * Safe/fallback stack push function for segment registers, longjmps on error.
 *
 * Note that RSP is advanced by sizeof(TMPL_MEM_TYPE) but only the low word
 * (the selector) is actually stored - see the notes in the body.
 *
 * @param   pVCpu   The cross context virtual CPU structure of the calling thread.
 * @param   uValue  The segment register value to push.
 */
void RT_CONCAT3(iemMemStackPush,TMPL_MEM_FN_SUFF,SRegSafeJmp)(PVMCPUCC pVCpu, TMPL_MEM_TYPE uValue) IEM_NOEXCEPT_MAY_LONGJMP
{
#   if defined(IEM_WITH_DATA_TLB) && defined(IN_RING3)
    pVCpu->iem.s.DataTlb.cTlbSafeWritePath++;
#   endif

    /* Decrement the stack pointer (prep). */
    uint64_t uNewRsp;
    RTGCPTR const GCPtrTop = iemRegGetRspForPush(pVCpu, sizeof(TMPL_MEM_TYPE), &uNewRsp);

    /* Write the data. */
    /* The intel docs talks about zero extending the selector register
       value.  My actual intel CPU here might be zero extending the value
       but it still only writes the lower word... */
    /** @todo Test this on new HW and on AMD and in 64-bit mode.  Also test what
     * happens when crossing an electric page boundrary, is the high word checked
     * for write accessibility or not? Probably it is.  What about segment limits?
     * It appears this behavior is also shared with trap error codes.
     *
     * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check
     * ancient hardware when it actually did change. */
    uint16_t *puDst = (uint16_t *)iemMemMapJmp(pVCpu, sizeof(uint16_t), X86_SREG_SS, GCPtrTop,
                                               IEM_ACCESS_STACK_W, sizeof(uint16_t) - 1); /** @todo 2 or 4 alignment check for PUSH SS? */
    *puDst = (uint16_t)uValue;
    iemMemCommitAndUnmapJmp(pVCpu, puDst, IEM_ACCESS_STACK_W);

    /* Commit the RSP change. */
    Log12(("IEM WR " TMPL_MEM_FMT_DESC " SS|%RGv (%RX64->%RX64): " TMPL_MEM_FMT_TYPE " [sreg]\n",
           GCPtrTop, pVCpu->cpum.GstCtx.rsp, uNewRsp, uValue));
    pVCpu->cpum.GstCtx.rsp = uNewRsp;
}
#  endif /* TMPL_WITH_PUSH_SREG */
491
492# endif /* IEM_WITH_SETJMP */
493
494#endif /* TMPL_MEM_WITH_STACK */
495
/* Clean up the template parameters so the file can be re-included with a
   different configuration (one instantiation per memory type). */
#undef TMPL_MEM_TYPE
#undef TMPL_MEM_TYPE_ALIGN
#undef TMPL_MEM_FN_SUFF
#undef TMPL_MEM_FMT_TYPE
#undef TMPL_MEM_FMT_DESC
#undef TMPL_MEM_BY_REF
#undef TMPL_WITH_PUSH_SREG
504
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette