VirtualBox

source: vbox/trunk/src/VBox/VMM/PATM/PATM.cpp@23099

Last change on this file: r23099, checked in by vboxsync, 16 years ago:

PATM: implemented a couple of tweaks to make NetBSD bootable (still some spurious segfaults)

1/* $Id: PATM.cpp 23099 2009-09-17 14:39:06Z vboxsync $ */
2/** @file
3 * PATM - Dynamic Guest OS Patching Manager
4 *
5 * NOTE: Never ever reuse patch memory!!
6 */
7
8/*
9 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
10 *
11 * This file is part of VirtualBox Open Source Edition (OSE), as
13 * available from http://www.virtualbox.org. This file is free software;
13 * you can redistribute it and/or modify it under the terms of the GNU
14 * General Public License (GPL) as published by the Free Software
15 * Foundation, in version 2 as it comes in the "COPYING" file of the
16 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
17 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
20 * Clara, CA 95054 USA or visit http://www.sun.com if you need
21 * additional information or have any questions.
22 */
23
24/*******************************************************************************
25* Header Files *
26*******************************************************************************/
27#define LOG_GROUP LOG_GROUP_PATM
28#include <VBox/patm.h>
29#include <VBox/stam.h>
30#include <VBox/pgm.h>
31#include <VBox/cpum.h>
32#include <VBox/cpumdis.h>
33#include <VBox/iom.h>
34#include <VBox/sup.h>
35#include <VBox/mm.h>
36#include <VBox/ssm.h>
37#include <VBox/pdm.h>
38#include <VBox/trpm.h>
39#include <VBox/cfgm.h>
40#include <VBox/param.h>
41#include <VBox/selm.h>
42#include <iprt/avl.h>
43#include "PATMInternal.h"
44#include "PATMPatch.h"
45#include <VBox/vm.h>
46#include <VBox/csam.h>
47
48#include <VBox/dbg.h>
49#include <VBox/err.h>
50#include <VBox/log.h>
51#include <iprt/assert.h>
52#include <iprt/asm.h>
53#include <VBox/dis.h>
54#include <VBox/disopcode.h>
55
56#include <iprt/string.h>
57#include "PATMA.h"
58
59//#define PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
60//#define PATM_DISABLE_ALL
61
62/*******************************************************************************
63* Internal Functions *
64*******************************************************************************/
65
66static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pPatch);
67static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
68static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch);
69
70#ifdef LOG_ENABLED // keep gcc quiet
71static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC);
72#endif
73#ifdef VBOX_WITH_STATISTICS
74static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch);
75static void patmResetStat(PVM pVM, void *pvSample);
76static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf);
77#endif
78
79#define patmPatchHCPtr2PatchGCPtr(pVM, pHC) (pVM->patm.s.pPatchMemGC + (pHC - pVM->patm.s.pPatchMemHC))
80#define patmPatchGCPtr2PatchHCPtr(pVM, pGC) (pVM->patm.s.pPatchMemHC + (pGC - pVM->patm.s.pPatchMemGC))
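/* Usage sketch with a hypothetical offset of 0x100 into the patch block: both macros
 * translate by the fixed offset between the R3 and RC mappings of the same patch memory
 * block, so a round trip inside that block is the identity.
 *
 *     uint8_t *pFuncHC = pVM->patm.s.pPatchMemHC + 0x100;            // some patch code byte (R3)
 *     RTRCPTR  pFuncGC = patmPatchHCPtr2PatchGCPtr(pVM, pFuncHC);    // == pPatchMemGC + 0x100
 *     Assert(patmPatchGCPtr2PatchHCPtr(pVM, pFuncGC) == pFuncHC);    // round trip
 */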
81
82static int patmReinit(PVM pVM);
83static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam);
84
85#ifdef VBOX_WITH_DEBUGGER
86static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM);
87static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
88static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult);
89
90/** Command descriptors. */
91static const DBGCCMD g_aCmds[] =
92{
93 /* pszCmd, cArgsMin, cArgsMax, paArgDesc, cArgDescs, pResultDesc, fFlags, pfnHandler, pszSyntax, pszDescription */
94 { "patmon", 0, 0, NULL, 0, NULL, 0, patmr3CmdOn, "", "Enable patching." },
95 { "patmoff", 0, 0, NULL, 0, NULL, 0, patmr3CmdOff, "", "Disable patching." },
96};
97#endif
98
99/* Don't want to break saved states, so put it here as a global variable. */
100static unsigned int cIDTHandlersDisabled = 0;
101
102/**
103 * Initializes the PATM.
104 *
105 * @returns VBox status code.
106 * @param pVM The VM to operate on.
107 */
108VMMR3DECL(int) PATMR3Init(PVM pVM)
109{
110 int rc;
111
112 Log(("PATMR3Init: Patch record size %d\n", sizeof(PATCHINFO)));
113
114 /* These values can't change as they are hardcoded in patch code (old saved states!) */
115 AssertCompile(VMCPU_FF_TIMER == RT_BIT_32(2));
116 AssertCompile(VM_FF_REQUEST == VMCPU_FF_REQUEST);
117 AssertCompile(VMCPU_FF_INTERRUPT_APIC == RT_BIT_32(0));
118 AssertCompile(VMCPU_FF_INTERRUPT_PIC == RT_BIT_32(1));
119
120 AssertReleaseMsg(PATMInterruptFlag == (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST),
121 ("Interrupt flags out of sync!! PATMInterruptFlag=%#x expected %#x. broken assembler?\n", PATMInterruptFlag, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_TIMER | VMCPU_FF_REQUEST));
122
123 /* Allocate patch memory and GC patch state memory. */
124 pVM->patm.s.cbPatchMem = PATCH_MEMORY_SIZE;
125 /* Add another page in case the generated code is much larger than expected. */
126 /** @todo bad safety precaution */
127 rc = MMR3HyperAllocOnceNoRel(pVM, PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE + PATM_STAT_MEMSIZE, PAGE_SIZE, MM_TAG_PATM, (void **)&pVM->patm.s.pPatchMemHC);
128 if (RT_FAILURE(rc))
129 {
130 Log(("MMHyperAlloc failed with %Rrc\n", rc));
131 return rc;
132 }
133 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
134
135 /* PATM stack page for call instruction execution. (2 parts: one for our private stack and one to store the original return address) */
136 pVM->patm.s.pGCStackHC = (RTRCPTR *)(pVM->patm.s.pPatchMemHC + PATCH_MEMORY_SIZE + PAGE_SIZE);
137 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
138
139 /*
140 * Hypervisor memory for GC status data (read/write)
141 *
142 * Note1: This is non-critical data; if trashed by the guest, then it will only cause problems for itself
143 * Note2: This doesn't really belong here, but we need access to it for relocation purposes
144 *
145 */
146 Assert(sizeof(PATMGCSTATE) < PAGE_SIZE); /** @note hardcoded dependencies on this exist. */
147 pVM->patm.s.pGCStateHC = (PPATMGCSTATE)((uint8_t *)pVM->patm.s.pGCStackHC + PATM_STACK_TOTAL_SIZE);
148 pVM->patm.s.pGCStateGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
149
150 /* Hypervisor memory for patch statistics */
151 pVM->patm.s.pStatsHC = (PSTAMRATIOU32)((uint8_t *)pVM->patm.s.pGCStateHC + PAGE_SIZE);
152 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
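/* Layout of the single MMR3HyperAllocOnceNoRel block carved up above (derived from the
 * pointer arithmetic in this function; offsets are relative to pPatchMemHC / pPatchMemGC):
 *
 *   +0                                                        patch code   (PATCH_MEMORY_SIZE)
 *   +PATCH_MEMORY_SIZE                                        spill page   (PAGE_SIZE)
 *   +PATCH_MEMORY_SIZE + PAGE_SIZE                            PATM stack   (PATM_STACK_TOTAL_SIZE)  -> pGCStackHC/GC
 *   +PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE    GC state     (PAGE_SIZE)              -> pGCStateHC/GC
 *   +PATCH_MEMORY_SIZE + 2*PAGE_SIZE + PATM_STACK_TOTAL_SIZE  statistics   (PATM_STAT_MEMSIZE)      -> pStatsHC/GC
 */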
153
154 /* Memory for patch lookup trees. */
155 rc = MMHyperAlloc(pVM, sizeof(*pVM->patm.s.PatchLookupTreeHC), 0, MM_TAG_PATM, (void **)&pVM->patm.s.PatchLookupTreeHC);
156 AssertRCReturn(rc, rc);
157 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
158
159#ifdef RT_ARCH_AMD64 /* see patmReinit(). */
160 /* Check CFGM option. */
161 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "PATMEnabled", &pVM->fPATMEnabled);
162 if (RT_FAILURE(rc))
163# ifdef PATM_DISABLE_ALL
164 pVM->fPATMEnabled = false;
165# else
166 pVM->fPATMEnabled = true;
167# endif
168#endif
169 pVM->patm.s.uPATMFlags = 0;
170 bool fSearchBackward;
171 CFGMR3QueryBoolDef(CFGMR3GetChild(CFGMR3GetRoot(pVM), "PATM"), "SearchImmediatesBackward", &fSearchBackward, false);
172 if (fSearchBackward)
173 pVM->patm.s.uPATMFlags |= PATMGFL_SEARCH_IMM_BACKWARD;
174
175 rc = patmReinit(pVM);
176 AssertRC(rc);
177 if (RT_FAILURE(rc))
178 return rc;
179
180 /*
181 * Register save and load state notificators.
182 */
183 rc = SSMR3RegisterInternal(pVM, "PATM", 0, PATM_SSM_VERSION, sizeof(pVM->patm.s) + PATCH_MEMORY_SIZE + PAGE_SIZE + PATM_STACK_TOTAL_SIZE + PAGE_SIZE,
184 NULL, NULL, NULL,
185 NULL, patmR3Save, NULL,
186 NULL, patmR3Load, NULL);
187 AssertRCReturn(rc, rc);
188
189#ifdef VBOX_WITH_DEBUGGER
190 /*
191 * Debugger commands.
192 */
193 static bool fRegisteredCmds = false;
194 if (!fRegisteredCmds)
195 {
196 int rc = DBGCRegisterCommands(&g_aCmds[0], RT_ELEMENTS(g_aCmds));
197 if (RT_SUCCESS(rc))
198 fRegisteredCmds = true;
199 }
200#endif
201
202#ifdef VBOX_WITH_STATISTICS
203 STAM_REG(pVM, &pVM->patm.s.StatNrOpcodeRead, STAMTYPE_COUNTER, "/PATM/OpcodeBytesRead", STAMUNIT_OCCURENCES, "The number of opcode bytes read by the recompiler.");
204 STAM_REG(pVM, &pVM->patm.s.StatPATMMemoryUsed,STAMTYPE_COUNTER, "/PATM/MemoryUsed", STAMUNIT_OCCURENCES, "The amount of hypervisor heap used for patches.");
205 STAM_REG(pVM, &pVM->patm.s.StatDisabled, STAMTYPE_COUNTER, "/PATM/Patch/Disabled", STAMUNIT_OCCURENCES, "Number of times patches were disabled.");
206 STAM_REG(pVM, &pVM->patm.s.StatEnabled, STAMTYPE_COUNTER, "/PATM/Patch/Enabled", STAMUNIT_OCCURENCES, "Number of times patches were enabled.");
207 STAM_REG(pVM, &pVM->patm.s.StatDirty, STAMTYPE_COUNTER, "/PATM/Patch/Dirty", STAMUNIT_OCCURENCES, "Number of times patches were marked dirty.");
208 STAM_REG(pVM, &pVM->patm.s.StatUnusable, STAMTYPE_COUNTER, "/PATM/Patch/Unusable", STAMUNIT_OCCURENCES, "Number of unusable patches (conflicts).");
209 STAM_REG(pVM, &pVM->patm.s.StatInstalled, STAMTYPE_COUNTER, "/PATM/Patch/Installed", STAMUNIT_OCCURENCES, "Number of installed patches.");
210 STAM_REG(pVM, &pVM->patm.s.StatInt3Callable, STAMTYPE_COUNTER, "/PATM/Patch/Int3Callable", STAMUNIT_OCCURENCES, "Number of cli patches turned into int3 patches.");
211
212 STAM_REG(pVM, &pVM->patm.s.StatInt3BlockRun, STAMTYPE_COUNTER, "/PATM/Patch/Run/Int3", STAMUNIT_OCCURENCES, "Number of times an int3 block patch was executed.");
213 STAMR3RegisterF(pVM, &pVM->patm.s.pGCStateHC->uPatchCalls, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Patch/Run/Normal");
214
215 STAM_REG(pVM, &pVM->patm.s.StatInstalledFunctionPatches, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Function", STAMUNIT_OCCURENCES, "Number of installed function duplication patches.");
216 STAM_REG(pVM, &pVM->patm.s.StatInstalledTrampoline, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Trampoline", STAMUNIT_OCCURENCES, "Number of installed trampoline patches.");
217 STAM_REG(pVM, &pVM->patm.s.StatInstalledJump, STAMTYPE_COUNTER, "/PATM/Patch/Installed/Jump", STAMUNIT_OCCURENCES, "Number of installed jump patches.");
218
219 STAM_REG(pVM, &pVM->patm.s.StatOverwritten, STAMTYPE_COUNTER, "/PATM/Patch/Overwritten", STAMUNIT_OCCURENCES, "Number of overwritten patches.");
220 STAM_REG(pVM, &pVM->patm.s.StatFixedConflicts,STAMTYPE_COUNTER, "/PATM/Patch/ConflictFixed", STAMUNIT_OCCURENCES, "Number of fixed conflicts.");
221 STAM_REG(pVM, &pVM->patm.s.StatFlushed, STAMTYPE_COUNTER, "/PATM/Patch/Flushed", STAMUNIT_OCCURENCES, "Number of flushes of pages with patch jumps.");
222 STAM_REG(pVM, &pVM->patm.s.StatMonitored, STAMTYPE_COUNTER, "/PATM/Patch/Monitored", STAMUNIT_OCCURENCES, "Number of patches in monitored patch pages.");
223 STAM_REG(pVM, &pVM->patm.s.StatPageBoundaryCrossed, STAMTYPE_COUNTER, "/PATM/Patch/BoundaryCross", STAMUNIT_OCCURENCES, "Number of refused patches due to patch jump crossing page boundary.");
224
225 STAM_REG(pVM, &pVM->patm.s.StatHandleTrap, STAMTYPE_PROFILE, "/PATM/HandleTrap", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3HandleTrap");
226 STAM_REG(pVM, &pVM->patm.s.StatPushTrap, STAMTYPE_COUNTER, "/PATM/HandleTrap/PushWP", STAMUNIT_OCCURENCES, "Number of traps due to monitored stack pages.");
227
228 STAM_REG(pVM, &pVM->patm.s.StatSwitchBack, STAMTYPE_COUNTER, "/PATM/SwitchBack", STAMUNIT_OCCURENCES, "Switch back to original guest code when IF=1 & executing PATM instructions");
229 STAM_REG(pVM, &pVM->patm.s.StatSwitchBackFail,STAMTYPE_COUNTER, "/PATM/SwitchBackFail", STAMUNIT_OCCURENCES, "Failed switch back to original guest code when IF=1 & executing PATM instructions");
230
231 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQFailed, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Failed", STAMUNIT_OCCURENCES, "Nr of failed PATMR3DuplicateFunctionRequest calls");
232 STAM_REG(pVM, &pVM->patm.s.StatDuplicateREQSuccess, STAMTYPE_COUNTER, "/PATM/Function/DupREQ/Success", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls");
233 STAM_REG(pVM, &pVM->patm.s.StatDuplicateUseExisting,STAMTYPE_COUNTER, "/PATM/Function/DupREQ/UseExist", STAMUNIT_OCCURENCES, "Nr of successful PATMR3DuplicateFunctionRequest calls when using an existing patch");
234
235 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupInsert, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Insert", STAMUNIT_OCCURENCES, "Nr of successful function address insertions");
236 STAM_REG(pVM, &pVM->patm.s.StatFunctionLookupReplace, STAMTYPE_COUNTER, "/PATM/Function/Lookup/Replace", STAMUNIT_OCCURENCES, "Nr of successful function address replacements");
237 STAM_REG(pVM, &pVM->patm.s.StatU32FunctionMaxSlotsUsed, STAMTYPE_U32_RESET,"/PATM/Function/Lookup/MaxSlots", STAMUNIT_OCCURENCES, "Maximum nr of lookup slots used in all call patches");
238
239 STAM_REG(pVM, &pVM->patm.s.StatFunctionFound, STAMTYPE_COUNTER, "/PATM/Function/Found", STAMUNIT_OCCURENCES, "Nr of successful function patch lookups in GC");
240 STAM_REG(pVM, &pVM->patm.s.StatFunctionNotFound, STAMTYPE_COUNTER, "/PATM/Function/NotFound", STAMUNIT_OCCURENCES, "Nr of failed function patch lookups in GC");
241
242 STAM_REG(pVM, &pVM->patm.s.StatPatchWrite, STAMTYPE_PROFILE, "/PATM/Write/Handle", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMR3PatchWrite");
243 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteDetect, STAMTYPE_PROFILE, "/PATM/Write/Detect", STAMUNIT_TICKS_PER_CALL, "Profiling of PATMIsWriteToPatchPage");
244 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpreted, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Success", STAMUNIT_OCCURENCES, "Nr of interpreted patch writes.");
245 STAM_REG(pVM, &pVM->patm.s.StatPatchWriteInterpretedFailed, STAMTYPE_COUNTER, "/PATM/Write/Interpreted/Failed", STAMUNIT_OCCURENCES, "Nr of failed interpreted patch writes.");
246
247 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshSuccess, STAMTYPE_COUNTER, "/PATM/Refresh/Success", STAMUNIT_OCCURENCES, "Successful patch refreshes");
248 STAM_REG(pVM, &pVM->patm.s.StatPatchRefreshFailed, STAMTYPE_COUNTER, "/PATM/Refresh/Failure", STAMUNIT_OCCURENCES, "Failed patch refreshes");
249
250 STAM_REG(pVM, &pVM->patm.s.StatPatchPageInserted, STAMTYPE_COUNTER, "/PATM/Page/Inserted", STAMUNIT_OCCURENCES, "Nr of inserted guest pages that were patched");
251 STAM_REG(pVM, &pVM->patm.s.StatPatchPageRemoved, STAMTYPE_COUNTER, "/PATM/Page/Removed", STAMUNIT_OCCURENCES, "Nr of removed guest pages that were patched");
252
253 STAM_REG(pVM, &pVM->patm.s.StatInstrDirty, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Detected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty.");
254 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyGood, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Corrected", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and corrected later on.");
255 STAM_REG(pVM, &pVM->patm.s.StatInstrDirtyBad, STAMTYPE_COUNTER, "/PATM/Instr/Dirty/Failed", STAMUNIT_OCCURENCES, "Number of times instructions were marked dirty and we were not able to correct them.");
256
257 STAM_REG(pVM, &pVM->patm.s.StatSysEnter, STAMTYPE_COUNTER, "/PATM/Emul/SysEnter", STAMUNIT_OCCURENCES, "Number of times sysenter was emulated.");
258 STAM_REG(pVM, &pVM->patm.s.StatSysExit, STAMTYPE_COUNTER, "/PATM/Emul/SysExit" , STAMUNIT_OCCURENCES, "Number of times sysexit was emulated.");
259 STAM_REG(pVM, &pVM->patm.s.StatEmulIret, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Success", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
260 STAM_REG(pVM, &pVM->patm.s.StatEmulIretFailed, STAMTYPE_COUNTER, "/PATM/Emul/Iret/Failed", STAMUNIT_OCCURENCES, "Number of times iret was emulated.");
261
262 STAM_REG(pVM, &pVM->patm.s.StatGenRet, STAMTYPE_COUNTER, "/PATM/Gen/Ret" , STAMUNIT_OCCURENCES, "Number of generated ret instructions.");
263 STAM_REG(pVM, &pVM->patm.s.StatGenRetReused, STAMTYPE_COUNTER, "/PATM/Gen/RetReused" , STAMUNIT_OCCURENCES, "Number of reused ret instructions.");
264 STAM_REG(pVM, &pVM->patm.s.StatGenCall, STAMTYPE_COUNTER, "/PATM/Gen/Call", STAMUNIT_OCCURENCES, "Number of generated call instructions.");
265 STAM_REG(pVM, &pVM->patm.s.StatGenJump, STAMTYPE_COUNTER, "/PATM/Gen/Jmp" , STAMUNIT_OCCURENCES, "Number of generated indirect jump instructions.");
266 STAM_REG(pVM, &pVM->patm.s.StatGenPopf, STAMTYPE_COUNTER, "/PATM/Gen/Popf" , STAMUNIT_OCCURENCES, "Number of generated popf instructions.");
267
268 STAM_REG(pVM, &pVM->patm.s.StatCheckPendingIRQ, STAMTYPE_COUNTER, "/PATM/GC/CheckIRQ" , STAMUNIT_OCCURENCES, "Number of traps that ask to check for pending irqs.");
269#endif /* VBOX_WITH_STATISTICS */
270
271 Log(("PATMCallRecord.size %d\n", PATMCallRecord.size));
272 Log(("PATMCallIndirectRecord.size %d\n", PATMCallIndirectRecord.size));
273 Log(("PATMRetRecord.size %d\n", PATMRetRecord.size));
274 Log(("PATMJumpIndirectRecord.size %d\n", PATMJumpIndirectRecord.size));
275 Log(("PATMPopf32Record.size %d\n", PATMPopf32Record.size));
276 Log(("PATMIretRecord.size %d\n", PATMIretRecord.size));
277 Log(("PATMStiRecord.size %d\n", PATMStiRecord.size));
278 Log(("PATMCheckIFRecord.size %d\n", PATMCheckIFRecord.size));
279
280 return rc;
281}
282
283/**
284 * Finalizes HMA page attributes.
285 *
286 * @returns VBox status code.
287 * @param pVM The VM handle.
288 */
289VMMR3DECL(int) PATMR3InitFinalize(PVM pVM)
290{
291 /* The GC state, stack and statistics must be read/write for the guest (supervisor only of course). */
292 int rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStateGC, PAGE_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
293 if (RT_FAILURE(rc))
294 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
295
296 rc = PGMMapSetPage(pVM, pVM->patm.s.pGCStackGC, PATM_STACK_TOTAL_SIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
297 if (RT_FAILURE(rc))
298 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
299
300 rc = PGMMapSetPage(pVM, pVM->patm.s.pStatsGC, PATM_STAT_MEMSIZE, X86_PTE_P | X86_PTE_A | X86_PTE_D | X86_PTE_RW);
301 if (RT_FAILURE(rc))
302 Log(("PATMR3InitFinalize: PGMMapSetPage failed with %Rrc!!\n", rc));
303
304 return rc;
305}
306
307/**
308 * (Re)initializes PATM
309 *
310 * @param pVM The VM.
311 */
312static int patmReinit(PVM pVM)
313{
314 int rc;
315
316 /*
317 * Assert alignment and sizes.
318 */
319 AssertRelease(!(RT_OFFSETOF(VM, patm.s) & 31));
320 AssertRelease(sizeof(pVM->patm.s) <= sizeof(pVM->patm.padding));
321
322 /*
323 * Setup any fixed pointers and offsets.
324 */
325 pVM->patm.s.offVM = RT_OFFSETOF(VM, patm);
326
327#ifndef RT_ARCH_AMD64 /* would be nice if this was changed everywhere. was driving me crazy on AMD64. */
328#ifndef PATM_DISABLE_ALL
329 pVM->fPATMEnabled = true;
330#endif
331#endif
332
333 Assert(pVM->patm.s.pGCStateHC);
334 memset(pVM->patm.s.pGCStateHC, 0, PAGE_SIZE);
335 AssertReleaseMsg(pVM->patm.s.pGCStateGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStateGC));
336
337 Log(("Patch memory allocated at %p - %RRv\n", pVM->patm.s.pPatchMemHC, pVM->patm.s.pPatchMemGC));
338 pVM->patm.s.pGCStateHC->uVMFlags = X86_EFL_IF;
339
340 Assert(pVM->patm.s.pGCStackHC);
341 memset(pVM->patm.s.pGCStackHC, 0, PAGE_SIZE);
342 AssertReleaseMsg(pVM->patm.s.pGCStackGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pGCStackGC));
343 pVM->patm.s.pGCStateHC->Psp = PATM_STACK_SIZE;
344 pVM->patm.s.pGCStateHC->fPIF = 1; /* PATM Interrupt Flag */
345
346 Assert(pVM->patm.s.pStatsHC);
347 memset(pVM->patm.s.pStatsHC, 0, PATM_STAT_MEMSIZE);
348 AssertReleaseMsg(pVM->patm.s.pStatsGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pStatsGC));
349
350 Assert(pVM->patm.s.pPatchMemHC);
351 Assert(pVM->patm.s.pPatchMemGC == MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC));
352 memset(pVM->patm.s.pPatchMemHC, 0, PATCH_MEMORY_SIZE);
353 AssertReleaseMsg(pVM->patm.s.pPatchMemGC, ("Impossible! MMHyperHC2GC(%p) failed!\n", pVM->patm.s.pPatchMemHC));
354
355 /* Needed for future patching of sldt/sgdt/sidt/str etc. */
356 pVM->patm.s.pCPUMCtxGC = VM_RC_ADDR(pVM, CPUMQueryGuestCtxPtr(VMMGetCpu(pVM)));
357
358 Assert(pVM->patm.s.PatchLookupTreeHC);
359 Assert(pVM->patm.s.PatchLookupTreeGC == MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC));
360
361 /*
362 * (Re)Initialize PATM structure
363 */
364 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
365 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr);
366 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
367 pVM->patm.s.offPatchMem = 16; /* don't start with zero here */
368 pVM->patm.s.uCurrentPatchIdx = 1; /* Index zero is a dummy */
369 pVM->patm.s.pvFaultMonitor = 0;
370 pVM->patm.s.deltaReloc = 0;
371
372 /* Lowest and highest patched instruction */
373 pVM->patm.s.pPatchedInstrGCLowest = ~0;
374 pVM->patm.s.pPatchedInstrGCHighest = 0;
375
376 pVM->patm.s.PatchLookupTreeHC->PatchTree = 0;
377 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
378 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
379
380 pVM->patm.s.pfnSysEnterPatchGC = 0;
381 pVM->patm.s.pfnSysEnterGC = 0;
382
383 pVM->patm.s.fOutOfMemory = false;
384
385 pVM->patm.s.pfnHelperCallGC = 0;
386
387 /* Generate all global functions to be used by future patches. */
388 /* We generate a fake patch in order to use the existing code for relocation. */
389 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pVM->patm.s.pGlobalPatchRec);
390 if (RT_FAILURE(rc))
391 {
392 Log(("Out of memory!!!!\n"));
393 return VERR_NO_MEMORY;
394 }
395 pVM->patm.s.pGlobalPatchRec->patch.flags = PATMFL_GLOBAL_FUNCTIONS;
396 pVM->patm.s.pGlobalPatchRec->patch.uState = PATCH_ENABLED;
397 pVM->patm.s.pGlobalPatchRec->patch.pPatchBlockOffset = pVM->patm.s.offPatchMem;
398
399 rc = patmPatchGenGlobalFunctions(pVM, &pVM->patm.s.pGlobalPatchRec->patch);
400 AssertRC(rc);
401
402 /* Update free pointer in patch memory. */
403 pVM->patm.s.offPatchMem += pVM->patm.s.pGlobalPatchRec->patch.uCurPatchOffset;
404 /* Round to next 8 byte boundary. */
405 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
406 return rc;
407}
408
409
410/**
411 * Applies relocations to data and code managed by this
412 * component. This function will be called at init and
413 * whenever the VMM needs to relocate itself inside the GC.
414 *
415 * The PATM will update the addresses used by the switcher.
416 *
417 * @param pVM The VM.
418 */
419VMMR3DECL(void) PATMR3Relocate(PVM pVM)
420{
421 RTRCPTR GCPtrNew = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStateHC);
422 RTRCINTPTR delta = GCPtrNew - pVM->patm.s.pGCStateGC;
423
424 Log(("PATMR3Relocate from %RRv to %RRv - delta %08X\n", pVM->patm.s.pGCStateGC, GCPtrNew, delta));
425 if (delta)
426 {
427 PCPUMCTX pCtx;
428
429 /* Update CPUMCTX guest context pointer. */
430 pVM->patm.s.pCPUMCtxGC += delta;
431
432 pVM->patm.s.deltaReloc = delta;
433
434 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, RelocatePatches, (void *)pVM);
435
436 pCtx = CPUMQueryGuestCtxPtr(VMMGetCpu(pVM));
437
438 /* If we are running patch code right now, then also adjust EIP. */
439 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
440 pCtx->eip += delta;
441
442 pVM->patm.s.pGCStateGC = GCPtrNew;
443 pVM->patm.s.pPatchMemGC = MMHyperR3ToRC(pVM, pVM->patm.s.pPatchMemHC);
444
445 pVM->patm.s.pGCStackGC = MMHyperR3ToRC(pVM, pVM->patm.s.pGCStackHC);
446
447 pVM->patm.s.pStatsGC = MMHyperR3ToRC(pVM, pVM->patm.s.pStatsHC);
448
449 pVM->patm.s.PatchLookupTreeGC = MMHyperR3ToRC(pVM, pVM->patm.s.PatchLookupTreeHC);
450
451 if (pVM->patm.s.pfnSysEnterPatchGC)
452 pVM->patm.s.pfnSysEnterPatchGC += delta;
453
454 /* Deal with the global patch functions. */
455 pVM->patm.s.pfnHelperCallGC += delta;
456 pVM->patm.s.pfnHelperRetGC += delta;
457 pVM->patm.s.pfnHelperIretGC += delta;
458 pVM->patm.s.pfnHelperJumpGC += delta;
459
460 RelocatePatches(&pVM->patm.s.pGlobalPatchRec->Core, (void *)pVM);
461 }
462}
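/* Relocation sketch with hypothetical addresses: if the hypervisor area moves so that the
 * GC state page goes from RC 0xa0c00000 to 0xa1000000, then delta = +0x00400000 and every
 * RC pointer PATM keeps (pGCStateGC, pPatchMemGC, pGCStackGC, pStatsGC, the helper entry
 * points, EIP while it points into patch code, and each fixup visited by RelocatePatches)
 * is shifted by that same delta. */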
463
464
465/**
466 * Terminates the PATM.
467 *
468 * Termination means cleaning up and freeing all resources;
469 * the VM itself is at this point powered off or suspended.
470 *
471 * @returns VBox status code.
472 * @param pVM The VM to operate on.
473 */
474VMMR3DECL(int) PATMR3Term(PVM pVM)
475{
476 /* Memory was all allocated from the two MM heaps and requires no freeing. */
477 return VINF_SUCCESS;
478}
479
480
481/**
482 * PATM reset callback.
483 *
484 * @returns VBox status code.
485 * @param pVM The VM which is reset.
486 */
487VMMR3DECL(int) PATMR3Reset(PVM pVM)
488{
489 Log(("PATMR3Reset\n"));
490
491 /* Free all patches. */
492 while (true)
493 {
494 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32RemoveBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, 0, true);
495 if (pPatchRec)
496 {
497 PATMRemovePatch(pVM, pPatchRec, true);
498 }
499 else
500 break;
501 }
502 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage);
503 Assert(!pVM->patm.s.PatchLookupTreeHC->PatchTree);
504 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr = 0;
505 pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage = 0;
506
507 int rc = patmReinit(pVM);
508 if (RT_SUCCESS(rc))
509 rc = PATMR3InitFinalize(pVM); /* paranoia */
510
511 return rc;
512}
513
514/**
515 * Read callback for disassembly function; supports reading bytes that cross a page boundary
516 *
517 * @returns VBox status code.
518 * @param pSrc GC source pointer
519 * @param pDest HC destination pointer
520 * @param size Number of bytes to read
521 * @param pvUserdata Callback specific user data (pCpu)
522 *
523 */
524int patmReadBytes(RTUINTPTR pSrc, uint8_t *pDest, unsigned size, void *pvUserdata)
525{
526 DISCPUSTATE *pCpu = (DISCPUSTATE *)pvUserdata;
527 PATMDISASM *pDisInfo = (PATMDISASM *)pCpu->apvUserData[0];
528 int orgsize = size;
529
530 Assert(size);
531 if (size == 0)
532 return VERR_INVALID_PARAMETER;
533
534 /*
535 * Trap/interrupt handlers typically call common code on entry, which might already have patches inserted.
536 * As we currently don't support calling patch code from patch code, we let the disassembler read the original opcode bytes instead.
537 */
538 /** @todo could change in the future! */
539 if (pDisInfo->fReadFlags & PATMREAD_ORGCODE)
540 {
541 for (int i=0;i<orgsize;i++)
542 {
543 int rc = PATMR3QueryOpcode(pDisInfo->pVM, (RTRCPTR)pSrc, pDest);
544 if (RT_SUCCESS(rc))
545 {
546 pSrc++;
547 pDest++;
548 size--;
549 }
550 else break;
551 }
552 if (size == 0)
553 return VINF_SUCCESS;
554#ifdef VBOX_STRICT
555 if ( !(pDisInfo->pPatchInfo->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER))
556 && !(pDisInfo->fReadFlags & PATMREAD_NOCHECK))
557 {
558 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc, NULL) == false);
559 Assert(PATMR3IsInsidePatchJump(pDisInfo->pVM, pSrc+size-1, NULL) == false);
560 }
561#endif
562 }
563
564
565 if (PAGE_ADDRESS(pDisInfo->pInstrGC) != PAGE_ADDRESS(pSrc + size - 1) && !PATMIsPatchGCAddr(pDisInfo->pVM, pSrc))
566 {
567 return PGMPhysSimpleReadGCPtr(&pDisInfo->pVM->aCpus[0], pDest, pSrc, size);
568 }
569 else
570 {
571 uint8_t *pInstrHC = pDisInfo->pInstrHC;
572
573 Assert(pInstrHC);
574
575 /* pInstrHC is the base address; adjust according to the GC pointer. */
576 pInstrHC = pInstrHC + (pSrc - pDisInfo->pInstrGC);
577
578 memcpy(pDest, (void *)pInstrHC, size);
579 }
580
581 return VINF_SUCCESS;
582}
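/* Worked example of the page-crossing check above (hypothetical addresses): with
 * pInstrGC = 0xc0100ffd and a 6 byte read starting at pSrc = pInstrGC, the last byte
 * (0xc0101002) lies on the next page, so the bytes are fetched through
 * PGMPhysSimpleReadGCPtr. A read that stays within the page of pInstrGC (or that targets
 * patch memory) is served from the cached pInstrHC mapping instead, which only covers
 * that single guest page. */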
583
584/**
585 * Callback function for RTAvloU32DoWithAll
586 *
587 * Updates all fixups in the patches
588 *
589 * @returns VBox status code.
590 * @param pNode Current node
591 * @param pParam The VM to operate on.
592 */
593static DECLCALLBACK(int) RelocatePatches(PAVLOU32NODECORE pNode, void *pParam)
594{
595 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
596 PVM pVM = (PVM)pParam;
597 RTRCINTPTR delta;
598#ifdef LOG_ENABLED
599 DISCPUSTATE cpu;
600 char szOutput[256];
601 uint32_t opsize;
602 bool disret;
603#endif
604 int rc;
605
606 /* Nothing to do if the patch was refused. */
607 if (pPatch->patch.uState == PATCH_REFUSED)
608 return 0;
609
610#ifdef LOG_ENABLED
611 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
612 {
613 /** @note pPrivInstrHC is probably not valid anymore */
614 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatch->patch.pPrivInstrGC, (PRTR3PTR)&pPatch->patch.pPrivInstrHC);
615 if (rc == VINF_SUCCESS)
616 {
617 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
618 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, pPatch->patch.pPrivInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
619 Log(("Org patch jump: %s", szOutput));
620 }
621 }
622#endif
623
624 Log(("Nr of fixups %d\n", pPatch->patch.nrFixups));
625 delta = (RTRCINTPTR)pVM->patm.s.deltaReloc;
626
627 /*
628 * Apply fixups
629 */
630 PRELOCREC pRec = 0;
631 AVLPVKEY key = 0;
632
633 while (true)
634 {
635 /* Get the record that's closest from above */
636 pRec = (PRELOCREC)RTAvlPVGetBestFit(&pPatch->patch.FixupTree, key, true);
637 if (pRec == 0)
638 break;
639
640 key = (AVLPVKEY)(pRec->pRelocPos + 1); /* search for the next record during the next round. */
641
642 switch (pRec->uType)
643 {
644 case FIXUP_ABSOLUTE:
645 Log(("Absolute fixup at %RRv %RHv -> %RHv at %RRv\n", pRec->pSource, *(RTRCUINTPTR *)pRec->pRelocPos, *(RTRCINTPTR*)pRec->pRelocPos + delta, pRec->pRelocPos));
646 if (!pRec->pSource || PATMIsPatchGCAddr(pVM, pRec->pSource))
647 {
648 *(RTRCUINTPTR *)pRec->pRelocPos += delta;
649 }
650 else
651 {
652 uint8_t curInstr[15];
653 uint8_t oldInstr[15];
654 Assert(pRec->pSource && pPatch->patch.cbPrivInstr <= 15);
655
656 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
657
658 memcpy(oldInstr, pPatch->patch.aPrivInstr, pPatch->patch.cbPrivInstr);
659 *(RTRCPTR *)&oldInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
660
661 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), curInstr, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPrivInstr);
662 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
663
664 pRec->pDest = (RTRCPTR)((RTRCUINTPTR)pRec->pDest + delta);
665
666 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
667 {
668 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
669
670 Log(("PATM: Patch page not present -> check later!\n"));
671 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
672 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
673 }
674 else
675 if (memcmp(curInstr, oldInstr, pPatch->patch.cbPrivInstr))
676 {
677 Log(("PATM: Patch was overwritten -> disabling patch!!\n"));
678 /*
679 * Disable patch; this is not a good solution
680 */
681 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
682 pPatch->patch.uState = PATCH_DISABLED;
683 }
684 else
685 if (RT_SUCCESS(rc))
686 {
687 *(RTRCPTR *)&curInstr[pPatch->patch.cbPrivInstr - sizeof(RTRCPTR)] = pRec->pDest;
688 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pRec->pSource, curInstr, pPatch->patch.cbPrivInstr);
689 AssertRC(rc);
690 }
691 }
692 break;
693
694 case FIXUP_REL_JMPTOPATCH:
695 {
696 RTRCPTR pTarget = (RTRCPTR)((RTRCINTPTR)pRec->pDest + delta);
697
698 if ( pPatch->patch.uState == PATCH_ENABLED
699 && (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE))
700 {
701 uint8_t oldJump[SIZEOF_NEAR_COND_JUMP32];
702 uint8_t temp[SIZEOF_NEAR_COND_JUMP32];
703 RTRCPTR pJumpOffGC;
704 RTRCINTPTR displ = (RTRCINTPTR)pTarget - (RTRCINTPTR)pRec->pSource;
705 RTRCINTPTR displOld= (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pRec->pSource;
706
707 Log(("Relative fixup (g2p) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
708
709 Assert(pRec->pSource - pPatch->patch.cbPatchJump == pPatch->patch.pPrivInstrGC);
710#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
711 if (pPatch->patch.cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
712 {
713 Assert(pPatch->patch.flags & PATMFL_JUMP_CONFLICT);
714
715 pJumpOffGC = pPatch->patch.pPrivInstrGC + 2; //two byte opcode
716 oldJump[0] = pPatch->patch.aPrivInstr[0];
717 oldJump[1] = pPatch->patch.aPrivInstr[1];
718 *(RTRCUINTPTR *)&oldJump[2] = displOld;
719 }
720 else
721#endif
722 if (pPatch->patch.cbPatchJump == SIZEOF_NEARJUMP32)
723 {
724 pJumpOffGC = pPatch->patch.pPrivInstrGC + 1; //one byte opcode
725 oldJump[0] = 0xE9;
726 *(RTRCUINTPTR *)&oldJump[1] = displOld;
727 }
728 else
729 {
730 AssertMsgFailed(("Invalid patch jump size %d\n", pPatch->patch.cbPatchJump));
731 continue; //this should never happen!!
732 }
733 Assert(pPatch->patch.cbPatchJump <= sizeof(temp));
734
735 /*
736 * Read old patch jump and compare it to the one we previously installed
737 */
738 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->patch.pPrivInstrGC, pPatch->patch.cbPatchJump);
739 Assert(RT_SUCCESS(rc) || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);
740
741 if (rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT)
742 {
743 RTRCPTR pPage = pPatch->patch.pPrivInstrGC & PAGE_BASE_GC_MASK;
744
745 rc = PGMR3HandlerVirtualRegister(pVM, PGMVIRTHANDLERTYPE_ALL, pPage, pPage + (PAGE_SIZE - 1) /* inclusive! */, 0, patmVirtPageHandler, "PATMGCMonitorPage", 0, "PATMMonitorPatchJump");
746 Assert(RT_SUCCESS(rc) || rc == VERR_PGM_HANDLER_VIRTUAL_CONFLICT);
747 }
748 else
749 if (memcmp(temp, oldJump, pPatch->patch.cbPatchJump))
750 {
751 Log(("PATM: Patch jump was overwritten -> disabling patch!!\n"));
752 /*
753 * Disable patch; this is not a good solution
754 */
755 /* @todo hopefully it was completely overwritten (if the read was successful)!!!! */
756 pPatch->patch.uState = PATCH_DISABLED;
757 }
758 else
759 if (RT_SUCCESS(rc))
760 {
761 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pJumpOffGC, &displ, sizeof(displ));
762 AssertRC(rc);
763 }
764 else
765 {
766 AssertMsgFailed(("Unexpected error %d from MMR3PhysReadGCVirt\n", rc));
767 }
768 }
769 else
770 {
771 Log(("Skip the guest jump to patch code for this disabled patch %08X - %08X\n", pPatch->patch.pPrivInstrHC, pRec->pRelocPos));
772 }
773
774 pRec->pDest = pTarget;
775 break;
776 }
777
778 case FIXUP_REL_JMPTOGUEST:
779 {
780 RTRCPTR pSource = (RTRCPTR)((RTRCINTPTR)pRec->pSource + delta);
781 RTRCINTPTR displ = (RTRCINTPTR)pRec->pDest - (RTRCINTPTR)pSource;
782
783 Assert(!(pPatch->patch.flags & PATMFL_GLOBAL_FUNCTIONS));
784 Log(("Relative fixup (p2g) %08X -> %08X at %08X (source=%08x, target=%08x)\n", *(int32_t*)pRec->pRelocPos, displ, pRec->pRelocPos, pRec->pSource, pRec->pDest));
785 *(RTRCUINTPTR *)pRec->pRelocPos = displ;
786 pRec->pSource = pSource;
787 break;
788 }
789
790 default:
791 AssertMsg(0, ("Invalid fixup type!!\n"));
792 return VERR_INVALID_PARAMETER;
793 }
794 }
795
796#ifdef LOG_ENABLED
797 if (pPatch->patch.flags & PATMFL_PATCHED_GUEST_CODE)
798 {
799 /** @note pPrivInstrHC is probably not valid anymore */
800 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatch->patch.pPrivInstrGC, (PRTR3PTR)&pPatch->patch.pPrivInstrHC);
801 if (rc == VINF_SUCCESS)
802 {
803 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
804 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pPatch->patch.pPrivInstrGC, pPatch->patch.pPrivInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
805 Log(("Rel patch jump: %s", szOutput));
806 }
807 }
808#endif
809 return 0;
810}
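/* Fixup sketch, assuming patch memory moved by a hypothetical delta of +0x1000:
 * FIXUP_ABSOLUTE with a source inside patch memory simply adds delta to the 32-bit RC
 * address stored at pRelocPos. FIXUP_REL_JMPTOGUEST recomputes the relative displacement
 * as pDest - (pSource + delta), because the jump itself moved while its guest target
 * stayed put; FIXUP_REL_JMPTOPATCH is the mirror image for the displacement of the jump
 * written into guest code, whose target (pDest + delta) moved instead. */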
811
812/**
813 * #PF Handler callback for virtual access handler ranges.
814 *
815 * Important to realize that a physical page in a range can have aliases, and
816 * for ALL and WRITE handlers these will also trigger.
817 *
818 * @returns VINF_SUCCESS if the handler has carried out the operation.
819 * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
820 * @param pVM VM Handle.
821 * @param GCPtr The virtual address the guest is writing to. (not correct if it's an alias!)
822 * @param pvPtr The HC mapping of that address.
823 * @param pvBuf What the guest is reading/writing.
824 * @param cbBuf How much it's reading/writing.
825 * @param enmAccessType The access type.
826 * @param pvUser User argument.
827 */
828DECLCALLBACK(int) patmVirtPageHandler(PVM pVM, RTGCPTR GCPtr, void *pvPtr, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
829{
830 Assert(enmAccessType == PGMACCESSTYPE_WRITE);
831 /** @todo could be the wrong virtual address (alias) */
832 pVM->patm.s.pvFaultMonitor = GCPtr;
833 PATMR3HandleMonitoredPage(pVM);
834 return VINF_PGM_HANDLER_DO_DEFAULT;
835}
836
837
838#ifdef VBOX_WITH_DEBUGGER
839/**
840 * Callback function for RTAvloU32DoWithAll
841 *
842 * Enables the patch that's being enumerated
843 *
844 * @returns 0 (continue enumeration).
845 * @param pNode Current node
846 * @param pVM The VM to operate on.
847 */
848static DECLCALLBACK(int) EnableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
849{
850 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
851
852 PATMR3EnablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
853 return 0;
854}
855#endif /* VBOX_WITH_DEBUGGER */
856
857
858#ifdef VBOX_WITH_DEBUGGER
859/**
860 * Callback function for RTAvloU32DoWithAll
861 *
862 * Disables the patch that's being enumerated
863 *
864 * @returns 0 (continue enumeration).
865 * @param pNode Current node
866 * @param pVM The VM to operate on.
867 */
868static DECLCALLBACK(int) DisableAllPatches(PAVLOU32NODECORE pNode, void *pVM)
869{
870 PPATMPATCHREC pPatch = (PPATMPATCHREC)pNode;
871
872 PATMR3DisablePatch((PVM)pVM, (RTRCPTR)pPatch->Core.Key);
873 return 0;
874}
875#endif
876
877/**
878 * Returns the host context pointer and size of the patch memory block
879 *
880 * @returns Host context pointer to the patch memory block.
881 * @param pVM The VM to operate on.
882 * @param pcb Size of the patch memory block
883 */
884VMMR3DECL(void *) PATMR3QueryPatchMemHC(PVM pVM, uint32_t *pcb)
885{
886 if (pcb)
887 {
888 *pcb = pVM->patm.s.cbPatchMem;
889 }
890 return pVM->patm.s.pPatchMemHC;
891}
892
893
894/**
895 * Returns the guest context pointer and size of the patch memory block
896 *
897 * @returns Guest context pointer to the patch memory block.
898 * @param pVM The VM to operate on.
899 * @param pcb Size of the patch memory block
900 */
901VMMR3DECL(RTRCPTR) PATMR3QueryPatchMemGC(PVM pVM, uint32_t *pcb)
902{
903 if (pcb)
904 {
905 *pcb = pVM->patm.s.cbPatchMem;
906 }
907 return pVM->patm.s.pPatchMemGC;
908}
909
910
911/**
912 * Returns the host context pointer of the GC context structure
913 *
914 * @returns Host context pointer to the GC state structure.
915 * @param pVM The VM to operate on.
916 */
917VMMR3DECL(PPATMGCSTATE) PATMR3QueryGCStateHC(PVM pVM)
918{
919 return pVM->patm.s.pGCStateHC;
920}
921
922
923/**
924 * Checks whether the HC address is part of our patch region
925 *
926 * @returns true if the address lies within the patch memory block, false otherwise.
927 * @param pVM The VM to operate on.
928 * @param pAddrHC Host context address
929 */
930VMMR3DECL(bool) PATMR3IsPatchHCAddr(PVM pVM, R3PTRTYPE(uint8_t *) pAddrHC)
931{
932 return (pAddrHC >= pVM->patm.s.pPatchMemHC && pAddrHC < pVM->patm.s.pPatchMemHC + pVM->patm.s.cbPatchMem) ? true : false;
933}
934
935
936/**
937 * Allows or disallows patching of privileged instructions executed by the guest OS.
938 *
939 * @returns VBox status code.
940 * @param pVM The VM to operate on.
941 * @param fAllowPatching Allow/disallow patching
942 */
943VMMR3DECL(int) PATMR3AllowPatching(PVM pVM, uint32_t fAllowPatching)
944{
945 pVM->fPATMEnabled = (fAllowPatching) ? true : false;
946 return VINF_SUCCESS;
947}
948
949/**
950 * Convert a GC patch block pointer to a HC patch pointer
951 *
952 * @returns HC pointer or NULL if it's not a GC patch pointer
953 * @param pVM The VM to operate on.
954 * @param pAddrGC GC pointer
955 */
956VMMR3DECL(R3PTRTYPE(void *)) PATMR3GCPtrToHCPtr(PVM pVM, RTRCPTR pAddrGC)
957{
958 if (pVM->patm.s.pPatchMemGC <= pAddrGC && pVM->patm.s.pPatchMemGC + pVM->patm.s.cbPatchMem > pAddrGC)
959 {
960 return pVM->patm.s.pPatchMemHC + (pAddrGC - pVM->patm.s.pPatchMemGC);
961 }
962 return NULL;
963}
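/* Usage sketch (hypothetical caller): an RC address inside the patch block translates by
 * constant offset; anything outside yields NULL.
 *
 *     RTRCPTR  pPatchGC = pVM->patm.s.pPatchMemGC + 0x40;   // hypothetical patch address
 *     uint8_t *pPatchHC = (uint8_t *)PATMR3GCPtrToHCPtr(pVM, pPatchGC);
 *     Assert(pPatchHC == pVM->patm.s.pPatchMemHC + 0x40);
 */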
964
965/**
966 * Query PATM state (enabled/disabled)
967 *
968 * @returns 0 - disabled, 1 - enabled
969 * @param pVM The VM to operate on.
970 */
971VMMR3DECL(int) PATMR3IsEnabled(PVM pVM)
972{
973 return pVM->fPATMEnabled;
974}
975
976
977/**
978 * Convert guest context address to host context pointer
979 *
980 * @returns Host context pointer, or NULL in case of an error.
981 * @param pVM The VM to operate on.
982 * @param pPatch Patch block structure pointer
983 * @param pGCPtr Guest context pointer
984 *
987 */
988R3PTRTYPE(uint8_t *) PATMGCVirtToHCVirt(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pGCPtr)
989{
990 int rc;
991 R3PTRTYPE(uint8_t *) pHCPtr;
992 uint32_t offset;
993
994 if (PATMIsPatchGCAddr(pVM, pGCPtr))
995 {
996 return PATCHCODE_PTR_HC(pPatch) + (pGCPtr - PATCHCODE_PTR_GC(pPatch));
997 }
998
999 offset = pGCPtr & PAGE_OFFSET_MASK;
1000 if (pPatch->cacheRec.pGuestLoc == (pGCPtr & PAGE_BASE_GC_MASK))
1001 {
1002 return pPatch->cacheRec.pPatchLocStartHC + offset;
1003 }
1004
1005 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pGCPtr, (void **)&pHCPtr);
1006 if (rc != VINF_SUCCESS)
1007 {
1008 AssertMsg(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT, ("PGMPhysGCPtr2R3Ptr failed for %08X\n", pGCPtr));
1009 return NULL;
1010 }
1011////invalid? Assert(sizeof(R3PTRTYPE(uint8_t*)) == sizeof(uint32_t));
1012
1013 pPatch->cacheRec.pPatchLocStartHC = (R3PTRTYPE(uint8_t*))((RTHCUINTPTR)pHCPtr & PAGE_BASE_HC_MASK);
1014 pPatch->cacheRec.pGuestLoc = pGCPtr & PAGE_BASE_GC_MASK;
1015 return pHCPtr;
1016}
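/* Note on the cache above: cacheRec remembers the last translated guest page, so repeated
 * lookups within that page become a simple page-base + offset computation without another
 * PGMPhysGCPtr2R3Ptr call. E.g. after translating a hypothetical pGCPtr of 0xc0101234, a
 * follow-up call for 0xc01012f0 hits the cached pPatchLocStartHC and only re-applies the
 * page offset 0x2f0. */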
1017
1018
1019/** Calculates and fills in all branch targets
1020 *
1021 * @returns VBox status code.
1022 * @param pVM The VM to operate on.
1023 * @param pPatch Current patch block pointer
1024 *
1025 */
1026static int patmr3SetBranchTargets(PVM pVM, PPATCHINFO pPatch)
1027{
1028 int32_t displ;
1029
1030 PJUMPREC pRec = 0;
1031 int nrJumpRecs = 0;
1032
1033 /*
1034 * Set all branch targets inside the patch block.
1035 * We remove all jump records as they are no longer needed afterwards.
1036 */
1037 while (true)
1038 {
1039 RCPTRTYPE(uint8_t *) pInstrGC;
1040 RCPTRTYPE(uint8_t *) pBranchTargetGC = 0;
1041
1042 pRec = (PJUMPREC)RTAvlPVRemoveBestFit(&pPatch->JumpTree, 0, true);
1043 if (pRec == 0)
1044 break;
1045
1046 nrJumpRecs++;
1047
1048 /* HC in patch block to GC in patch block. */
1049 pInstrGC = patmPatchHCPtr2PatchGCPtr(pVM, pRec->pJumpHC);
1050
1051 if (pRec->opcode == OP_CALL)
1052 {
1053 /* Special case: call function replacement patch from this patch block.
1054 */
1055 PPATMPATCHREC pFunctionRec = PATMQueryFunctionPatch(pVM, pRec->pTargetGC);
1056 if (!pFunctionRec)
1057 {
1058 int rc;
1059
1060 if (PATMR3HasBeenPatched(pVM, pRec->pTargetGC) == false)
1061 rc = PATMR3InstallPatch(pVM, pRec->pTargetGC, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
1062 else
1063 rc = VERR_PATCHING_REFUSED; /* exists as a normal patch; can't use it */
1064
1065 if (RT_FAILURE(rc))
1066 {
1067 uint8_t *pPatchHC;
1068 RTRCPTR pPatchGC;
1069 RTRCPTR pOrgInstrGC;
1070
1071 pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pInstrGC, 0);
1072 Assert(pOrgInstrGC);
1073
1074 /* Failure for some reason -> mark exit point with int 3. */
1075 Log(("Failed to install function replacement patch (at %x) for reason %Rrc\n", pOrgInstrGC, rc));
1076
1077 pPatchGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pOrgInstrGC);
1078 Assert(pPatchGC);
1079
1080 pPatchHC = pVM->patm.s.pPatchMemHC + (pPatchGC - pVM->patm.s.pPatchMemGC);
1081
1082 /* Set a breakpoint at the very beginning of the recompiled instruction */
1083 *pPatchHC = 0xCC;
1084
1085 continue;
1086 }
1087 }
1088 else
1089 {
1090 Log(("Patch block %RRv called as function\n", pFunctionRec->patch.pPrivInstrGC));
1091 pFunctionRec->patch.flags |= PATMFL_CODE_REFERENCED;
1092 }
1093
1094 pBranchTargetGC = PATMR3QueryPatchGCPtr(pVM, pRec->pTargetGC);
1095 }
1096 else
1097 {
1098 pBranchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pRec->pTargetGC);
1099 }
1100
1101 if (pBranchTargetGC == 0)
1102 {
1103 AssertMsgFailed(("patmr3SetBranchTargets: patmGuestGCPtrToPatchGCPtr failed for %08X\n", pRec->pTargetGC));
1104 return VERR_PATCHING_REFUSED;
1105 }
1106 /* Our jumps *always* have a dword displacement (to make things easier). */
1107 Assert(sizeof(uint32_t) == sizeof(RTRCPTR));
1108 displ = pBranchTargetGC - (pInstrGC + pRec->offDispl + sizeof(RTRCPTR));
1109 *(RTRCPTR *)(pRec->pJumpHC + pRec->offDispl) = displ;
1110 Log(("Set branch target %d to %08X : %08x - (%08x + %d + %d)\n", nrJumpRecs, displ, pBranchTargetGC, pInstrGC, pRec->offDispl, sizeof(RTRCPTR)));
1111 }
1112 Assert(nrJumpRecs == pPatch->nrJumpRecs);
1113 Assert(pPatch->JumpTree == 0);
1114 return VINF_SUCCESS;
1115}
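/* Displacement sketch with hypothetical addresses: for a recompiled 5-byte near jmp (E9)
 * at patch address pInstrGC = 0xa0002000 with offDispl = 1 and a branch target of
 * 0xa0002050, the stored dword is 0xa0002050 - (0xa0002000 + 1 + 4) = 0x4b, i.e. relative
 * to the first byte after the jump instruction, exactly as the CPU expects. */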
1116
1117/** Add an illegal instruction record
1118 *
1119 * @param pVM The VM to operate on.
1120 * @param pPatch Patch structure ptr
1121 * @param pInstrGC Guest context pointer to privileged instruction
1122 *
1123 */
1124static void patmAddIllegalInstrRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
1125{
1126 PAVLPVNODECORE pRec;
1127
1128 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
1129 Assert(pRec);
1130 pRec->Key = (AVLPVKEY)pInstrGC;
1131
1132 bool ret = RTAvlPVInsert(&pPatch->pTempInfo->IllegalInstrTree, pRec);
1133 Assert(ret); NOREF(ret);
1134 pPatch->pTempInfo->nrIllegalInstr++;
1135}
1136
1137static bool patmIsIllegalInstr(PPATCHINFO pPatch, RTRCPTR pInstrGC)
1138{
1139 PAVLPVNODECORE pRec;
1140
1141 pRec = RTAvlPVGet(&pPatch->pTempInfo->IllegalInstrTree, (AVLPVKEY)pInstrGC);
1142 if (pRec)
1143 return true;
1144 return false;
1145}
1146
1147/**
1148 * Add a patch to guest lookup record
1149 *
1150 * @param pVM The VM to operate on.
1151 * @param pPatch Patch structure ptr
1152 * @param pPatchInstrHC Guest context pointer to patch block
1153 * @param pInstrGC Guest context pointer to privileged instruction
1154 * @param enmType Lookup type
1155 * @param fDirty Dirty flag
1156 *
1157 */
1158 /** @note Be extremely careful with this function. Make absolutely sure the guest address is correct! (to avoid executing instructions twice!) */
1159void patmr3AddP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, uint8_t *pPatchInstrHC, RTRCPTR pInstrGC, PATM_LOOKUP_TYPE enmType, bool fDirty)
1160{
1161 bool ret;
1162 PRECPATCHTOGUEST pPatchToGuestRec;
1163 PRECGUESTTOPATCH pGuestToPatchRec;
1164 uint32_t PatchOffset = pPatchInstrHC - pVM->patm.s.pPatchMemHC; /* Offset in memory reserved for PATM. */
1165
1166 if (enmType == PATM_LOOKUP_PATCH2GUEST)
1167 {
1168 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1169 if (pPatchToGuestRec && pPatchToGuestRec->Core.Key == PatchOffset)
1170 return; /* already there */
1171
1172 Assert(!pPatchToGuestRec);
1173 }
1174#ifdef VBOX_STRICT
1175 else
1176 {
1177 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1178 Assert(!pPatchToGuestRec);
1179 }
1180#endif
1181
1182 pPatchToGuestRec = (PRECPATCHTOGUEST)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(RECPATCHTOGUEST) + sizeof(RECGUESTTOPATCH));
1183 Assert(pPatchToGuestRec);
1184 pPatchToGuestRec->Core.Key = PatchOffset;
1185 pPatchToGuestRec->pOrgInstrGC = pInstrGC;
1186 pPatchToGuestRec->enmType = enmType;
1187 pPatchToGuestRec->fDirty = fDirty;
1188
1189 ret = RTAvlU32Insert(&pPatch->Patch2GuestAddrTree, &pPatchToGuestRec->Core);
1190 Assert(ret);
1191
1192 /* GC to patch address */
1193 if (enmType == PATM_LOOKUP_BOTHDIR)
1194 {
1195 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
1196 if (!pGuestToPatchRec)
1197 {
1198 pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1199 pGuestToPatchRec->Core.Key = pInstrGC;
1200 pGuestToPatchRec->PatchOffset = PatchOffset;
1201
1202 ret = RTAvlU32Insert(&pPatch->Guest2PatchAddrTree, &pGuestToPatchRec->Core);
1203 Assert(ret);
1204 }
1205 }
1206
1207 pPatch->nrPatch2GuestRecs++;
1208}
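/* Bookkeeping sketch: a single MMR3HeapAllocZ call above holds both records back to back.
 * The RECPATCHTOGUEST node is keyed by the offset into patch memory (Patch2GuestAddrTree);
 * for PATM_LOOKUP_BOTHDIR the RECGUESTTOPATCH node directly behind it is keyed by the
 * guest address (Guest2PatchAddrTree), so either direction resolves with one RTAvlU32Get
 * and the pair is later freed through the patch-to-guest record alone. */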
1209
1210
1211/**
1212 * Removes a patch to guest lookup record
1213 *
1214 * @param pVM The VM to operate on.
1215 * @param pPatch Patch structure ptr
1216 * @param pPatchInstrGC Guest context pointer to patch block
1217 */
1218void patmr3RemoveP2GLookupRecord(PVM pVM, PPATCHINFO pPatch, RTRCPTR pPatchInstrGC)
1219{
1220 PAVLU32NODECORE pNode;
1221 PAVLU32NODECORE pNode2;
1222 PRECPATCHTOGUEST pPatchToGuestRec;
1223 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
1224
1225 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
1226 Assert(pPatchToGuestRec);
1227 if (pPatchToGuestRec)
1228 {
1229 if (pPatchToGuestRec->enmType == PATM_LOOKUP_BOTHDIR)
1230 {
1231 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)(pPatchToGuestRec+1);
1232
1233 Assert(pGuestToPatchRec->Core.Key);
1234 pNode2 = RTAvlU32Remove(&pPatch->Guest2PatchAddrTree, pGuestToPatchRec->Core.Key);
1235 Assert(pNode2);
1236 }
1237 pNode = RTAvlU32Remove(&pPatch->Patch2GuestAddrTree, pPatchToGuestRec->Core.Key);
1238 Assert(pNode);
1239
1240 MMR3HeapFree(pPatchToGuestRec);
1241 pPatch->nrPatch2GuestRecs--;
1242 }
1243}
1244
1245
1246/**
1247 * RTAvlPVDestroy callback.
1248 */
1249static DECLCALLBACK(int) patmEmptyTreePVCallback(PAVLPVNODECORE pNode, void *)
1250{
1251 MMR3HeapFree(pNode);
1252 return 0;
1253}
1254
1255/**
1256 * Empty the specified tree (PV tree, MMR3 heap)
1257 *
1258 * @param pVM The VM to operate on.
1259 * @param ppTree Tree to empty
1260 */
1261void patmEmptyTree(PVM pVM, PAVLPVNODECORE *ppTree)
1262{
1263 RTAvlPVDestroy(ppTree, patmEmptyTreePVCallback, NULL);
1264}
1265
1266
1267/**
1268 * RTAvlU32Destroy callback.
1269 */
1270static DECLCALLBACK(int) patmEmptyTreeU32Callback(PAVLU32NODECORE pNode, void *)
1271{
1272 MMR3HeapFree(pNode);
1273 return 0;
1274}
1275
1276/**
1277 * Empty the specified tree (U32 tree, MMR3 heap)
1278 *
1279 * @param pVM The VM to operate on.
1280 * @param ppTree Tree to empty
1281 */
1282void patmEmptyTreeU32(PVM pVM, PPAVLU32NODECORE ppTree)
1283{
1284 RTAvlU32Destroy(ppTree, patmEmptyTreeU32Callback, NULL);
1285}
1286
1287
1288/**
1289 * Analyses the instructions following the cli for compliance with our heuristics for cli & pushf
1290 *
1291 * @returns VBox status code.
1292 * @param pVM The VM to operate on.
1293 * @param pCpu CPU disassembly state
1294 * @param pInstrGC Guest context pointer to privileged instruction
1295 * @param pCurInstrGC Guest context pointer to the current instruction
1296 * @param pUserData User pointer (callback specific)
1297 *
1298 */
1299static int patmAnalyseBlockCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
1300{
1301 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
1302 bool fIllegalInstr = false;
1303
1304 //Preliminary heuristics:
1305 //- no call instructions without a fixed displacement between cli and sti/popf
1306 //- no jumps in the instructions following cli (4+ bytes; enough for the replacement jump (5 bytes))
1307 //- no nested pushf/cli
1308 //- sti/popf should be the (eventual) target of all branches
1309 //- no near or far returns; no int xx, no into
1310 //
1311 // Note: Later on we can impose less strict guidelines if the need arises
1312
1313 /* Bail out if the patch gets too big. */
1314 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1315 {
1316 Log(("Code block too big (%x) for patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1317 fIllegalInstr = true;
1318 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1319 }
1320 else
1321 {
1322 /* No unconditional jumps or calls without fixed displacements. */
1323 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1324 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1325 )
1326 {
1327 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1328 if ( pCpu->param1.size == 6 /* far call/jmp */
1329 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1330 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1331 )
1332 {
1333 fIllegalInstr = true;
1334 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1335 }
1336 }
1337
1338 /* An unconditional (short) jump right after a cli is a potential problem; we will overwrite whichever function comes afterwards */
1339 if (pPatch->opcode == OP_CLI && pCpu->pCurInstr->opcode == OP_JMP)
1340 {
1341 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC + pCpu->opsize < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1342 {
1343 Log(("Dangerous unconditional jump ends in our generated patch jump!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1344 /* We turn this one into a int 3 callable patch. */
1345 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1346 }
1347 }
1348 else
1349 /* no nested pushfs just yet; nested cli is allowed for cli patches though. */
1350 if (pPatch->opcode == OP_PUSHF)
1351 {
1352 if (pCurInstrGC != pInstrGC && pCpu->pCurInstr->opcode == OP_PUSHF)
1353 {
1354 fIllegalInstr = true;
1355 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1356 }
1357 }
1358
1359 // no far returns
1360 if (pCpu->pCurInstr->opcode == OP_RETF)
1361 {
1362 pPatch->pTempInfo->nrRetInstr++;
1363 fIllegalInstr = true;
1364 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1365 }
1366 else
1367 // no int xx or into either
1368 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1369 {
1370 fIllegalInstr = true;
1371 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1372 }
1373 }
1374
1375 pPatch->cbPatchBlockSize += pCpu->opsize;
1376
1377 /* Illegal instruction -> end of analysis phase for this code block */
1378 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1379 return VINF_SUCCESS;
1380
1381 /* Check for exit points. */
1382 switch (pCpu->pCurInstr->opcode)
1383 {
1384 case OP_SYSEXIT:
1385 return VINF_SUCCESS; /* duplicate it; will fault or be emulated in GC. */
1386
1387 case OP_SYSENTER:
1388 case OP_ILLUD2:
1389 // This appears to be some kind of kernel panic in Linux 2.4; no point in analysing further
1390 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1391 return VINF_SUCCESS;
1392
1393 case OP_STI:
1394 case OP_POPF:
1395 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION)));
1396 /* If our exit point lies within the generated patch jump, then we have to refuse!! */
1397 if (pCurInstrGC > pPatch->pPrivInstrGC && pCurInstrGC < pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32) /* hardcoded patch jump size; cbPatchJump is still zero */
1398 {
1399 Log(("Exit point within patch jump itself!! (%x vs %x)\n", pCurInstrGC, pPatch->pPrivInstrGC));
1400 return VERR_PATCHING_REFUSED;
1401 }
1402 if (pPatch->opcode == OP_PUSHF)
1403 {
1404 if (pCpu->pCurInstr->opcode == OP_POPF)
1405 {
1406 if (pPatch->cbPatchBlockSize >= SIZEOF_NEARJUMP32)
1407 return VINF_SUCCESS;
1408
1409 /* Or else we need to duplicate more instructions, because we can't jump back yet! */
1410 Log(("WARNING: End of block reached, but we need to duplicate some extra instruction to avoid a conflict with the patch jump\n"));
1411 pPatch->flags |= PATMFL_CHECK_SIZE;
1412 }
1413 break; //sti doesn't mark the end of a pushf block; only popf does
1414 }
1415 //else no break
1416 case OP_RETN: /* exit point for function replacement */
1417 return VINF_SUCCESS;
1418
1419 case OP_IRET:
1420 return VINF_SUCCESS; /* exitpoint */
1421
1422 case OP_CPUID:
1423 case OP_CALL:
1424 case OP_JMP:
1425 break;
1426
1427 default:
1428 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1429 {
1430 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1431 return VINF_SUCCESS; /* exit point */
1432 }
1433 break;
1434 }
1435
1436 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
1437 if ((pPatch->flags & PATMFL_CHECK_SIZE) && pPatch->cbPatchBlockSize > SIZEOF_NEARJUMP32 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW))
1438 {
1439 // The end marker for this kind of patch is any instruction at a location outside our patch jump
1440 Log(("End of block at %RRv size %d\n", pCurInstrGC, pCpu->opsize));
1441 return VINF_SUCCESS;
1442 }
1443
1444 return VWRN_CONTINUE_ANALYSIS;
1445}
1446
1447/**
1448 * Analyses the instructions inside a function for compliance
1449 *
1450 * @returns VBox status code.
1451 * @param pVM The VM to operate on.
1452 * @param pCpu CPU disassembly state
1453 * @param pInstrGC Guest context pointer to privileged instruction
1454 * @param pCurInstrGC Guest context pointer to the current instruction
1455 * @param pUserData User pointer (callback specific)
1456 *
1457 */
1458static int patmAnalyseFunctionCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
1459{
1460 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
1461 bool fIllegalInstr = false;
1462
1463 //Preliminary heuristics:
1464 //- no call instructions
1465 //- ret ends a block
1466
1467 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
1468
1469 // bail out if the patch gets too big
1470 if (pPatch->cbPatchBlockSize >= MAX_PATCH_SIZE)
1471 {
1472 Log(("Code block too big (%x) for function patch at %RRv!!\n", pPatch->cbPatchBlockSize, pCurInstrGC));
1473 fIllegalInstr = true;
1474 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1475 }
1476 else
1477 {
1478 // no unconditional jumps or calls without fixed displacements
1479 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1480 && (pCpu->pCurInstr->opcode == OP_JMP || pCpu->pCurInstr->opcode == OP_CALL)
1481 )
1482 {
1483 Assert(pCpu->param1.size <= 4 || pCpu->param1.size == 6);
1484 if ( pCpu->param1.size == 6 /* far call/jmp */
1485 || (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
1486 || (OP_PARM_VTYPE(pCpu->pCurInstr->param1) != OP_PARM_J && !(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS))
1487 )
1488 {
1489 fIllegalInstr = true;
1490 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1491 }
1492 }
1493 else /* no far returns */
1494 if (pCpu->pCurInstr->opcode == OP_RETF)
1495 {
1496 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1497 fIllegalInstr = true;
1498 }
1499 else /* no int xx or into either */
1500 if (pCpu->pCurInstr->opcode == OP_INT3 || pCpu->pCurInstr->opcode == OP_INT || pCpu->pCurInstr->opcode == OP_INTO)
1501 {
1502 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1503 fIllegalInstr = true;
1504 }
1505
1506 #if 0
1507 ///@todo we can handle certain in/out and privileged instructions in the guest context
1508 if (pCpu->pCurInstr->optype & OPTYPE_PRIVILEGED && pCpu->pCurInstr->opcode != OP_STI)
1509 {
1510 Log(("Illegal instructions for function patch!!\n"));
1511 return VERR_PATCHING_REFUSED;
1512 }
1513 #endif
1514 }
1515
1516 pPatch->cbPatchBlockSize += pCpu->opsize;
1517
1518 /* Illegal instruction -> end of analysis phase for this code block */
1519 if (fIllegalInstr || patmIsIllegalInstr(pPatch, pCurInstrGC))
1520 {
1521 return VINF_SUCCESS;
1522 }
1523
1524 // Check for exit points
1525 switch (pCpu->pCurInstr->opcode)
1526 {
1527 case OP_ILLUD2:
1528 //This appears to be some kind of kernel panic in Linux 2.4; no point in analysing it further
1529 Log(("Illegal opcode (0xf 0xb) -> return here\n"));
1530 return VINF_SUCCESS;
1531
1532 case OP_IRET:
1533 case OP_SYSEXIT: /* will fault or emulated in GC */
1534 case OP_RETN:
1535 return VINF_SUCCESS;
1536
1537 case OP_POPF:
1538 case OP_STI:
1539 return VWRN_CONTINUE_ANALYSIS;
1540 default:
1541 if (pCpu->pCurInstr->optype & (OPTYPE_PRIVILEGED_NOTRAP))
1542 {
1543 patmAddIllegalInstrRecord(pVM, pPatch, pCurInstrGC);
1544 return VINF_SUCCESS; /* exit point */
1545 }
1546 return VWRN_CONTINUE_ANALYSIS;
1547 }
1548
1549 return VWRN_CONTINUE_ANALYSIS;
1550}
1551
1552/**
1553 * Checks whether any of the few instructions preceding the patch contain an
1554 * immediate that looks like a reference to the potential patch instruction
1555 *
1556 * @returns boolean
1557 * @param pVM The VM to operate on.
1558 * @param pInstrGC Guest context pointer to instruction
1559 */
1560static bool patmHasImmsReferringPatch(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC)
1561{
1562 int rc;
1563 uint32_t value;
1564
1565 if (!(pVM->patm.s.uPATMFlags & PATMGFL_SEARCH_IMM_BACKWARD))
1566 return false;
1567
1568 PVMCPU pCpu = VMMGetCpu0(pVM);
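    /* Scan the dwords stored 4 to 11 bytes before the instruction; if any of them, read as a
     * 32-bit immediate, points into the first 6 bytes of the instruction (the area a patch
     * jump would cover), treat it as a potential reference to the patched code. */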
1569 for (int i = 4; i < 12; i++)
1570 {
1571 rc = PGMPhysSimpleReadGCPtr(pCpu, &value, pInstrGC - i, 4);
1572 if (rc == VINF_SUCCESS)
1573 {
1574 if ((uint32_t)(value-(uint32_t)pInstrGC) < 6)
1575 return true;
1576 }
1577 else
1578 break;
1579 }
1580 return false;
1581}
1582
1583/**
1584 * Recompiles the instructions in a code block
1585 *
1586 * @returns VBox status code.
1587 * @param pVM The VM to operate on.
1588 * @param pCpu CPU disassembly state
1589 * @param pInstrGC Guest context pointer to privileged instruction
1590 * @param pCurInstrGC Guest context pointer to the current instruction
1591 * @param pUserData User pointer (callback specific)
1592 *
1593 */
1594static int patmRecompileCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
1595{
1596 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
1597 int rc = VINF_SUCCESS;
1598 bool fInhibitIRQInstr = false; /* did the instruction cause PATMFL_INHIBITIRQS to be set? */
1599
1600 LogFlow(("patmRecompileCallback %RRv %RRv\n", pInstrGC, pCurInstrGC));
1601
1602 if ( patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pCurInstrGC) != 0
1603 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT)) /* do not do this when the next instruction *must* be executed! */
1604 {
1605 /*
1606 * Been there, done that; so insert a jump (we don't want to duplicate code)
1607 * no need to record this instruction as it's glue code that never crashes (it had better not!)
1608 */
1609 Log(("patmRecompileCallback: jump to code we've recompiled before %RRv!\n", pCurInstrGC));
1610 return patmPatchGenRelJump(pVM, pPatch, pCurInstrGC, OP_JMP, !!(pCpu->prefix & PREFIX_OPSIZE));
1611 }
1612
1613
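    /* If nearby guest code seems to hold an immediate referring to this instruction, switch to an
     * int3 replacement block (presumably because overwriting such code with a 5-byte patch jump
     * would be unsafe). */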
1614 if (patmHasImmsReferringPatch(pVM, pInstrGC))
1615 {
1616 pPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
1617 }
1618
1619 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1620 {
1621 rc = patmAnalyseFunctionCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pUserData);
1622 }
1623 else
1624 rc = patmAnalyseBlockCallback(pVM, pCpu, pInstrGC, pCurInstrGC, pUserData);
1625
1626 if (RT_FAILURE(rc))
1627 return rc;
1628
1629 /** @note Never do a direct return unless a failure is encountered! */
1630
1631 /* Clear recompilation of next instruction flag; we are doing that right here. */
1632 if (pPatch->flags & PATMFL_RECOMPILE_NEXT)
1633 pPatch->flags &= ~PATMFL_RECOMPILE_NEXT;
1634
1635 /* Add lookup record for patch to guest address translation */
1636 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
1637
1638 /* Update lowest and highest instruction address for this patch */
1639 if (pCurInstrGC < pPatch->pInstrGCLowest)
1640 pPatch->pInstrGCLowest = pCurInstrGC;
1641 else
1642 if (pCurInstrGC > pPatch->pInstrGCHighest)
1643 pPatch->pInstrGCHighest = pCurInstrGC + pCpu->opsize;
1644
1645 /* Illegal instruction -> end of recompile phase for this code block. */
1646 if (patmIsIllegalInstr(pPatch, pCurInstrGC))
1647 {
1648 Log(("Illegal instruction at %RRv -> mark with int 3\n", pCurInstrGC));
1649 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1650 goto end;
1651 }
1652
1653 /* For our first attempt, we'll handle only simple relative jumps (immediate offset coded in instruction).
1654 * Indirect calls are handled below.
1655 */
1656 if ( (pCpu->pCurInstr->optype & OPTYPE_CONTROLFLOW)
1657 && (pCpu->pCurInstr->opcode != OP_CALL || (pPatch->flags & PATMFL_SUPPORT_CALLS))
1658 && (OP_PARM_VTYPE(pCpu->pCurInstr->param1) == OP_PARM_J))
1659 {
1660 RCPTRTYPE(uint8_t *) pTargetGC = PATMResolveBranch(pCpu, pCurInstrGC);
1661 if (pTargetGC == 0)
1662 {
1663 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
1664 return VERR_PATCHING_REFUSED;
1665 }
1666
1667 if (pCpu->pCurInstr->opcode == OP_CALL)
1668 {
1669 Assert(!PATMIsPatchGCAddr(pVM, pTargetGC));
1670 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, pTargetGC, false);
1671 if (RT_FAILURE(rc))
1672 goto end;
1673 }
1674 else
1675 rc = patmPatchGenRelJump(pVM, pPatch, pTargetGC, pCpu->pCurInstr->opcode, !!(pCpu->prefix & PREFIX_OPSIZE));
1676
1677 if (RT_SUCCESS(rc))
1678 rc = VWRN_CONTINUE_RECOMPILE;
1679
1680 goto end;
1681 }
1682
1683 switch (pCpu->pCurInstr->opcode)
1684 {
1685 case OP_CLI:
1686 {
1687 /* If a cli is found while duplicating instructions for another patch, then it's of vital importance to continue
1688 * until we've found the proper exit point(s).
1689 */
1690 if ( pCurInstrGC != pInstrGC
1691 && !(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1692 )
1693 {
1694 Log(("cli instruction found in other instruction patch block; force it to continue & find an exit point\n"));
1695 pPatch->flags &= ~(PATMFL_CHECK_SIZE | PATMFL_SINGLE_INSTRUCTION);
1696 }
1697 /* Set by irq inhibition; no longer valid now. */
1698 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1699
1700 rc = patmPatchGenCli(pVM, pPatch);
1701 if (RT_SUCCESS(rc))
1702 rc = VWRN_CONTINUE_RECOMPILE;
1703 break;
1704 }
1705
1706 case OP_MOV:
1707 if (pCpu->pCurInstr->optype & OPTYPE_POTENTIALLY_DANGEROUS)
1708 {
1709 /* mov ss, src? */
1710 if ( (pCpu->param1.flags & USE_REG_SEG)
1711 && (pCpu->param1.base.reg_seg == DIS_SELREG_SS))
1712 {
1713 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1714 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1715 /** @todo this could cause a fault (ring 0 selector being loaded in ring 1) */
1716 }
1717#if 0 /* necessary for Haiku */
1718 else
1719 if ( (pCpu->param2.flags & USE_REG_SEG)
1720 && (pCpu->param2.base.reg_seg == USE_REG_SS)
1721 && (pCpu->param1.flags & (USE_REG_GEN32|USE_REG_GEN16))) /** @todo memory operand must in theory be handled too */
1722 {
1723 /* mov GPR, ss */
1724 rc = patmPatchGenMovFromSS(pVM, pPatch, pCpu, pCurInstrGC);
1725 if (RT_SUCCESS(rc))
1726 rc = VWRN_CONTINUE_RECOMPILE;
1727 break;
1728 }
1729#endif
1730 }
1731 goto duplicate_instr;
1732
1733 case OP_POP:
1734 if (pCpu->pCurInstr->param1 == OP_PARM_REG_SS)
1735 {
1736 Assert(pCpu->pCurInstr->optype & OPTYPE_INHIBIT_IRQS);
1737
1738 Log(("Force recompilation of next instruction for OP_MOV at %RRv\n", pCurInstrGC));
1739 pPatch->flags |= PATMFL_RECOMPILE_NEXT;
1740 }
1741 goto duplicate_instr;
1742
1743 case OP_STI:
1744 {
1745 RTRCPTR pNextInstrGC = 0; /* by default no inhibit irq */
1746
1747 /** In a sequence of instructions that inhibit irqs, only the first one actually inhibits irqs. */
1748 if (!(pPatch->flags & PATMFL_INHIBIT_IRQS))
1749 {
1750 pPatch->flags |= PATMFL_INHIBIT_IRQS | PATMFL_GENERATE_JUMPTOGUEST;
1751 fInhibitIRQInstr = true;
1752 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1753 Log(("Inhibit irqs for instruction OP_STI at %RRv\n", pCurInstrGC));
1754 }
1755 rc = patmPatchGenSti(pVM, pPatch, pCurInstrGC, pNextInstrGC);
1756
1757 if (RT_SUCCESS(rc))
1758 {
1759 DISCPUSTATE cpu = *pCpu;
1760 unsigned opsize;
1761 int disret;
1762 RCPTRTYPE(uint8_t *) pNextInstrGC, pReturnInstrGC;
1763 R3PTRTYPE(uint8_t *) pNextInstrHC;
1764
1765 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1766
1767 pNextInstrGC = pCurInstrGC + pCpu->opsize;
1768 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pNextInstrGC);
1769 if (pNextInstrHC == NULL)
1770 {
1771 AssertFailed();
1772 return VERR_PATCHING_REFUSED;
1773 }
1774
1775 // Disassemble the next instruction
1776 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pNextInstrGC, pNextInstrHC, &opsize, NULL);
1777 if (disret == false)
1778 {
1779 AssertMsgFailed(("STI: Disassembly failed (probably page not present) -> return to caller\n"));
1780 return VERR_PATCHING_REFUSED;
1781 }
1782 pReturnInstrGC = pNextInstrGC + opsize;
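                /* The instruction following sti is executed before interrupts are delivered again, so it
                 * must be part of the patch too. If the return point after it still lies inside the 5-byte
                 * area overwritten by the patch jump, we cannot jump back there and must refuse below. */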
1783
1784 if ( (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
1785 || pReturnInstrGC <= pInstrGC
1786 || pReturnInstrGC - pInstrGC >= SIZEOF_NEARJUMP32
1787 )
1788 {
1789 /* Not an exit point for function duplication patches */
1790 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
1791 && RT_SUCCESS(rc))
1792 {
1793 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST; /* Don't generate a jump back */
1794 rc = VWRN_CONTINUE_RECOMPILE;
1795 }
1796 else
1797 rc = VINF_SUCCESS; //exit point
1798 }
1799 else {
1800 Log(("PATM: sti occurred too soon; refusing patch!\n"));
1801 rc = VERR_PATCHING_REFUSED; //not allowed!!
1802 }
1803 }
1804 break;
1805 }
1806
1807 case OP_POPF:
1808 {
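            /* Only jump back to guest code if the popf ends at or beyond the area covered by the
             * 5-byte patch jump; otherwise we would land inside our own jump instruction. */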
1809 bool fGenerateJmpBack = (pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32);
1810
1811 /* Not an exit point for IDT handler or function replacement patches */
1812 /* Note: keep IOPL in mind when changing any of this!! (see comments in PATMA.asm, PATMPopf32Replacement) */
1813 if (pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_DUPLICATE_FUNCTION))
1814 fGenerateJmpBack = false;
1815
1816 rc = patmPatchGenPopf(pVM, pPatch, pCurInstrGC + pCpu->opsize, !!(pCpu->prefix & PREFIX_OPSIZE), fGenerateJmpBack);
1817 if (RT_SUCCESS(rc))
1818 {
1819 if (fGenerateJmpBack == false)
1820 {
1821 /* Not an exit point for IDT handler or function replacement patches */
1822 rc = VWRN_CONTINUE_RECOMPILE;
1823 }
1824 else
1825 {
1826 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1827 rc = VINF_SUCCESS; /* exit point! */
1828 }
1829 }
1830 break;
1831 }
1832
1833 case OP_PUSHF:
1834 rc = patmPatchGenPushf(pVM, pPatch, !!(pCpu->prefix & PREFIX_OPSIZE));
1835 if (RT_SUCCESS(rc))
1836 rc = VWRN_CONTINUE_RECOMPILE;
1837 break;
1838
1839 case OP_PUSH:
1840 if (pCpu->pCurInstr->param1 == OP_PARM_REG_CS)
1841 {
1842 rc = patmPatchGenPushCS(pVM, pPatch);
1843 if (RT_SUCCESS(rc))
1844 rc = VWRN_CONTINUE_RECOMPILE;
1845 break;
1846 }
1847 goto duplicate_instr;
1848
1849 case OP_IRET:
1850 Log(("IRET at %RRv\n", pCurInstrGC));
1851 rc = patmPatchGenIret(pVM, pPatch, pCurInstrGC, !!(pCpu->prefix & PREFIX_OPSIZE));
1852 if (RT_SUCCESS(rc))
1853 {
1854 pPatch->flags |= PATMFL_FOUND_PATCHEND;
1855 rc = VINF_SUCCESS; /* exit point by definition */
1856 }
1857 break;
1858
1859 case OP_ILLUD2:
1860 /* This appears to be some kind of kernel panic in Linux 2.4; no point to continue */
1861 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1862 if (RT_SUCCESS(rc))
1863 rc = VINF_SUCCESS; /* exit point by definition */
1864 Log(("Illegal opcode (0xf 0xb)\n"));
1865 break;
1866
1867 case OP_CPUID:
1868 rc = patmPatchGenCpuid(pVM, pPatch, pCurInstrGC);
1869 if (RT_SUCCESS(rc))
1870 rc = VWRN_CONTINUE_RECOMPILE;
1871 break;
1872
1873 case OP_STR:
1874 case OP_SLDT:
1875 rc = patmPatchGenSldtStr(pVM, pPatch, pCpu, pCurInstrGC);
1876 if (RT_SUCCESS(rc))
1877 rc = VWRN_CONTINUE_RECOMPILE;
1878 break;
1879
1880 case OP_SGDT:
1881 case OP_SIDT:
1882 rc = patmPatchGenSxDT(pVM, pPatch, pCpu, pCurInstrGC);
1883 if (RT_SUCCESS(rc))
1884 rc = VWRN_CONTINUE_RECOMPILE;
1885 break;
1886
1887 case OP_RETN:
1888 /* retn is an exit point for function patches */
1889 rc = patmPatchGenRet(pVM, pPatch, pCpu, pCurInstrGC);
1890 if (RT_SUCCESS(rc))
1891 rc = VINF_SUCCESS; /* exit point by definition */
1892 break;
1893
1894 case OP_SYSEXIT:
1895 /* Duplicate it, so it can be emulated in GC (or fault). */
1896 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1897 if (RT_SUCCESS(rc))
1898 rc = VINF_SUCCESS; /* exit point by definition */
1899 break;
1900
1901 case OP_CALL:
1902 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1903 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1904 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1905 */
1906 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1907 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far calls! */)
1908 {
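                /* 0xDEADBEEF is just a placeholder; the target of an indirect call is only known at
                 * run time (the final 'true' argument presumably marks the call as indirect). */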
1909 rc = patmPatchGenCall(pVM, pPatch, pCpu, pCurInstrGC, (RTRCPTR)0xDEADBEEF, true);
1910 if (RT_SUCCESS(rc))
1911 {
1912 rc = VWRN_CONTINUE_RECOMPILE;
1913 }
1914 break;
1915 }
1916 goto gen_illegal_instr;
1917
1918 case OP_JMP:
1919 Assert(pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS);
1920 /* In interrupt gate handlers it's possible to encounter jumps or calls when IF has been enabled again.
1921 * In that case we'll jump to the original instruction and continue from there. Otherwise an int 3 is executed.
1922 */
1923 Assert(pCpu->param1.size == 4 || pCpu->param1.size == 6);
1924 if (pPatch->flags & PATMFL_SUPPORT_INDIRECT_CALLS && pCpu->param1.size == 4 /* no far jumps! */)
1925 {
1926 rc = patmPatchGenJump(pVM, pPatch, pCpu, pCurInstrGC);
1927 if (RT_SUCCESS(rc))
1928 rc = VINF_SUCCESS; /* end of branch */
1929 break;
1930 }
1931 goto gen_illegal_instr;
1932
1933 case OP_INT3:
1934 case OP_INT:
1935 case OP_INTO:
1936 goto gen_illegal_instr;
1937
1938 case OP_MOV_DR:
1939 /** @note: currently we let DRx writes cause a trap d; our trap handler will decide to interpret it or not. */
1940 if (pCpu->pCurInstr->param2 == OP_PARM_Dd)
1941 {
1942 rc = patmPatchGenMovDebug(pVM, pPatch, pCpu);
1943 if (RT_SUCCESS(rc))
1944 rc = VWRN_CONTINUE_RECOMPILE;
1945 break;
1946 }
1947 goto duplicate_instr;
1948
1949 case OP_MOV_CR:
1950 /** @note: currently we let CRx writes cause a trap d; our trap handler will decide to interpret it or not. */
1951 if (pCpu->pCurInstr->param2 == OP_PARM_Cd)
1952 {
1953 rc = patmPatchGenMovControl(pVM, pPatch, pCpu);
1954 if (RT_SUCCESS(rc))
1955 rc = VWRN_CONTINUE_RECOMPILE;
1956 break;
1957 }
1958 goto duplicate_instr;
1959
1960 default:
1961 if (pCpu->pCurInstr->optype & (OPTYPE_CONTROLFLOW | OPTYPE_PRIVILEGED_NOTRAP))
1962 {
1963gen_illegal_instr:
1964 rc = patmPatchGenIllegalInstr(pVM, pPatch);
1965 if (RT_SUCCESS(rc))
1966 rc = VINF_SUCCESS; /* exit point by definition */
1967 }
1968 else
1969 {
1970duplicate_instr:
1971 Log(("patmPatchGenDuplicate\n"));
1972 rc = patmPatchGenDuplicate(pVM, pPatch, pCpu, pCurInstrGC);
1973 if (RT_SUCCESS(rc))
1974 rc = VWRN_CONTINUE_RECOMPILE;
1975 }
1976 break;
1977 }
1978
1979end:
1980
1981 if ( !fInhibitIRQInstr
1982 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
1983 {
1984 int rc2;
1985 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
1986
1987 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
1988 Log(("Clear inhibit IRQ flag at %RRv\n", pCurInstrGC));
1989 if (pPatch->flags & PATMFL_GENERATE_JUMPTOGUEST)
1990 {
1991 Log(("patmRecompileCallback: generate jump back to guest (%RRv) after fused instruction\n", pNextInstrGC));
1992
1993 rc2 = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
1994 pPatch->flags &= ~PATMFL_GENERATE_JUMPTOGUEST;
1995 rc = VINF_SUCCESS; /* end of the line */
1996 }
1997 else
1998 {
1999 rc2 = patmPatchGenClearInhibitIRQ(pVM, pPatch, pNextInstrGC);
2000 }
2001 if (RT_FAILURE(rc2))
2002 rc = rc2;
2003 }
2004
2005 if (RT_SUCCESS(rc))
2006 {
2007 // If single instruction patch, we've copied enough instructions *and* the current instruction is not a relative jump
2008 if ( (pPatch->flags & PATMFL_CHECK_SIZE)
2009 && pCurInstrGC + pCpu->opsize - pInstrGC >= SIZEOF_NEARJUMP32
2010 && !(pCpu->pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
2011 && !(pPatch->flags & PATMFL_RECOMPILE_NEXT) /* do not do this when the next instruction *must* be executed! */
2012 )
2013 {
2014 RTRCPTR pNextInstrGC = pCurInstrGC + pCpu->opsize;
2015
2016 // The end marker for this kind of patch is any instruction at a location outside our patch jump
2017 Log(("patmRecompileCallback: end found for single instruction patch at %RRv opsize %d\n", pNextInstrGC, pCpu->opsize));
2018
2019 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC);
2020 AssertRC(rc);
2021 }
2022 }
2023 return rc;
2024}
2025
2026
2027#ifdef LOG_ENABLED
2028
2029 /** Adds a disasm jump record (temporary, to prevent duplicate analysis)
2030 *
2031 * @param pVM The VM to operate on.
2032 * @param pPatch Patch structure ptr
2033 * @param pInstrGC Guest context pointer to privileged instruction
2034 *
2035 */
2036static void patmPatchAddDisasmJump(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
2037{
2038 PAVLPVNODECORE pRec;
2039
2040 pRec = (PAVLPVNODECORE)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(*pRec));
2041 Assert(pRec);
2042 pRec->Key = (AVLPVKEY)pInstrGC;
2043
2044 int ret = RTAvlPVInsert(&pPatch->pTempInfo->DisasmJumpTree, pRec);
2045 Assert(ret);
2046}
2047
2048/**
2049 * Checks if jump target has been analysed before.
2050 *
2051 * @returns true if the jump target has been analysed before.
2052 * @param pPatch Patch struct
2053 * @param pInstrGC Jump target
2054 *
2055 */
2056static bool patmIsKnownDisasmJump(PPATCHINFO pPatch, RTRCPTR pInstrGC)
2057{
2058 PAVLPVNODECORE pRec;
2059
2060 pRec = RTAvlPVGet(&pPatch->pTempInfo->DisasmJumpTree, (AVLPVKEY)pInstrGC);
2061 if (pRec)
2062 return true;
2063 return false;
2064}
2065
2066/**
2067 * For proper disassembly of the final patch block
2068 *
2069 * @returns VBox status code.
2070 * @param pVM The VM to operate on.
2071 * @param pCpu CPU disassembly state
2072 * @param pInstrGC Guest context pointer to privileged instruction
2073 * @param pCurInstrGC Guest context pointer to the current instruction
2074 * @param pUserData User pointer (callback specific)
2075 *
2076 */
2077int patmr3DisasmCallback(PVM pVM, DISCPUSTATE *pCpu, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, void *pUserData)
2078{
2079 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2080
2081 if (pCpu->pCurInstr->opcode == OP_INT3)
2082 {
2083 /* Could be an int3 inserted in a call patch. Check to be sure */
2084 DISCPUSTATE cpu;
2085 uint8_t *pOrgJumpHC;
2086 RTRCPTR pOrgJumpGC;
2087 uint32_t dummy;
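        /* Translate the int3's patch address back to the original guest address and disassemble the
         * original code there; if it was a near call, this int3 belongs to a call patch and analysis
         * may continue. */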
2088
2089 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2090 pOrgJumpGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2091 pOrgJumpHC = PATMGCVirtToHCVirt(pVM, pPatch, pOrgJumpGC);
2092
2093 bool disret = PATMR3DISInstr(pVM, pPatch, &cpu, pOrgJumpGC, pOrgJumpHC, &dummy, NULL);
2094 if (!disret || cpu.pCurInstr->opcode != OP_CALL || cpu.param1.size != 4 /* only near calls */)
2095 return VINF_SUCCESS;
2096
2097 return VWRN_CONTINUE_ANALYSIS;
2098 }
2099
2100 if ( pCpu->pCurInstr->opcode == OP_ILLUD2
2101 && PATMIsPatchGCAddr(pVM, pCurInstrGC))
2102 {
2103 /* the indirect call patch contains an 0xF/0xB illegal instr to call for assistance; check for this and continue */
2104 return VWRN_CONTINUE_ANALYSIS;
2105 }
2106
2107 if ( (pCpu->pCurInstr->opcode == OP_CALL && !(pPatch->flags & PATMFL_SUPPORT_CALLS))
2108 || pCpu->pCurInstr->opcode == OP_INT
2109 || pCpu->pCurInstr->opcode == OP_IRET
2110 || pCpu->pCurInstr->opcode == OP_RETN
2111 || pCpu->pCurInstr->opcode == OP_RETF
2112 )
2113 {
2114 return VINF_SUCCESS;
2115 }
2116
2117 if (pCpu->pCurInstr->opcode == OP_ILLUD2)
2118 return VINF_SUCCESS;
2119
2120 return VWRN_CONTINUE_ANALYSIS;
2121}
2122
2123
2124/**
2125 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2126 *
2127 * @returns VBox status code.
2128 * @param pVM The VM to operate on.
2129 * @param pInstrGC Guest context pointer to the initial privileged instruction
2130 * @param pCurInstrGC Guest context pointer to the current instruction
2131 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2132 * @param pUserData User pointer (callback specific)
2133 *
2134 */
2135int patmr3DisasmCode(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, void *pUserData)
2136{
2137 DISCPUSTATE cpu;
2138 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2139 int rc = VWRN_CONTINUE_ANALYSIS;
2140 uint32_t opsize, delta;
2141 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2142 bool disret;
2143 char szOutput[256];
2144
2145 Assert(pCurInstrHC != PATCHCODE_PTR_HC(pPatch) || pPatch->pTempInfo->DisasmJumpTree == 0);
2146
2147 /* We need this to determine branch targets (and for disassembling). */
2148 delta = pVM->patm.s.pPatchMemGC - (uintptr_t)pVM->patm.s.pPatchMemHC;
2149
2150 while(rc == VWRN_CONTINUE_ANALYSIS)
2151 {
2152 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2153
2154 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pCurInstrGC);
2155 if (pCurInstrHC == NULL)
2156 {
2157 rc = VERR_PATCHING_REFUSED;
2158 goto end;
2159 }
2160
2161 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
2162 if (PATMIsPatchGCAddr(pVM, pCurInstrGC))
2163 {
2164 RTRCPTR pOrgInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pCurInstrGC);
2165
2166 if (pOrgInstrGC != pPatch->pTempInfo->pLastDisasmInstrGC)
2167 Log(("DIS %RRv<-%s", pOrgInstrGC, szOutput));
2168 else
2169 Log(("DIS %s", szOutput));
2170
2171 pPatch->pTempInfo->pLastDisasmInstrGC = pOrgInstrGC;
2172 if (patmIsIllegalInstr(pPatch, pOrgInstrGC))
2173 {
2174 rc = VINF_SUCCESS;
2175 goto end;
2176 }
2177 }
2178 else
2179 Log(("DIS: %s", szOutput));
2180
2181 if (disret == false)
2182 {
2183 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2184 rc = VINF_SUCCESS;
2185 goto end;
2186 }
2187
2188 rc = pfnPATMR3Disasm(pVM, &cpu, pInstrGC, pCurInstrGC, pUserData);
2189 if (rc != VWRN_CONTINUE_ANALYSIS) {
2190 break; //done!
2191 }
2192
2193 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction) */
2194 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2195 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2196 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2197 )
2198 {
2199 RTRCPTR pTargetGC = PATMResolveBranch(&cpu, pCurInstrGC);
2200 RTRCPTR pOrgTargetGC;
2201
2202 if (pTargetGC == 0)
2203 {
2204 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2205 rc = VERR_PATCHING_REFUSED;
2206 break;
2207 }
2208
2209 if (!PATMIsPatchGCAddr(pVM, pTargetGC))
2210 {
2211 //jump back to guest code
2212 rc = VINF_SUCCESS;
2213 goto end;
2214 }
2215 pOrgTargetGC = PATMR3PatchToGCPtr(pVM, pTargetGC, 0);
2216
2217 if (patmIsCommonIDTHandlerPatch(pVM, pOrgTargetGC))
2218 {
2219 rc = VINF_SUCCESS;
2220 goto end;
2221 }
2222
2223 if (patmIsKnownDisasmJump(pPatch, pTargetGC) == false)
2224 {
2225 /* New jump, let's check it. */
2226 patmPatchAddDisasmJump(pVM, pPatch, pTargetGC);
2227
2228 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2229 rc = patmr3DisasmCode(pVM, pInstrGC, pTargetGC, pfnPATMR3Disasm, pUserData);
2230 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2231
2232 if (rc != VINF_SUCCESS) {
2233 break; //done!
2234 }
2235 }
2236 if (cpu.pCurInstr->opcode == OP_JMP)
2237 {
2238 /* Unconditional jump; return to caller. */
2239 rc = VINF_SUCCESS;
2240 goto end;
2241 }
2242
2243 rc = VWRN_CONTINUE_ANALYSIS;
2244 }
2245 pCurInstrGC += opsize;
2246 }
2247end:
2248 return rc;
2249}
2250
2251/**
2252 * Disassembles the code stream until the callback function detects a failure or decides everything is acceptable
2253 *
2254 * @returns VBox status code.
2255 * @param pVM The VM to operate on.
2256 * @param pInstrGC Guest context pointer to the initial privileged instruction
2257 * @param pCurInstrGC Guest context pointer to the current instruction
2258 * @param pfnPATMR3Disasm Callback for testing the disassembled instruction
2259 * @param pUserData User pointer (callback specific)
2260 *
2261 */
2262int patmr3DisasmCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Disasm, void *pUserData)
2263{
2264 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2265
2266 int rc = patmr3DisasmCode(pVM, pInstrGC, pCurInstrGC, pfnPATMR3Disasm, pUserData);
2267 /* Free all disasm jump records. */
2268 patmEmptyTree(pVM, &pPatch->pTempInfo->DisasmJumpTree);
2269 return rc;
2270}
2271
2272#endif /* LOG_ENABLED */
2273
2274/**
2275 * Detects if the specified address falls within the 5-byte jump generated for an active patch.
2276 * If so, this patch is permanently disabled.
2277 *
2278 * @param pVM The VM to operate on.
2279 * @param pInstrGC Guest context pointer to instruction
2280 * @param pConflictGC Guest context pointer to check
2281 *
2282 * @note also checks for patch hints to make sure they can never be enabled if a conflict is present.
2283 *
2284 */
2285VMMR3DECL(int) PATMR3DetectConflict(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictGC)
2286{
2287 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, pConflictGC, true /* include patch hints */);
2288 if (pTargetPatch)
2289 {
2290 return patmDisableUnusablePatch(pVM, pInstrGC, pConflictGC, pTargetPatch);
2291 }
2292 return VERR_PATCH_NO_CONFLICT;
2293}
2294
2295/**
2296 * Recompiles the code stream until the callback function detects a failure or decides everything is acceptable
2297 *
2298 * @returns VBox status code.
2299 * @param pVM The VM to operate on.
2300 * @param pInstrGC Guest context pointer to privileged instruction
2301 * @param pCurInstrGC Guest context pointer to the current instruction
2302 * @param pfnPATMR3Recompile Callback for testing the disassembled instruction
2303 * @param pUserData User pointer (callback specific)
2304 *
2305 */
2306static int patmRecompileCodeStream(PVM pVM, RCPTRTYPE(uint8_t *) pInstrGC, RCPTRTYPE(uint8_t *) pCurInstrGC, PFN_PATMR3ANALYSE pfnPATMR3Recompile, void *pUserData)
2307{
2308 DISCPUSTATE cpu;
2309 PPATCHINFO pPatch = (PPATCHINFO)pUserData;
2310 int rc = VWRN_CONTINUE_ANALYSIS;
2311 uint32_t opsize;
2312 R3PTRTYPE(uint8_t *) pCurInstrHC = 0;
2313 bool disret;
2314#ifdef LOG_ENABLED
2315 char szOutput[256];
2316#endif
2317
2318 while (rc == VWRN_CONTINUE_RECOMPILE)
2319 {
2320 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2321
2322 ////Log(("patmRecompileCodeStream %RRv %RRv\n", pInstrGC, pCurInstrGC));
2323
2324 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pCurInstrGC);
2325 if (pCurInstrHC == NULL)
2326 {
2327 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2328 goto end;
2329 }
2330#ifdef LOG_ENABLED
2331 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, szOutput);
2332 Log(("Recompile: %s", szOutput));
2333#else
2334 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2335#endif
2336 if (disret == false)
2337 {
2338 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
2339
2340 /* Add lookup record for patch to guest address translation */
2341 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
2342 patmPatchGenIllegalInstr(pVM, pPatch);
2343 rc = VINF_SUCCESS; /* Note: don't fail here; we might refuse an important patch!! */
2344 goto end;
2345 }
2346
2347 rc = pfnPATMR3Recompile(pVM, &cpu, pInstrGC, pCurInstrGC, pUserData);
2348 if (rc != VWRN_CONTINUE_RECOMPILE)
2349 {
2350 /* If irqs are inhibited because of the current instruction, then we must make sure the next one is executed! */
2351 if ( rc == VINF_SUCCESS
2352 && (pPatch->flags & PATMFL_INHIBIT_IRQS))
2353 {
2354 DISCPUSTATE cpunext;
2355 uint32_t opsizenext;
2356 uint8_t *pNextInstrHC;
2357 RTRCPTR pNextInstrGC = pCurInstrGC + opsize;
2358
2359 Log(("patmRecompileCodeStream: irqs inhibited by instruction %RRv\n", pNextInstrGC));
2360
2361 /* Certain instructions (e.g. sti) force the next instruction to be executed before any interrupts can occur.
2362 * Recompile the next instruction as well
2363 */
2364 pNextInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pNextInstrGC);
2365 if (pNextInstrHC == NULL)
2366 {
2367 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2368 goto end;
2369 }
2370 cpunext.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2371 disret = PATMR3DISInstr(pVM, pPatch, &cpunext, pNextInstrGC, pNextInstrHC, &opsizenext, NULL);
2372 if (disret == false)
2373 {
2374 rc = VERR_PATCHING_REFUSED; /* fatal in this case */
2375 goto end;
2376 }
2377 switch(cpunext.pCurInstr->opcode)
2378 {
2379 case OP_IRET: /* inhibit cleared in generated code */
2380 case OP_SYSEXIT: /* faults; inhibit should be cleared in HC handling */
2381 case OP_HLT:
2382 break; /* recompile these */
2383
2384 default:
2385 if (cpunext.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2386 {
2387 Log(("Unexpected control flow instruction after inhibit irq instruction\n"));
2388
2389 rc = patmPatchGenJumpToGuest(pVM, pPatch, pNextInstrGC, true /* clear inhibit irq flag */);
2390 AssertRC(rc);
2391 pPatch->flags &= ~PATMFL_INHIBIT_IRQS;
2392 goto end; /** @todo should be ok to ignore instruction fusing in this case */
2393 }
2394 break;
2395 }
2396
2397 /** @note after a cli we must continue to a proper exit point */
2398 if (cpunext.pCurInstr->opcode != OP_CLI)
2399 {
2400 rc = pfnPATMR3Recompile(pVM, &cpunext, pInstrGC, pNextInstrGC, pUserData);
2401 if (RT_SUCCESS(rc))
2402 {
2403 rc = VINF_SUCCESS;
2404 goto end;
2405 }
2406 break;
2407 }
2408 else
2409 rc = VWRN_CONTINUE_RECOMPILE;
2410 }
2411 else
2412 break; /* done! */
2413 }
2414
2415 /** @todo continue with the instructions following the jump and then recompile the jump target code */
2416
2417
2418 /* For our first attempt, we'll handle only simple relative jumps and calls (immediate offset coded in instruction). */
2419 if ( (cpu.pCurInstr->optype & OPTYPE_CONTROLFLOW)
2420 && (OP_PARM_VTYPE(cpu.pCurInstr->param1) == OP_PARM_J)
2421 && cpu.pCurInstr->opcode != OP_CALL /* complete functions are replaced; don't bother here. */
2422 )
2423 {
2424 RCPTRTYPE(uint8_t *) addr = PATMResolveBranch(&cpu, pCurInstrGC);
2425 if (addr == 0)
2426 {
2427 Log(("We don't support far jumps here!! (%08X)\n", cpu.param1.flags));
2428 rc = VERR_PATCHING_REFUSED;
2429 break;
2430 }
2431
2432 Log(("Jump encountered target %RRv\n", addr));
2433
2434 /* We don't check if the branch target lies in a valid page as we've already done that in the analysis phase. */
2435 if (!(cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW))
2436 {
2437 Log(("patmRecompileCodeStream continue passed conditional jump\n"));
2438 /* First we need to finish this linear code stream until the next exit point. */
2439 rc = patmRecompileCodeStream(pVM, pInstrGC, pCurInstrGC+opsize, pfnPATMR3Recompile, pUserData);
2440 if (RT_FAILURE(rc))
2441 {
2442 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2443 break; //fatal error
2444 }
2445 }
2446
2447 if (patmGuestGCPtrToPatchGCPtr(pVM, pPatch, addr) == 0)
2448 {
2449 /* New code; let's recompile it. */
2450 Log(("patmRecompileCodeStream continue with jump\n"));
2451
2452 /*
2453 * If we are jumping to an existing patch (or within 5 bytes of the entrypoint), then we must temporarily disable
2454 * this patch so we can continue our analysis
2455 *
2456 * We rely on CSAM to detect and resolve conflicts
2457 */
2458 PPATCHINFO pTargetPatch = PATMFindActivePatchByEntrypoint(pVM, addr);
2459 if(pTargetPatch)
2460 {
2461 Log(("Found active patch at target %RRv (%RRv) -> temporarily disabling it!!\n", addr, pTargetPatch->pPrivInstrGC));
2462 PATMR3DisablePatch(pVM, pTargetPatch->pPrivInstrGC);
2463 }
2464
2465 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls++;
2466 rc = patmRecompileCodeStream(pVM, pInstrGC, addr, pfnPATMR3Recompile, pUserData);
2467 if (cpu.pCurInstr->opcode == OP_CALL) pPatch->pTempInfo->nrCalls--;
2468
2469 if(pTargetPatch)
2470 {
2471 PATMR3EnablePatch(pVM, pTargetPatch->pPrivInstrGC);
2472 }
2473
2474 if (RT_FAILURE(rc))
2475 {
2476 Log(("patmRecompileCodeStream fatal error %d\n", rc));
2477 break; //done!
2478 }
2479 }
2480 /* Always return to caller here; we're done! */
2481 rc = VINF_SUCCESS;
2482 goto end;
2483 }
2484 else
2485 if (cpu.pCurInstr->optype & OPTYPE_UNCOND_CONTROLFLOW)
2486 {
2487 rc = VINF_SUCCESS;
2488 goto end;
2489 }
2490 pCurInstrGC += opsize;
2491 }
2492end:
2493 Assert(!(pPatch->flags & PATMFL_RECOMPILE_NEXT));
2494 return rc;
2495}
2496
2497
2498/**
2499 * Generate the jump from guest to patch code
2500 *
2501 * @returns VBox status code.
2502 * @param pVM The VM to operate on.
2503 * @param pPatch Patch record
2504 */
2505static int patmGenJumpToPatch(PVM pVM, PPATCHINFO pPatch, bool fAddFixup = true)
2506{
2507 uint8_t temp[8];
2508 uint8_t *pPB;
2509 int rc;
2510
2511 Assert(pPatch->cbPatchJump <= sizeof(temp));
2512 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
2513
2514 pPB = pPatch->pPrivInstrHC;
2515
2516#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
2517 if (pPatch->flags & PATMFL_JUMP_CONFLICT)
2518 {
2519 Assert(pPatch->pPatchJumpDestGC);
2520
2521 if (pPatch->cbPatchJump == SIZEOF_NEARJUMP32)
2522 {
2523 // jmp [PatchCode]
2524 if (fAddFixup)
2525 {
2526 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2527 {
2528 Log(("Relocation failed for the jump in the guest code!!\n"));
2529 return VERR_PATCHING_REFUSED;
2530 }
2531 }
2532
2533 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2534 *(uint32_t *)&temp[1] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2535 }
2536 else
2537 if (pPatch->cbPatchJump == SIZEOF_NEAR_COND_JUMP32)
2538 {
2539 // jmp [PatchCode]
2540 if (fAddFixup)
2541 {
2542 if (patmPatchAddReloc32(pVM, pPatch, &pPB[2], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + pPatch->cbPatchJump, pPatch->pPatchJumpDestGC) != VINF_SUCCESS)
2543 {
2544 Log(("Relocation failed for the jump in the guest code!!\n"));
2545 return VERR_PATCHING_REFUSED;
2546 }
2547 }
2548
2549 temp[0] = pPatch->aPrivInstr[0]; //jump opcode copied from original instruction
2550 temp[1] = pPatch->aPrivInstr[1]; //jump opcode copied from original instruction
2551 *(uint32_t *)&temp[2] = (uint32_t)pPatch->pPatchJumpDestGC - ((uint32_t)pPatch->pPrivInstrGC + pPatch->cbPatchJump); //return address
2552 }
2553 else
2554 {
2555 Assert(0);
2556 return VERR_PATCHING_REFUSED;
2557 }
2558 }
2559 else
2560#endif
2561 {
2562 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2563
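        /* A sketch of the 5-byte near jump written over the privileged guest instruction:
         *     E9 xx xx xx xx      ; jmp rel32
         * with rel32 = PATCHCODE_PTR_GC(pPatch) - (pPrivInstrGC + SIZEOF_NEARJUMP32). */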
2564 // jmp [PatchCode]
2565 if (fAddFixup)
2566 {
2567 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, PATCHCODE_PTR_GC(pPatch)) != VINF_SUCCESS)
2568 {
2569 Log(("Relocation failed for the jump in the guest code!!\n"));
2570 return VERR_PATCHING_REFUSED;
2571 }
2572 }
2573 temp[0] = 0xE9; //jmp
2574 *(uint32_t *)&temp[1] = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2575 }
2576 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2577 AssertRC(rc);
2578
2579 if (rc == VINF_SUCCESS)
2580 pPatch->flags |= PATMFL_PATCHED_GUEST_CODE;
2581
2582 return rc;
2583}
2584
2585/**
2586 * Remove the jump from guest to patch code
2587 *
2588 * @returns VBox status code.
2589 * @param pVM The VM to operate on.
2590 * @param pPatch Patch record
2591 */
2592static int patmRemoveJumpToPatch(PVM pVM, PPATCHINFO pPatch)
2593{
2594#ifdef DEBUG
2595 DISCPUSTATE cpu;
2596 char szOutput[256];
2597 uint32_t opsize, i = 0;
2598 bool disret;
2599
2600 while(i < pPatch->cbPrivInstr)
2601 {
2602 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2603 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, &pPatch->pPrivInstrHC[i], &opsize, szOutput);
2604 if (disret == false)
2605 break;
2606
2607 Log(("Org patch jump: %s", szOutput));
2608 Assert(opsize);
2609 i += opsize;
2610 }
2611#endif
2612
2613 /* Restore original code (privileged instruction + following instructions that were overwritten because of the 5/6 byte jmp). */
2614 int rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, pPatch->cbPatchJump);
2615#ifdef DEBUG
2616 if (rc == VINF_SUCCESS)
2617 {
2618 DISCPUSTATE cpu;
2619 char szOutput[256];
2620 uint32_t opsize, i = 0;
2621 bool disret;
2622
2623 while(i < pPatch->cbPrivInstr)
2624 {
2625 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2626 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, &pPatch->pPrivInstrHC[i], &opsize, szOutput);
2627 if (disret == false)
2628 break;
2629
2630 Log(("Org instr: %s", szOutput));
2631 Assert(opsize);
2632 i += opsize;
2633 }
2634 }
2635#endif
2636 pPatch->flags &= ~PATMFL_PATCHED_GUEST_CODE;
2637 return rc;
2638}
2639
2640/**
2641 * Generate the call from guest to patch code
2642 *
2643 * @returns VBox status code.
2644 * @param pVM The VM to operate on.
2645 * @param pPatch Patch record
2646 */
2647static int patmGenCallToPatch(PVM pVM, PPATCHINFO pPatch, RTRCPTR pTargetGC, bool fAddFixup = true)
2648{
2649 uint8_t temp[8];
2650 uint8_t *pPB;
2651 int rc;
2652
2653 Assert(pPatch->cbPatchJump <= sizeof(temp));
2654
2655 pPB = pPatch->pPrivInstrHC;
2656
2657 Assert(pPatch->cbPatchJump == SIZEOF_NEARJUMP32);
2658
2659 // jmp [PatchCode]
2660 if (fAddFixup)
2661 {
2662 if (patmPatchAddReloc32(pVM, pPatch, &pPB[1], FIXUP_REL_JMPTOPATCH, pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32, pTargetGC) != VINF_SUCCESS)
2663 {
2664 Log(("Relocation failed for the jump in the guest code!!\n"));
2665 return VERR_PATCHING_REFUSED;
2666 }
2667 }
2668
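    /* The original instruction is already a near call (0xE8) or near jmp (0xE9); keep its opcode byte
     * and only retarget the 32-bit displacement so that it lands on the patch code. */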
2669 Assert(pPatch->aPrivInstr[0] == 0xE8 || pPatch->aPrivInstr[0] == 0xE9); /* call or jmp */
2670 temp[0] = pPatch->aPrivInstr[0];
2671 *(uint32_t *)&temp[1] = (uint32_t)pTargetGC - ((uint32_t)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32); //return address
2672
2673 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, temp, pPatch->cbPatchJump);
2674 AssertRC(rc);
2675
2676 return rc;
2677}
2678
2679
2680/**
2681 * Patches a cli/sti/pushf/popf instruction block at the specified location
2682 *
2683 * @returns VBox status code.
2684 * @param pVM The VM to operate on.
2685 * @param pInstrGC Guest context pointer to privileged instruction
2686 * @param pInstrHC Host context pointer to privileged instruction
2687 * @param uOpcode Instruction opcode
2688 * @param uOpSize Size of starting instruction
2689 * @param pPatchRec Patch record
2690 *
2691 * @note returns failure if patching is not allowed or possible
2692 *
2693 */
2694VMMR3DECL(int) PATMR3PatchBlock(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2695 uint32_t uOpcode, uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2696{
2697 PPATCHINFO pPatch = &pPatchRec->patch;
2698 int rc = VERR_PATCHING_REFUSED;
2699 DISCPUSTATE cpu;
2700 uint32_t orgOffsetPatchMem = ~0;
2701 RTRCPTR pInstrStart;
2702#ifdef LOG_ENABLED
2703 uint32_t opsize;
2704 char szOutput[256];
2705 bool disret;
2706#endif
2707
2708 /* Save original offset (in case of failures later on) */
2709 /** @todo use the hypervisor heap (that has quite a few consequences for save/restore though) */
2710 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2711
2712 Assert(!(pPatch->flags & (PATMFL_GUEST_SPECIFIC|PATMFL_USER_MODE|PATMFL_TRAPHANDLER)));
2713 switch (uOpcode)
2714 {
2715 case OP_MOV:
2716 break;
2717
2718 case OP_CLI:
2719 case OP_PUSHF:
2720 /* We can 'call' a cli or pushf patch. It will either return to the original guest code when IF is set again, or fault. */
2721 /** @note special precautions are taken when disabling and enabling such patches. */
2722 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
2723 break;
2724
2725 default:
2726 if (!(pPatch->flags & PATMFL_IDTHANDLER))
2727 {
2728 AssertMsg(0, ("PATMR3PatchBlock: Invalid opcode %x\n", uOpcode));
2729 return VERR_INVALID_PARAMETER;
2730 }
2731 }
2732
2733 if (!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)))
2734 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
2735
2736 /* If we're going to insert a patch jump, then the jump itself is not allowed to cross a page boundary. */
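    /* (Presumably so the 5-byte jump can be written and monitored within a single guest page; a jump
     * straddling two pages would complicate page-level monitoring and invalidation of the patch.) */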
2737 if ( (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2738 && PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + SIZEOF_NEARJUMP32)
2739 )
2740 {
2741 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
2742#ifdef DEBUG_sandervl
2743//// AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
2744#endif
2745 rc = VERR_PATCHING_REFUSED;
2746 goto failure;
2747 }
2748
2749 pPatch->nrPatch2GuestRecs = 0;
2750 pInstrStart = pInstrGC;
2751
2752#ifdef PATM_ENABLE_CALL
2753 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
2754#endif
2755
2756 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2757 pPatch->uCurPatchOffset = 0;
2758
2759 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2760
2761 if ((pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER)) == PATMFL_IDTHANDLER)
2762 {
2763 Assert(pPatch->flags & PATMFL_INTHANDLER);
2764
2765 /* Install fake cli patch (to clear the virtual IF and check int xx parameters) */
2766 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2767 if (RT_FAILURE(rc))
2768 goto failure;
2769 }
2770
2771 /***************************************************************************************************************************/
2772 /** @note We can't insert *any* code before a sysenter handler; some linux guests have an invalid stack at this point!!!!! */
2773 /***************************************************************************************************************************/
2774#ifdef VBOX_WITH_STATISTICS
2775 if (!(pPatch->flags & PATMFL_SYSENTER))
2776 {
2777 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2778 if (RT_FAILURE(rc))
2779 goto failure;
2780 }
2781#endif
2782
2783 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pPatch);
2784 if (rc != VINF_SUCCESS)
2785 {
2786 Log(("PATMR3PatchCli: patmRecompileCodeStream failed with %d\n", rc));
2787 goto failure;
2788 }
2789
2790 /* Calculated during analysis. */
2791 if (pPatch->cbPatchBlockSize < SIZEOF_NEARJUMP32)
2792 {
2793 /* Most likely cause: we encountered an illegal instruction very early on. */
2794 /** @todo could turn it into an int3 callable patch. */
2795 Log(("PATMR3PatchBlock: patch block too small -> refuse\n"));
2796 rc = VERR_PATCHING_REFUSED;
2797 goto failure;
2798 }
2799
2800 /* size of patch block */
2801 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
2802
2803
2804 /* Update free pointer in patch memory. */
2805 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
2806 /* Round to next 8 byte boundary. */
2807 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
2808
2809 /*
2810 * Insert into patch to guest lookup tree
2811 */
2812 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
2813 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
2814 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
2815 AssertMsg(rc, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
2816 if (!rc)
2817 {
2818 rc = VERR_PATCHING_REFUSED;
2819 goto failure;
2820 }
2821
2822 /* Note that patmr3SetBranchTargets can install additional patches!! */
2823 rc = patmr3SetBranchTargets(pVM, pPatch);
2824 if (rc != VINF_SUCCESS)
2825 {
2826 Log(("PATMR3PatchCli: patmr3SetBranchTargets failed with %d\n", rc));
2827 goto failure;
2828 }
2829
2830#ifdef LOG_ENABLED
2831 Log(("Patch code ----------------------------------------------------------\n"));
2832 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
2833 Log(("Patch code ends -----------------------------------------------------\n"));
2834#endif
2835
2836 /* make a copy of the guest code bytes that will be overwritten */
2837 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
2838
2839 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
2840 AssertRC(rc);
2841
2842 if (pPatch->flags & PATMFL_INT3_REPLACEMENT_BLOCK)
2843 {
2844 /*uint8_t ASMInt3 = 0xCC; - unused */
2845
2846 Log(("PATMR3PatchBlock %RRv -> int 3 callable patch.\n", pPatch->pPrivInstrGC));
2847 /* Replace first opcode byte with 'int 3'. */
2848 rc = patmActivateInt3Patch(pVM, pPatch);
2849 if (RT_FAILURE(rc))
2850 goto failure;
2851
2852 /* normal patch can be turned into an int3 patch -> clear patch jump installation flag. */
2853 pPatch->flags &= ~PATMFL_MUST_INSTALL_PATCHJMP;
2854
2855 pPatch->flags &= ~PATMFL_INSTR_HINT;
2856 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
2857 }
2858 else
2859 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
2860 {
2861 Assert(!(pPatch->flags & (PATMFL_IDTHANDLER|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_SYSENTER|PATMFL_INT3_REPLACEMENT_BLOCK)));
2862 /* now insert a jump in the guest code */
2863 rc = patmGenJumpToPatch(pVM, pPatch, true);
2864 AssertRC(rc);
2865 if (RT_FAILURE(rc))
2866 goto failure;
2867
2868 }
2869
2870#ifdef LOG_ENABLED
2871 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2872 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput, PATMREAD_RAWCODE);
2873 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
2874#endif
2875
2876 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2877 pPatch->pTempInfo->nrIllegalInstr = 0;
2878
2879 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
2880
2881 pPatch->uState = PATCH_ENABLED;
2882 return VINF_SUCCESS;
2883
2884failure:
2885 if (pPatchRec->CoreOffset.Key)
2886 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
2887
2888 patmEmptyTree(pVM, &pPatch->FixupTree);
2889 pPatch->nrFixups = 0;
2890
2891 patmEmptyTree(pVM, &pPatch->JumpTree);
2892 pPatch->nrJumpRecs = 0;
2893
2894 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
2895 pPatch->pTempInfo->nrIllegalInstr = 0;
2896
2897 /* Turn this cli patch into a dummy. */
2898 pPatch->uState = PATCH_REFUSED;
2899 pPatch->pPatchBlockOffset = 0;
2900
2901 // Give back the patch memory we no longer need
2902 Assert(orgOffsetPatchMem != (uint32_t)~0);
2903 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
2904
2905 return rc;
2906}
2907
2908/**
2909 * Patch IDT handler
2910 *
2911 * @returns VBox status code.
2912 * @param pVM The VM to operate on.
2913 * @param pInstrGC Guest context pointer to privileged instruction
2914 * @param pInstrHC Host context pointer to privileged instruction
2915 * @param uOpSize Size of starting instruction
2916 * @param pPatchRec Patch record
2917 *
2918 * @note returns failure if patching is not allowed or possible
2919 *
2920 */
2921static int patmIdtHandler(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC,
2922 uint32_t uOpSize, PPATMPATCHREC pPatchRec)
2923{
2924 PPATCHINFO pPatch = &pPatchRec->patch;
2925 bool disret;
2926 DISCPUSTATE cpuPush, cpuJmp;
2927 uint32_t opsize;
2928 RTRCPTR pCurInstrGC = pInstrGC;
2929 uint8_t *pCurInstrHC = pInstrHC;
2930 uint32_t orgOffsetPatchMem = ~0;
2931
2932 /*
2933 * In Linux it's often the case that many interrupt handlers push a predefined value onto the stack
2934 * and then jump to a common entrypoint. In order not to waste a lot of memory, we will check for this
2935 * condition here and only patch the common entrypoint once.
2936 */
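    /* A typical per-vector stub looks roughly like this (sketch):
     *     push <vector or dummy error code>
     *     jmp  common_interrupt_handler
     * Only the common handler gets a full patch; each stub patch merely duplicates the push and
     * jumps into the common handler's patch code. */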
2937 cpuPush.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2938 disret = PATMR3DISInstr(pVM, pPatch, &cpuPush, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2939 Assert(disret);
2940 if (disret && cpuPush.pCurInstr->opcode == OP_PUSH)
2941 {
2942 RTRCPTR pJmpInstrGC;
2943 int rc;
2944
2945 pCurInstrGC += opsize;
2946 pCurInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pCurInstrGC);
2947
2948 cpuJmp.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
2949 disret = PATMR3DISInstr(pVM, pPatch, &cpuJmp, pCurInstrGC, pCurInstrHC, &opsize, NULL);
2950 if ( disret
2951 && cpuJmp.pCurInstr->opcode == OP_JMP
2952 && (pJmpInstrGC = PATMResolveBranch(&cpuJmp, pCurInstrGC))
2953 )
2954 {
2955 PPATMPATCHREC pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2956 if (pJmpPatch == 0)
2957 {
2958 /* Patch it first! */
2959 rc = PATMR3InstallPatch(pVM, pJmpInstrGC, pPatch->flags | PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT);
2960 if (rc != VINF_SUCCESS)
2961 goto failure;
2962 pJmpPatch = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pJmpInstrGC);
2963 Assert(pJmpPatch);
2964 }
2965 if (pJmpPatch->patch.uState != PATCH_ENABLED)
2966 goto failure;
2967
2968 /* save original offset (in case of failures later on) */
2969 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
2970
2971 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
2972 pPatch->uCurPatchOffset = 0;
2973 pPatch->nrPatch2GuestRecs = 0;
2974
2975#ifdef VBOX_WITH_STATISTICS
2976 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
2977 if (RT_FAILURE(rc))
2978 goto failure;
2979#endif
2980
2981 /* Install fake cli patch (to clear the virtual IF) */
2982 rc = patmPatchGenIntEntry(pVM, pPatch, pInstrGC);
2983 if (RT_FAILURE(rc))
2984 goto failure;
2985
2986 /* Add lookup record for patch to guest address translation (for the push) */
2987 patmr3AddP2GLookupRecord(pVM, pPatch, PATCHCODE_PTR_HC(pPatch) + pPatch->uCurPatchOffset, pInstrGC, PATM_LOOKUP_BOTHDIR);
2988
2989 /* Duplicate push. */
2990 rc = patmPatchGenDuplicate(pVM, pPatch, &cpuPush, pInstrGC);
2991 if (RT_FAILURE(rc))
2992 goto failure;
2993
2994 /* Generate jump to common entrypoint. */
2995 rc = patmPatchGenPatchJump(pVM, pPatch, pCurInstrGC, PATCHCODE_PTR_GC(&pJmpPatch->patch));
2996 if (RT_FAILURE(rc))
2997 goto failure;
2998
2999 /* size of patch block */
3000 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3001
3002 /* Update free pointer in patch memory. */
3003 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3004 /* Round to next 8 byte boundary */
3005 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3006
3007 /* There's no jump from guest to patch code. */
3008 pPatch->cbPatchJump = 0;
3009
3010
3011#ifdef LOG_ENABLED
3012 Log(("Patch code ----------------------------------------------------------\n"));
3013 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
3014 Log(("Patch code ends -----------------------------------------------------\n"));
3015#endif
3016 Log(("Successfully installed IDT handler patch at %RRv\n", pInstrGC));
3017
3018 /*
3019 * Insert into patch to guest lookup tree
3020 */
3021 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3022 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3023 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3024 AssertMsg(rc, ("RTAvlULInsert failed for %x\n", pPatchRec->CoreOffset.Key));
3025
3026 pPatch->uState = PATCH_ENABLED;
3027
3028 return VINF_SUCCESS;
3029 }
3030 }
3031failure:
3032 /* Give back the patch memory we no longer need */
3033 if (orgOffsetPatchMem != (uint32_t)~0)
3034 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3035
3036 return PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, OP_CLI, uOpSize, pPatchRec);
3037}
3038
3039/**
3040 * Install a trampoline to call a guest trap handler directly
3041 *
3042 * @returns VBox status code.
3043 * @param pVM The VM to operate on.
3044 * @param pInstrGC Guest context pointer to privileged instruction
3045 * @param pPatchRec Patch record
3046 *
3047 */
3048static int patmInstallTrapTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3049{
3050 PPATCHINFO pPatch = &pPatchRec->patch;
3051 int rc = VERR_PATCHING_REFUSED;
3052 uint32_t orgOffsetPatchMem = ~0;
3053#ifdef LOG_ENABLED
3054 bool disret;
3055 DISCPUSTATE cpu;
3056 uint32_t opsize;
3057 char szOutput[256];
3058#endif
3059
3060 // save original offset (in case of failures later on)
3061 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3062
3063 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3064 pPatch->uCurPatchOffset = 0;
3065 pPatch->nrPatch2GuestRecs = 0;
3066
3067#ifdef VBOX_WITH_STATISTICS
3068 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3069 if (RT_FAILURE(rc))
3070 goto failure;
3071#endif
3072
3073 rc = patmPatchGenTrapEntry(pVM, pPatch, pInstrGC);
3074 if (RT_FAILURE(rc))
3075 goto failure;
3076
3077 /* size of patch block */
3078 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3079
3080 /* Update free pointer in patch memory. */
3081 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3082 /* Round to next 8 byte boundary */
3083 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3084
3085 /* There's no jump from guest to patch code. */
3086 pPatch->cbPatchJump = 0;
3087
3088#ifdef LOG_ENABLED
3089 Log(("Patch code ----------------------------------------------------------\n"));
3090 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
3091 Log(("Patch code ends -----------------------------------------------------\n"));
3092#endif
3093
3094#ifdef LOG_ENABLED
3095 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3096 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3097 Log(("TRAP handler patch: %s", szOutput));
3098#endif
3099 Log(("Successfully installed Trap Trampoline patch at %RRv\n", pInstrGC));
3100
3101 /*
3102 * Insert into patch to guest lookup tree
3103 */
3104 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3105 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3106 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3107    AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3108
3109 pPatch->uState = PATCH_ENABLED;
3110 return VINF_SUCCESS;
3111
3112failure:
3113 AssertMsgFailed(("Failed to install trap handler trampoline!!\n"));
3114
3115    /* Turn this patch into a dummy. */
3116 pPatch->uState = PATCH_REFUSED;
3117 pPatch->pPatchBlockOffset = 0;
3118
3119 /* Give back the patch memory we no longer need */
3120 Assert(orgOffsetPatchMem != (uint32_t)~0);
3121 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3122
3123 return rc;
3124}
3125
3126
3127#ifdef LOG_ENABLED
3128/**
3129 * Check if the instruction is patched as a common idt handler
3130 *
3131 * @returns true or false
3132 * @param pVM The VM to operate on.
3133 * @param   pInstrGC    Guest context pointer to the instruction
3134 *
3135 */
3136static bool patmIsCommonIDTHandlerPatch(PVM pVM, RTRCPTR pInstrGC)
3137{
3138 PPATMPATCHREC pRec;
3139
3140 pRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
3141 if (pRec && pRec->patch.flags & PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT)
3142 return true;
3143 return false;
3144}
3145#endif //LOG_ENABLED
3146
3147
3148/**
3149 * Duplicates a complete function
3150 *
3151 * @returns VBox status code.
3152 * @param pVM The VM to operate on.
3153 * @param   pInstrGC    Guest context pointer to privileged instruction
3154 * @param pPatchRec Patch record
3155 *
3156 */
3157static int patmDuplicateFunction(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3158{
3159 PPATCHINFO pPatch = &pPatchRec->patch;
3160 int rc = VERR_PATCHING_REFUSED;
3161 DISCPUSTATE cpu;
3162 uint32_t orgOffsetPatchMem = ~0;
3163
3164 Log(("patmDuplicateFunction %RRv\n", pInstrGC));
3165 /* Save original offset (in case of failures later on). */
3166 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3167
3168 /* We will not go on indefinitely with call instruction handling. */
3169 if (pVM->patm.s.ulCallDepth > PATM_MAX_CALL_DEPTH)
3170 {
3171        Log(("patmDuplicateFunction: maximum call depth reached!!\n"));
3172 return VERR_PATCHING_REFUSED;
3173 }
3174
3175 pVM->patm.s.ulCallDepth++;
3176
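    /* The whole guest function is recompiled into patch memory below (patmRecompileCodeStream);
     * calls and jumps inside it may in turn cause further functions to be duplicated via
     * patmr3SetBranchTargets, which is what the call depth guard above protects against. */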
3177#ifdef PATM_ENABLE_CALL
3178 pPatch->flags |= PATMFL_SUPPORT_CALLS | PATMFL_SUPPORT_INDIRECT_CALLS;
3179#endif
3180
3181 Assert(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION));
3182
3183 pPatch->nrPatch2GuestRecs = 0;
3184 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3185 pPatch->uCurPatchOffset = 0;
3186
3187 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3188
3189 /** @note Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3190 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3191 if (RT_FAILURE(rc))
3192 goto failure;
3193
3194#ifdef VBOX_WITH_STATISTICS
3195 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3196 if (RT_FAILURE(rc))
3197 goto failure;
3198#endif
3199 rc = patmRecompileCodeStream(pVM, pInstrGC, pInstrGC, patmRecompileCallback, pPatch);
3200 if (rc != VINF_SUCCESS)
3201 {
3202        Log(("patmDuplicateFunction: patmRecompileCodeStream failed with %d\n", rc));
3203 goto failure;
3204 }
3205
3206 //size of patch block
3207 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3208
3209 //update free pointer in patch memory
3210 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3211 /* Round to next 8 byte boundary. */
3212 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3213
3214 pPatch->uState = PATCH_ENABLED;
3215
3216 /*
3217 * Insert into patch to guest lookup tree
3218 */
3219 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3220 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3221 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3222 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3223 if (!rc)
3224 {
3225 rc = VERR_PATCHING_REFUSED;
3226 goto failure;
3227 }
3228
3229 /* Note that patmr3SetBranchTargets can install additional patches!! */
3230 rc = patmr3SetBranchTargets(pVM, pPatch);
3231 if (rc != VINF_SUCCESS)
3232 {
3233        Log(("patmDuplicateFunction: patmr3SetBranchTargets failed with %d\n", rc));
3234 goto failure;
3235 }
3236
3237#ifdef LOG_ENABLED
3238 Log(("Patch code ----------------------------------------------------------\n"));
3239 patmr3DisasmCodeStream(pVM, PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_GC(pPatch), patmr3DisasmCallback, pPatch);
3240 Log(("Patch code ends -----------------------------------------------------\n"));
3241#endif
3242
3243 Log(("Successfully installed function duplication patch at %RRv\n", pInstrGC));
3244
3245 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3246 pPatch->pTempInfo->nrIllegalInstr = 0;
3247
3248 pVM->patm.s.ulCallDepth--;
3249 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledFunctionPatches);
3250 return VINF_SUCCESS;
3251
3252failure:
3253 if (pPatchRec->CoreOffset.Key)
3254 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3255
3256 patmEmptyTree(pVM, &pPatch->FixupTree);
3257 pPatch->nrFixups = 0;
3258
3259 patmEmptyTree(pVM, &pPatch->JumpTree);
3260 pPatch->nrJumpRecs = 0;
3261
3262 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3263 pPatch->pTempInfo->nrIllegalInstr = 0;
3264
3265    /* Turn this patch into a dummy. */
3266 pPatch->uState = PATCH_REFUSED;
3267 pPatch->pPatchBlockOffset = 0;
3268
3269 // Give back the patch memory we no longer need
3270 Assert(orgOffsetPatchMem != (uint32_t)~0);
3271 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3272
3273 pVM->patm.s.ulCallDepth--;
3274    Log(("patmDuplicateFunction %RRv failed!!\n", pInstrGC));
3275 return rc;
3276}
3277
3278/**
3279 * Creates trampoline code to jump inside an existing patch
3280 *
3281 * @returns VBox status code.
3282 * @param pVM The VM to operate on.
3283 * @param   pInstrGC    Guest context pointer to privileged instruction
3284 * @param pPatchRec Patch record
3285 *
3286 */
3287static int patmCreateTrampoline(PVM pVM, RTRCPTR pInstrGC, PPATMPATCHREC pPatchRec)
3288{
3289 PPATCHINFO pPatch = &pPatchRec->patch;
3290 RTRCPTR pPage, pPatchTargetGC = 0;
3291 uint32_t orgOffsetPatchMem = ~0;
3292 int rc = VERR_PATCHING_REFUSED;
3293
3294 Log(("patmCreateTrampoline %RRv\n", pInstrGC));
3295 /* Save original offset (in case of failures later on). */
3296 orgOffsetPatchMem = pVM->patm.s.offPatchMem;
3297
3298 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3299 /** @todo we already checked this before */
3300 pPage = pInstrGC & PAGE_BASE_GC_MASK;
3301
3302 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3303 if (pPatchPage)
3304 {
3305 uint32_t i;
3306
3307 for (i=0;i<pPatchPage->cCount;i++)
3308 {
3309 if (pPatchPage->aPatch[i])
3310 {
3311 PPATCHINFO pPatch = pPatchPage->aPatch[i];
3312
3313 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3314 && pPatch->uState == PATCH_ENABLED)
3315 {
3316 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pInstrGC);
3317 if (pPatchTargetGC)
3318 {
3319 uint32_t offsetPatch = pPatchTargetGC - pVM->patm.s.pPatchMemGC;
3320 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, offsetPatch, false);
3321 Assert(pPatchToGuestRec);
3322
3323 pPatchToGuestRec->fJumpTarget = true;
3324 Assert(pPatchTargetGC != pPatch->pPrivInstrGC);
3325 Log(("patmCreateTrampoline: generating jump to code inside patch at %RRv\n", pPatch->pPrivInstrGC));
3326 pPatch->flags |= PATMFL_EXTERNAL_JUMP_INSIDE;
3327 break;
3328 }
3329 }
3330 }
3331 }
3332 }
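    /* A trampoline is only generated when the jump target already lies inside an enabled
     * duplicated function patch; otherwise patching is refused (see the AssertReturn below). */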
3333 AssertReturn(pPatchPage && pPatchTargetGC, VERR_PATCHING_REFUSED);
3334
3335 pPatch->nrPatch2GuestRecs = 0;
3336 pPatch->pPatchBlockOffset = pVM->patm.s.offPatchMem;
3337 pPatch->uCurPatchOffset = 0;
3338
3339 /** @note Set the PATM interrupt flag here; it was cleared before the patched call. (!!!) */
3340 rc = patmPatchGenSetPIF(pVM, pPatch, pInstrGC);
3341 if (RT_FAILURE(rc))
3342 goto failure;
3343
3344#ifdef VBOX_WITH_STATISTICS
3345 rc = patmPatchGenStats(pVM, pPatch, pInstrGC);
3346 if (RT_FAILURE(rc))
3347 goto failure;
3348#endif
3349
3350 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC);
3351 if (RT_FAILURE(rc))
3352 goto failure;
3353
3354 /*
3355 * Insert into patch to guest lookup tree
3356 */
3357 LogFlow(("Insert %RRv patch offset %RRv\n", pPatchRec->patch.pPrivInstrGC, pPatch->pPatchBlockOffset));
3358 pPatchRec->CoreOffset.Key = pPatch->pPatchBlockOffset;
3359 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, &pPatchRec->CoreOffset);
3360 AssertMsg(rc, ("RTAvloU32Insert failed for %x\n", pPatchRec->CoreOffset.Key));
3361 if (!rc)
3362 {
3363 rc = VERR_PATCHING_REFUSED;
3364 goto failure;
3365 }
3366
3367 /* size of patch block */
3368 pPatch->cbPatchBlockSize = pPatch->uCurPatchOffset;
3369
3370 /* Update free pointer in patch memory. */
3371 pVM->patm.s.offPatchMem += pPatch->cbPatchBlockSize;
3372 /* Round to next 8 byte boundary */
3373 pVM->patm.s.offPatchMem = RT_ALIGN_32(pVM->patm.s.offPatchMem, 8);
3374
3375 /* There's no jump from guest to patch code. */
3376 pPatch->cbPatchJump = 0;
3377
3378 /* Enable the patch. */
3379 pPatch->uState = PATCH_ENABLED;
3380 /* We allow this patch to be called as a function. */
3381 pPatch->flags |= PATMFL_CALLABLE_AS_FUNCTION;
3382 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledTrampoline);
3383 return VINF_SUCCESS;
3384
3385failure:
3386 if (pPatchRec->CoreOffset.Key)
3387 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->CoreOffset.Key);
3388
3389 patmEmptyTree(pVM, &pPatch->FixupTree);
3390 pPatch->nrFixups = 0;
3391
3392 patmEmptyTree(pVM, &pPatch->JumpTree);
3393 pPatch->nrJumpRecs = 0;
3394
3395 patmEmptyTree(pVM, &pPatch->pTempInfo->IllegalInstrTree);
3396 pPatch->pTempInfo->nrIllegalInstr = 0;
3397
3398    /* Turn this patch into a dummy. */
3399 pPatch->uState = PATCH_REFUSED;
3400 pPatch->pPatchBlockOffset = 0;
3401
3402 // Give back the patch memory we no longer need
3403 Assert(orgOffsetPatchMem != (uint32_t)~0);
3404 pVM->patm.s.offPatchMem = orgOffsetPatchMem;
3405
3406 return rc;
3407}
3408
3409
3410/**
3411 * Patch branch target function for call/jump at specified location.
3412 * (in response to a VINF_PATM_DUPLICATE_FUNCTION GC exit reason)
3413 *
3414 * @returns VBox status code.
3415 * @param pVM The VM to operate on.
3416 * @param pCtx Guest context
3417 *
3418 */
3419VMMR3DECL(int) PATMR3DuplicateFunctionRequest(PVM pVM, PCPUMCTX pCtx)
3420{
3421 RTRCPTR pBranchTarget, pPage;
3422 int rc;
3423 RTRCPTR pPatchTargetGC = 0;
3424
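    /* Register protocol used by the patch code that raised this request (as used below):
     * edx = branch target, edi = patch address of the branch lookup cache to update
     * (see PATMAddBranchToLookupCache), eax = resulting patch address relative to
     * pPatchMemGC, or 0 if patching failed. */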
3425 pBranchTarget = pCtx->edx;
3426 pBranchTarget = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pBranchTarget);
3427
3428 /* First we check if the duplicate function target lies in some existing function patch already. Will save some space. */
3429 pPage = pBranchTarget & PAGE_BASE_GC_MASK;
3430
3431 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
3432 if (pPatchPage)
3433 {
3434 uint32_t i;
3435
3436 for (i=0;i<pPatchPage->cCount;i++)
3437 {
3438 if (pPatchPage->aPatch[i])
3439 {
3440 PPATCHINFO pPatch = pPatchPage->aPatch[i];
3441
3442 if ( (pPatch->flags & PATMFL_DUPLICATE_FUNCTION)
3443 && pPatch->uState == PATCH_ENABLED)
3444 {
3445 pPatchTargetGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pBranchTarget);
3446 if (pPatchTargetGC)
3447 {
3448 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateUseExisting);
3449 break;
3450 }
3451 }
3452 }
3453 }
3454 }
3455
3456 if (pPatchTargetGC)
3457 {
3458 /* Create a trampoline that also sets PATM_INTERRUPTFLAG. */
3459 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_TRAMPOLINE);
3460 }
3461 else
3462 {
3463 rc = PATMR3InstallPatch(pVM, pBranchTarget, PATMFL_CODE32 | PATMFL_DUPLICATE_FUNCTION);
3464 }
3465
3466 if (rc == VINF_SUCCESS)
3467 {
3468 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pBranchTarget);
3469 Assert(pPatchTargetGC);
3470 }
3471
3472 if (pPatchTargetGC)
3473 {
3474 pCtx->eax = pPatchTargetGC;
3475 pCtx->eax = pCtx->eax - (RTRCUINTPTR)pVM->patm.s.pPatchMemGC; /* make it relative */
3476 }
3477 else
3478 {
3479 /* We add a dummy entry into the lookup cache so we won't get bombarded with the same requests over and over again. */
3480 pCtx->eax = 0;
3481 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQFailed);
3482 }
3483 Assert(PATMIsPatchGCAddr(pVM, pCtx->edi));
3484 rc = PATMAddBranchToLookupCache(pVM, pCtx->edi, pBranchTarget, pCtx->eax);
3485 AssertRC(rc);
3486
3487 pCtx->eip += PATM_ILLEGAL_INSTR_SIZE;
3488 STAM_COUNTER_INC(&pVM->patm.s.StatDuplicateREQSuccess);
3489 return VINF_SUCCESS;
3490}
3491
3492/**
3493 * Replaces a function call by a call to an existing function duplicate (or jmp -> jmp)
3494 *
3495 * @returns VBox status code.
3496 * @param pVM The VM to operate on.
3497 * @param pCpu Disassembly CPU structure ptr
3498 * @param   pInstrGC    Guest context pointer to privileged instruction
3499 * @param pPatch Patch record
3500 *
3501 */
3502static int patmReplaceFunctionCall(PVM pVM, DISCPUSTATE *pCpu, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3503{
3504 int rc = VERR_PATCHING_REFUSED;
3505 DISCPUSTATE cpu;
3506 RTRCPTR pTargetGC;
3507 PPATMPATCHREC pPatchFunction;
3508 uint32_t opsize;
3509 bool disret;
3510#ifdef LOG_ENABLED
3511 char szOutput[256];
3512#endif
3513
3514 Assert(pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL);
3515 Assert((pCpu->pCurInstr->opcode == OP_CALL || pCpu->pCurInstr->opcode == OP_JMP) && pCpu->opsize == SIZEOF_NEARJUMP32);
3516
3517 if ((pCpu->pCurInstr->opcode != OP_CALL && pCpu->pCurInstr->opcode != OP_JMP) || pCpu->opsize != SIZEOF_NEARJUMP32)
3518 {
3519 rc = VERR_PATCHING_REFUSED;
3520 goto failure;
3521 }
3522
3523 pTargetGC = PATMResolveBranch(pCpu, pInstrGC);
3524 if (pTargetGC == 0)
3525 {
3526 Log(("We don't support far jumps here!! (%08X)\n", pCpu->param1.flags));
3527 rc = VERR_PATCHING_REFUSED;
3528 goto failure;
3529 }
3530
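    /* Look up an existing function duplicate for the branch target. If there is none, the call
     * may go through a stub (call -> jmp dest); in that case follow the single jump and retry. */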
3531 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3532 if (pPatchFunction == NULL)
3533 {
3534 for(;;)
3535 {
3536 /* It could be an indirect call (call -> jmp dest).
3537 * Note that it's dangerous to assume the jump will never change...
3538 */
3539 uint8_t *pTmpInstrHC;
3540
3541 pTmpInstrHC = PATMGCVirtToHCVirt(pVM, pPatch, pTargetGC);
3542 Assert(pTmpInstrHC);
3543 if (pTmpInstrHC == 0)
3544 break;
3545
3546 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3547 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pTargetGC, pTmpInstrHC, &opsize, NULL);
3548 if (disret == false || cpu.pCurInstr->opcode != OP_JMP)
3549 break;
3550
3551 pTargetGC = PATMResolveBranch(&cpu, pTargetGC);
3552 if (pTargetGC == 0)
3553 {
3554 break;
3555 }
3556
3557 pPatchFunction = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pTargetGC);
3558 break;
3559 }
3560 if (pPatchFunction == 0)
3561 {
3562 AssertMsgFailed(("Unable to find duplicate function %RRv\n", pTargetGC));
3563 rc = VERR_PATCHING_REFUSED;
3564 goto failure;
3565 }
3566 }
3567
3568 // make a copy of the guest code bytes that will be overwritten
3569 pPatch->cbPatchJump = SIZEOF_NEARJUMP32;
3570
3571 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3572 AssertRC(rc);
3573
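    /* Sketch of the rewrite: the 5 byte near call/jmp (E8/E9 + rel32) in guest code is changed
     * so it branches to the entry of the duplicated function inside patch memory instead of the
     * original guest target. */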
3574 /* Now replace the original call in the guest code */
3575 rc = patmGenCallToPatch(pVM, pPatch, PATCHCODE_PTR_GC(&pPatchFunction->patch), true);
3576 AssertRC(rc);
3577 if (RT_FAILURE(rc))
3578 goto failure;
3579
3580 /* Lowest and highest address for write monitoring. */
3581 pPatch->pInstrGCLowest = pInstrGC;
3582 pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;
3583
3584#ifdef LOG_ENABLED
3585 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3586 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3587 Log(("Call patch: %s", szOutput));
3588#endif
3589
3590 Log(("Successfully installed function replacement patch at %RRv\n", pInstrGC));
3591
3592 pPatch->uState = PATCH_ENABLED;
3593 return VINF_SUCCESS;
3594
3595failure:
3596 /* Turn this patch into a dummy. */
3597 pPatch->uState = PATCH_REFUSED;
3598
3599 return rc;
3600}
3601
3602/**
3603 * Replace the address in an MMIO instruction with the cached version.
3604 *
3605 * @returns VBox status code.
3606 * @param pVM The VM to operate on.
3607 * @param   pInstrGC    Guest context pointer to privileged instruction
3608 * @param pCpu Disassembly CPU structure ptr
3609 * @param pPatch Patch record
3610 *
3611 * @note returns failure if patching is not allowed or possible
3612 *
3613 */
3614static int patmPatchMMIOInstr(PVM pVM, RTRCPTR pInstrGC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3615{
3616 uint8_t *pPB;
3617 int rc = VERR_PATCHING_REFUSED;
3618#ifdef LOG_ENABLED
3619 DISCPUSTATE cpu;
3620 uint32_t opsize;
3621 bool disret;
3622 char szOutput[256];
3623#endif
3624
3625 Assert(pVM->patm.s.mmio.pCachedData);
3626 if (!pVM->patm.s.mmio.pCachedData)
3627 goto failure;
3628
3629 if (pCpu->param2.flags != USE_DISPLACEMENT32)
3630 goto failure;
3631
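    /* The instruction must end in a 32 bit displacement; illustration only:
     *      a1 78 56 34 12      mov eax, [12345678h]    ; MMIO address
     *  ->  a1 xx xx xx xx      mov eax, [pCachedData]  ; last 4 bytes replaced below
     * The FIXUP_ABSOLUTE relocation record added below keeps the patched address valid. */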
3632 pPB = pPatch->pPrivInstrHC;
3633
3634 /* Add relocation record for cached data access. */
3635 if (patmPatchAddReloc32(pVM, pPatch, &pPB[pCpu->opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE, pPatch->pPrivInstrGC, pVM->patm.s.mmio.pCachedData) != VINF_SUCCESS)
3636 {
3637 Log(("Relocation failed for cached mmio address!!\n"));
3638 return VERR_PATCHING_REFUSED;
3639 }
3640#ifdef LOG_ENABLED
3641 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3642 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3643 Log(("MMIO patch old instruction: %s", szOutput));
3644#endif
3645
3646 /* Save original instruction. */
3647 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3648 AssertRC(rc);
3649
3650 pPatch->cbPatchJump = pPatch->cbPrivInstr; /* bit of a misnomer in this case; size of replacement instruction. */
3651
3652 /* Replace address with that of the cached item. */
3653 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pInstrGC + pCpu->opsize - sizeof(RTRCPTR), &pVM->patm.s.mmio.pCachedData, sizeof(RTRCPTR));
3654 AssertRC(rc);
3655 if (RT_FAILURE(rc))
3656 {
3657 goto failure;
3658 }
3659
3660#ifdef LOG_ENABLED
3661 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3662 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3663 Log(("MMIO patch: %s", szOutput));
3664#endif
3665 pVM->patm.s.mmio.pCachedData = 0;
3666 pVM->patm.s.mmio.GCPhys = 0;
3667 pPatch->uState = PATCH_ENABLED;
3668 return VINF_SUCCESS;
3669
3670failure:
3671 /* Turn this patch into a dummy. */
3672 pPatch->uState = PATCH_REFUSED;
3673
3674 return rc;
3675}
3676
3677
3678/**
3679 * Replace the address in an MMIO instruction with the cached version. (instruction is part of an existing patch)
3680 *
3681 * @returns VBox status code.
3682 * @param pVM The VM to operate on.
3683 * @param   pInstrGC    Guest context pointer to privileged instruction
3684 * @param pPatch Patch record
3685 *
3686 * @note returns failure if patching is not allowed or possible
3687 *
3688 */
3689static int patmPatchPATMMMIOInstr(PVM pVM, RTRCPTR pInstrGC, PPATCHINFO pPatch)
3690{
3691 DISCPUSTATE cpu;
3692 uint32_t opsize;
3693 bool disret;
3694 uint8_t *pInstrHC;
3695#ifdef LOG_ENABLED
3696 char szOutput[256];
3697#endif
3698
3699 AssertReturn(pVM->patm.s.mmio.pCachedData, VERR_INVALID_PARAMETER);
3700
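    /* This variant deals with an instruction that lives inside patch memory itself, so the
     * displacement can be poked directly through the HC mapping instead of going through PGM. */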
3701 /* Convert GC to HC address. */
3702 pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pInstrGC);
3703 AssertReturn(pInstrHC, VERR_PATCHING_REFUSED);
3704
3705 /* Disassemble mmio instruction. */
3706 cpu.mode = pPatch->uOpMode;
3707 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
3708 if (disret == false)
3709 {
3710 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
3711 return VERR_PATCHING_REFUSED;
3712 }
3713
3714 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
3715 if (opsize > MAX_INSTR_SIZE)
3716 return VERR_PATCHING_REFUSED;
3717 if (cpu.param2.flags != USE_DISPLACEMENT32)
3718 return VERR_PATCHING_REFUSED;
3719
3720 /* Add relocation record for cached data access. */
3721 if (patmPatchAddReloc32(pVM, pPatch, &pInstrHC[cpu.opsize - sizeof(RTRCPTR)], FIXUP_ABSOLUTE) != VINF_SUCCESS)
3722 {
3723 Log(("Relocation failed for cached mmio address!!\n"));
3724 return VERR_PATCHING_REFUSED;
3725 }
3726 /* Replace address with that of the cached item. */
3727 *(RTRCPTR *)&pInstrHC[cpu.opsize - sizeof(RTRCPTR)] = pVM->patm.s.mmio.pCachedData;
3728
3729 /* Lowest and highest address for write monitoring. */
3730 pPatch->pInstrGCLowest = pInstrGC;
3731 pPatch->pInstrGCHighest = pInstrGC + cpu.opsize;
3732
3733#ifdef LOG_ENABLED
3734 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3735 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
3736 Log(("MMIO patch: %s", szOutput));
3737#endif
3738
3739 pVM->patm.s.mmio.pCachedData = 0;
3740 pVM->patm.s.mmio.GCPhys = 0;
3741 return VINF_SUCCESS;
3742}
3743
3744/**
3745 * Activates an int3 patch
3746 *
3747 * @returns VBox status code.
3748 * @param pVM The VM to operate on.
3749 * @param pPatch Patch record
3750 */
3751static int patmActivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3752{
3753 uint8_t ASMInt3 = 0xCC;
3754 int rc;
3755
3756 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3757 Assert(pPatch->uState != PATCH_ENABLED);
3758
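    /* Only the first opcode byte is overwritten; the original bytes were saved in aPrivInstr
     * beforehand, so patmDeactivateInt3Patch can restore them again. */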
3759 /* Replace first opcode byte with 'int 3'. */
3760 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, &ASMInt3, sizeof(ASMInt3));
3761 AssertRC(rc);
3762
3763 pPatch->cbPatchJump = sizeof(ASMInt3);
3764
3765 return rc;
3766}
3767
3768/**
3769 * Deactivates an int3 patch
3770 *
3771 * @returns VBox status code.
3772 * @param pVM The VM to operate on.
3773 * @param pPatch Patch record
3774 */
3775static int patmDeactivateInt3Patch(PVM pVM, PPATCHINFO pPatch)
3776{
3777 uint8_t ASMInt3 = 0xCC;
3778 int rc;
3779
3780 Assert(pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK));
3781 Assert(pPatch->uState == PATCH_ENABLED || pPatch->uState == PATCH_DIRTY);
3782
3783 /* Restore first opcode byte. */
3784 rc = PGMPhysSimpleDirtyWriteGCPtr(VMMGetCpu0(pVM), pPatch->pPrivInstrGC, pPatch->aPrivInstr, sizeof(ASMInt3));
3785 AssertRC(rc);
3786 return rc;
3787}
3788
3789/**
3790 * Replace an instruction with a breakpoint (0xCC) that is handled dynamically in the guest context.
3791 *
3792 * @returns VBox status code.
3793 * @param pVM The VM to operate on.
3794 * @param   pInstrGC    Guest context pointer to privileged instruction
3795 * @param   pInstrHC    Host context pointer to privileged instruction
3796 * @param pCpu Disassembly CPU structure ptr
3797 * @param pPatch Patch record
3798 *
3799 * @note returns failure if patching is not allowed or possible
3800 *
3801 */
3802VMMR3DECL(int) PATMR3PatchInstrInt3(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATCHINFO pPatch)
3803{
3804 uint8_t ASMInt3 = 0xCC;
3805 int rc;
3806
3807    /** @note Do not use patch memory here! It might be called during patch installation too. */
3808
3809#ifdef LOG_ENABLED
3810 DISCPUSTATE cpu;
3811 char szOutput[256];
3812 uint32_t opsize;
3813
3814 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3815 PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, szOutput);
3816 Log(("PATMR3PatchInstrInt3: %s", szOutput));
3817#endif
3818
3819 /* Save the original instruction. */
3820 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPrivInstr);
3821 AssertRC(rc);
3822 pPatch->cbPatchJump = sizeof(ASMInt3); /* bit of a misnomer in this case; size of replacement instruction. */
3823
3824 pPatch->flags |= PATMFL_INT3_REPLACEMENT;
3825
3826 /* Replace first opcode byte with 'int 3'. */
3827 rc = patmActivateInt3Patch(pVM, pPatch);
3828 if (RT_FAILURE(rc))
3829 goto failure;
3830
3831 /* Lowest and highest address for write monitoring. */
3832 pPatch->pInstrGCLowest = pInstrGC;
3833 pPatch->pInstrGCHighest = pInstrGC + pCpu->opsize;
3834
3835 pPatch->uState = PATCH_ENABLED;
3836 return VINF_SUCCESS;
3837
3838failure:
3839 /* Turn this patch into a dummy. */
3840 return VERR_PATCHING_REFUSED;
3841}
3842
3843#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
3844/**
3845 * Patch a jump instruction at specified location
3846 *
3847 * @returns VBox status code.
3848 * @param pVM The VM to operate on.
3849 * @param   pInstrGC    Guest context pointer to privileged instruction
3850 * @param   pInstrHC    Host context pointer to privileged instruction
3851 * @param pCpu Disassembly CPU structure ptr
3852 * @param pPatchRec Patch record
3853 *
3854 * @note returns failure if patching is not allowed or possible
3855 *
3856 */
3857int patmPatchJump(PVM pVM, RTRCPTR pInstrGC, R3PTRTYPE(uint8_t *) pInstrHC, DISCPUSTATE *pCpu, PPATMPATCHREC pPatchRec)
3858{
3859 PPATCHINFO pPatch = &pPatchRec->patch;
3860 int rc = VERR_PATCHING_REFUSED;
3861#ifdef LOG_ENABLED
3862 bool disret;
3863 DISCPUSTATE cpu;
3864 uint32_t opsize;
3865 char szOutput[256];
3866#endif
3867
3868 pPatch->pPatchBlockOffset = 0; /* doesn't use patch memory */
3869 pPatch->uCurPatchOffset = 0;
3870 pPatch->cbPatchBlockSize = 0;
3871 pPatch->flags |= PATMFL_SINGLE_INSTRUCTION;
3872
3873 /*
3874 * Instruction replacements such as these should never be interrupted. I've added code to EM.cpp to
3875 * make sure this never happens. (unless a trap is triggered (intentionally or not))
3876 */
3877 switch (pCpu->pCurInstr->opcode)
3878 {
3879 case OP_JO:
3880 case OP_JNO:
3881 case OP_JC:
3882 case OP_JNC:
3883 case OP_JE:
3884 case OP_JNE:
3885 case OP_JBE:
3886 case OP_JNBE:
3887 case OP_JS:
3888 case OP_JNS:
3889 case OP_JP:
3890 case OP_JNP:
3891 case OP_JL:
3892 case OP_JNL:
3893 case OP_JLE:
3894 case OP_JNLE:
3895 case OP_JMP:
3896 Assert(pPatch->flags & PATMFL_JUMP_CONFLICT);
3897 Assert(pCpu->param1.flags & USE_IMMEDIATE32_REL);
3898 if (!(pCpu->param1.flags & USE_IMMEDIATE32_REL))
3899 goto failure;
3900
3901 Assert(pCpu->opsize == SIZEOF_NEARJUMP32 || pCpu->opsize == SIZEOF_NEAR_COND_JUMP32);
3902 if (pCpu->opsize != SIZEOF_NEARJUMP32 && pCpu->opsize != SIZEOF_NEAR_COND_JUMP32)
3903 goto failure;
3904
3905 if (PAGE_ADDRESS(pInstrGC) != PAGE_ADDRESS(pInstrGC + pCpu->opsize))
3906 {
3907 STAM_COUNTER_INC(&pVM->patm.s.StatPageBoundaryCrossed);
3908 AssertMsgFailed(("Patch jump would cross page boundary -> refuse!!\n"));
3909 rc = VERR_PATCHING_REFUSED;
3910 goto failure;
3911 }
3912
3913 break;
3914
3915 default:
3916 goto failure;
3917 }
3918
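    /* Only relative near jumps qualify: a 5 byte 'jmp rel32' (E9) or a 6 byte 'jcc rel32'
     * (0F 8x), and the bytes to be overwritten may not cross a page boundary (checked above). */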
3919 // make a copy of the guest code bytes that will be overwritten
3920 Assert(pCpu->opsize <= sizeof(pPatch->aPrivInstr));
3921 Assert(pCpu->opsize >= SIZEOF_NEARJUMP32);
3922 pPatch->cbPatchJump = pCpu->opsize;
3923
3924 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pPatch->aPrivInstr, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
3925 AssertRC(rc);
3926
3927 /* Now insert a jump in the guest code. */
3928 /*
3929 * A conflict jump patch needs to be treated differently; we'll just replace the relative jump address with one that
3930 * references the target instruction in the conflict patch.
3931 */
3932 RTRCPTR pJmpDest = PATMR3GuestGCPtrToPatchGCPtr(pVM, pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval);
3933
3934 AssertMsg(pJmpDest, ("PATMR3GuestGCPtrToPatchGCPtr failed for %RRv\n", pInstrGC + pCpu->opsize + (int32_t)pCpu->param1.parval));
3935 pPatch->pPatchJumpDestGC = pJmpDest;
3936
3937 rc = patmGenJumpToPatch(pVM, pPatch, true);
3938 AssertRC(rc);
3939 if (RT_FAILURE(rc))
3940 goto failure;
3941
3942 pPatch->flags |= PATMFL_MUST_INSTALL_PATCHJMP;
3943
3944#ifdef LOG_ENABLED
3945 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
3946 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC, pPatch->pPrivInstrHC, &opsize, szOutput);
3947 Log(("%s patch: %s", patmGetInstructionString(pPatch->opcode, pPatch->flags), szOutput));
3948#endif
3949
3950 Log(("Successfully installed %s patch at %RRv\n", patmGetInstructionString(pPatch->opcode, pPatch->flags), pInstrGC));
3951
3952 STAM_COUNTER_INC(&pVM->patm.s.StatInstalledJump);
3953
3954 /* Lowest and highest address for write monitoring. */
3955 pPatch->pInstrGCLowest = pInstrGC;
3956 pPatch->pInstrGCHighest = pInstrGC + pPatch->cbPatchJump;
3957
3958 pPatch->uState = PATCH_ENABLED;
3959 return VINF_SUCCESS;
3960
3961failure:
3962    /* Turn this patch into a dummy. */
3963 pPatch->uState = PATCH_REFUSED;
3964
3965 return rc;
3966}
3967#endif /* PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES */
3968
3969
3970/**
3971 * Gives hint to PATM about supervisor guest instructions
3972 *
3973 * @returns VBox status code.
3974 * @param pVM The VM to operate on.
3975 * @param   pInstrGC    Guest context pointer to privileged instruction
3976 * @param flags Patch flags
3977 */
3978VMMR3DECL(int) PATMR3AddHint(PVM pVM, RTRCPTR pInstrGC, uint32_t flags)
3979{
3980 Assert(pInstrGC);
3981 Assert(flags == PATMFL_CODE32);
3982
3983 Log(("PATMR3AddHint %RRv\n", pInstrGC));
3984 return PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_INSTR_HINT);
3985}
3986
3987/**
3988 * Patch privileged instruction at specified location
3989 *
3990 * @returns VBox status code.
3991 * @param pVM The VM to operate on.
3992 * @param   pInstrGC    Guest context pointer to privileged instruction (0:32 flat address)
3993 * @param flags Patch flags
3994 *
3995 * @note returns failure if patching is not allowed or possible
3996 */
3997VMMR3DECL(int) PATMR3InstallPatch(PVM pVM, RTRCPTR pInstrGC, uint64_t flags)
3998{
3999 DISCPUSTATE cpu;
4000 R3PTRTYPE(uint8_t *) pInstrHC;
4001 uint32_t opsize;
4002 PPATMPATCHREC pPatchRec;
4003 PCPUMCTX pCtx = 0;
4004 bool disret;
4005 int rc;
4006 PVMCPU pVCpu = VMMGetCpu0(pVM);
4007
4008 if (!pVM || pInstrGC == 0 || (flags & ~(PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_SYSENTER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_REPLACE_FUNCTION_CALL|PATMFL_GUEST_SPECIFIC|PATMFL_INT3_REPLACEMENT|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT|PATMFL_MMIO_ACCESS|PATMFL_TRAMPOLINE|PATMFL_INSTR_HINT|PATMFL_JUMP_CONFLICT)))
4009 {
4010 AssertFailed();
4011 return VERR_INVALID_PARAMETER;
4012 }
4013
4014 if (PATMIsEnabled(pVM) == false)
4015 return VERR_PATCHING_REFUSED;
4016
4017 /* Test for patch conflict only with patches that actually change guest code. */
4018 if (!(flags & (PATMFL_GUEST_SPECIFIC|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAMPOLINE)))
4019 {
4020 PPATCHINFO pConflictPatch = PATMFindActivePatchByEntrypoint(pVM, pInstrGC);
4021 AssertReleaseMsg(pConflictPatch == 0, ("Unable to patch overwritten instruction at %RRv (%RRv)\n", pInstrGC, pConflictPatch->pPrivInstrGC));
4022 if (pConflictPatch != 0)
4023 return VERR_PATCHING_REFUSED;
4024 }
4025
4026 if (!(flags & PATMFL_CODE32))
4027 {
4028        /** @todo Only 32-bit code right now */
4029        AssertMsgFailed(("PATMR3InstallPatch: We don't support 16-bit code at the moment!!\n"));
4030 return VERR_NOT_IMPLEMENTED;
4031 }
4032
4033 /* We ran out of patch memory; don't bother anymore. */
4034 if (pVM->patm.s.fOutOfMemory == true)
4035 return VERR_PATCHING_REFUSED;
4036
4037 /* Make sure the code selector is wide open; otherwise refuse. */
4038 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
4039 if (CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx)) == 0)
4040 {
4041 RTRCPTR pInstrGCFlat = SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pInstrGC);
4042 if (pInstrGCFlat != pInstrGC)
4043 {
4044 Log(("PATMR3InstallPatch: code selector not wide open: %04x:%RRv != %RRv eflags=%08x\n", pCtx->cs, pInstrGCFlat, pInstrGC, pCtx->eflags.u32));
4045 return VERR_PATCHING_REFUSED;
4046 }
4047 }
4048
4049    /** @note the OpenBSD-specific check will break if we allow additional patches to be installed (int 3) */
4050 if (!(flags & PATMFL_GUEST_SPECIFIC))
4051 {
4052 /* New code. Make sure CSAM has a go at it first. */
4053 CSAMR3CheckCode(pVM, pInstrGC);
4054 }
4055
4056 /** @note obsolete */
4057 if ( PATMIsPatchGCAddr(pVM, pInstrGC)
4058 && (flags & PATMFL_MMIO_ACCESS))
4059 {
4060 RTRCUINTPTR offset;
4061 void *pvPatchCoreOffset;
4062
4063 /* Find the patch record. */
4064 offset = pInstrGC - pVM->patm.s.pPatchMemGC;
4065 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
4066 if (pvPatchCoreOffset == NULL)
4067 {
4068 AssertMsgFailed(("PATMR3InstallPatch: patch not found at address %RRv!!\n", pInstrGC));
4069 return VERR_PATCH_NOT_FOUND; //fatal error
4070 }
4071 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
4072
4073 return patmPatchPATMMMIOInstr(pVM, pInstrGC, &pPatchRec->patch);
4074 }
4075
4076 AssertReturn(!PATMIsPatchGCAddr(pVM, pInstrGC), VERR_PATCHING_REFUSED);
4077
4078 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4079 if (pPatchRec)
4080 {
4081 Assert(!(flags & PATMFL_TRAMPOLINE));
4082
4083 /* Hints about existing patches are ignored. */
4084 if (flags & PATMFL_INSTR_HINT)
4085 return VERR_PATCHING_REFUSED;
4086
4087 if (pPatchRec->patch.uState == PATCH_DISABLE_PENDING)
4088 {
4089 Log(("PATMR3InstallPatch: disable operation is pending for patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
4090 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
4091 Assert(pPatchRec->patch.uState == PATCH_DISABLED);
4092 }
4093
4094 if (pPatchRec->patch.uState == PATCH_DISABLED)
4095 {
4096 /* A patch, for which we previously received a hint, will be enabled and turned into a normal patch. */
4097 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4098 {
4099 Log(("Enabling HINTED patch %RRv\n", pInstrGC));
4100 pPatchRec->patch.flags &= ~PATMFL_INSTR_HINT;
4101 }
4102 else
4103 Log(("Enabling patch %RRv again\n", pInstrGC));
4104
4105 /** @todo we shouldn't disable and enable patches too often (it's relatively cheap, but pointless if it always happens) */
4106 rc = PATMR3EnablePatch(pVM, pInstrGC);
4107 if (RT_SUCCESS(rc))
4108 return VWRN_PATCH_ENABLED;
4109
4110 return rc;
4111 }
4112 if ( pPatchRec->patch.uState == PATCH_ENABLED
4113 || pPatchRec->patch.uState == PATCH_DIRTY)
4114 {
4115 /*
4116 * The patch might have been overwritten.
4117 */
4118 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
4119 if (pPatchRec->patch.uState != PATCH_REFUSED && pPatchRec->patch.uState != PATCH_UNUSABLE)
4120 {
4121 /* Patch must have been overwritten; remove it and pretend nothing happened. */
4122 Log(("Patch an existing patched instruction?!? (%RRv)\n", pInstrGC));
4123 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_MMIO_ACCESS|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
4124 {
4125 if (flags & PATMFL_IDTHANDLER)
4126 pPatchRec->patch.flags |= (flags & (PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER|PATMFL_INTHANDLER)); /* update the type */
4127
4128 return VERR_PATM_ALREADY_PATCHED; /* already done once */
4129 }
4130 }
4131 rc = PATMR3RemovePatch(pVM, pInstrGC);
4132 if (RT_FAILURE(rc))
4133 return VERR_PATCHING_REFUSED;
4134 }
4135 else
4136 {
4137 AssertMsg(pPatchRec->patch.uState == PATCH_REFUSED || pPatchRec->patch.uState == PATCH_UNUSABLE, ("Patch an existing patched instruction?!? (%RRv, state=%d)\n", pInstrGC, pPatchRec->patch.uState));
4138 /* already tried it once! */
4139 return VERR_PATCHING_REFUSED;
4140 }
4141 }
4142
4143 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHREC), 0, MM_TAG_PATM_PATCH, (void **)&pPatchRec);
4144 if (RT_FAILURE(rc))
4145 {
4146 Log(("Out of memory!!!!\n"));
4147 return VERR_NO_MEMORY;
4148 }
4149 pPatchRec->Core.Key = pInstrGC;
4150 pPatchRec->patch.uState = PATCH_REFUSED; //default
4151 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
4152 Assert(rc);
4153
4154 RTGCPHYS GCPhys;
4155 rc = PGMGstGetPage(pVCpu, pInstrGC, NULL, &GCPhys);
4156 if (rc != VINF_SUCCESS)
4157 {
4158 Log(("PGMGstGetPage failed with %Rrc\n", rc));
4159 return rc;
4160 }
4161 /* Disallow patching instructions inside ROM code; complete function duplication is allowed though. */
4162 if ( !(flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAMPOLINE))
4163 && !PGMPhysIsGCPhysNormal(pVM, GCPhys))
4164 {
4165 Log(("Code at %RGv (phys %RGp) is in a ROM, MMIO or invalid page - refused\n", pInstrGC, GCPhys));
4166 return VERR_PATCHING_REFUSED;
4167 }
4168 GCPhys = GCPhys + (pInstrGC & PAGE_OFFSET_MASK);
4169 rc = PGMPhysGCPhys2R3Ptr(pVM, GCPhys, MAX_INSTR_SIZE, (void **)&pInstrHC);
4170 AssertRCReturn(rc, rc);
4171
4172 pPatchRec->patch.pPrivInstrHC = pInstrHC;
4173 pPatchRec->patch.pPrivInstrGC = pInstrGC;
4174 pPatchRec->patch.flags = flags;
4175 pPatchRec->patch.uOpMode = (flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4176
4177 pPatchRec->patch.pInstrGCLowest = pInstrGC;
4178 pPatchRec->patch.pInstrGCHighest = pInstrGC;
4179
4180 if (!(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION | PATMFL_IDTHANDLER | PATMFL_SYSENTER | PATMFL_TRAMPOLINE)))
4181 {
4182 /*
4183 * Close proximity to an unusable patch is a possible hint that this patch would turn out to be dangerous too!
4184 */
4185 PPATMPATCHREC pPatchNear = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, (pInstrGC + SIZEOF_NEARJUMP32 - 1), false);
4186 if (pPatchNear)
4187 {
4188 if (pPatchNear->patch.uState == PATCH_UNUSABLE && pInstrGC < pPatchNear->patch.pPrivInstrGC && pInstrGC + SIZEOF_NEARJUMP32 > pPatchNear->patch.pPrivInstrGC)
4189 {
4190                Log(("Dangerous patch; would overwrite the unusable patch at %RRv\n", pPatchNear->patch.pPrivInstrGC));
4191
4192 pPatchRec->patch.uState = PATCH_UNUSABLE;
4193 /*
4194                 * Leave the new patch record in place, marked unusable, to prevent us from checking this location over and over again
4195 */
4196 return VERR_PATCHING_REFUSED;
4197 }
4198 }
4199 }
4200
4201 pPatchRec->patch.pTempInfo = (PPATCHINFOTEMP)MMR3HeapAllocZ(pVM, MM_TAG_PATM_PATCH, sizeof(PATCHINFOTEMP));
4202 if (pPatchRec->patch.pTempInfo == 0)
4203 {
4204 Log(("Out of memory!!!!\n"));
4205 return VERR_NO_MEMORY;
4206 }
4207
4208 cpu.mode = pPatchRec->patch.uOpMode;
4209 disret = PATMR3DISInstr(pVM, &pPatchRec->patch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
4210 if (disret == false)
4211 {
4212 Log(("Disassembly failed (probably page not present) -> return to caller\n"));
4213 return VERR_PATCHING_REFUSED;
4214 }
4215
4216 AssertMsg(opsize <= MAX_INSTR_SIZE, ("privileged instruction too big %d!!\n", opsize));
4217 if (opsize > MAX_INSTR_SIZE)
4218 {
4219 return VERR_PATCHING_REFUSED;
4220 }
4221
4222 pPatchRec->patch.cbPrivInstr = opsize;
4223 pPatchRec->patch.opcode = cpu.pCurInstr->opcode;
4224
4225 /* Restricted hinting for now. */
4226 Assert(!(flags & PATMFL_INSTR_HINT) || cpu.pCurInstr->opcode == OP_CLI);
4227
4228 /* Allocate statistics slot */
4229 if (pVM->patm.s.uCurrentPatchIdx < PATM_STAT_MAX_COUNTERS)
4230 {
4231 pPatchRec->patch.uPatchIdx = pVM->patm.s.uCurrentPatchIdx++;
4232 }
4233 else
4234 {
4235 Log(("WARNING: Patch index wrap around!!\n"));
4236 pPatchRec->patch.uPatchIdx = PATM_STAT_INDEX_DUMMY;
4237 }
4238
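    /* Dispatch on the requested patch type: trap trampolines, full function duplication,
     * trampolines into existing patches, call replacement, int3 replacement, MMIO address
     * caching, IDT/sysenter handlers, guest specific patches and finally the generic
     * instruction patches (cli/pushf blocks or single instruction int3 patches). */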
4239 if (pPatchRec->patch.flags & PATMFL_TRAPHANDLER)
4240 {
4241 rc = patmInstallTrapTrampoline(pVM, pInstrGC, pPatchRec);
4242 }
4243 else
4244 if (pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION ))
4245 {
4246 rc = patmDuplicateFunction(pVM, pInstrGC, pPatchRec);
4247 }
4248 else
4249 if (pPatchRec->patch.flags & PATMFL_TRAMPOLINE)
4250 {
4251 rc = patmCreateTrampoline(pVM, pInstrGC, pPatchRec);
4252 }
4253 else
4254 if (pPatchRec->patch.flags & PATMFL_REPLACE_FUNCTION_CALL)
4255 {
4256 rc = patmReplaceFunctionCall(pVM, &cpu, pInstrGC, &pPatchRec->patch);
4257 }
4258 else
4259 if (pPatchRec->patch.flags & PATMFL_INT3_REPLACEMENT)
4260 {
4261 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4262 }
4263 else
4264 if (pPatchRec->patch.flags & PATMFL_MMIO_ACCESS)
4265 {
4266 rc = patmPatchMMIOInstr(pVM, pInstrGC, &cpu, &pPatchRec->patch);
4267 }
4268 else
4269 if (pPatchRec->patch.flags & (PATMFL_IDTHANDLER|PATMFL_SYSENTER))
4270 {
4271 if (pPatchRec->patch.flags & PATMFL_SYSENTER)
4272 pPatchRec->patch.flags |= PATMFL_IDTHANDLER; /* we treat a sysenter handler as an IDT handler */
4273
4274 rc = patmIdtHandler(pVM, pInstrGC, pInstrHC, opsize, pPatchRec);
4275#ifdef VBOX_WITH_STATISTICS
4276 if ( rc == VINF_SUCCESS
4277 && (pPatchRec->patch.flags & PATMFL_SYSENTER))
4278 {
4279 pVM->patm.s.uSysEnterPatchIdx = pPatchRec->patch.uPatchIdx;
4280 }
4281#endif
4282 }
4283 else
4284 if (pPatchRec->patch.flags & PATMFL_GUEST_SPECIFIC)
4285 {
4286 switch (cpu.pCurInstr->opcode)
4287 {
4288 case OP_SYSENTER:
4289 case OP_PUSH:
4290 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4291 if (rc == VINF_SUCCESS)
4292 {
4293                Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4295 return rc;
4296 }
4297 break;
4298
4299 default:
4300 rc = VERR_NOT_IMPLEMENTED;
4301 break;
4302 }
4303 }
4304 else
4305 {
4306 switch (cpu.pCurInstr->opcode)
4307 {
4308 case OP_SYSENTER:
4309 rc = PATMInstallGuestSpecificPatch(pVM, &cpu, pInstrGC, pInstrHC, pPatchRec);
4310 if (rc == VINF_SUCCESS)
4311 {
4312 Log(("PATMR3InstallPatch GUEST: %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4313 return VINF_SUCCESS;
4314 }
4315 break;
4316
4317#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
4318 case OP_JO:
4319 case OP_JNO:
4320 case OP_JC:
4321 case OP_JNC:
4322 case OP_JE:
4323 case OP_JNE:
4324 case OP_JBE:
4325 case OP_JNBE:
4326 case OP_JS:
4327 case OP_JNS:
4328 case OP_JP:
4329 case OP_JNP:
4330 case OP_JL:
4331 case OP_JNL:
4332 case OP_JLE:
4333 case OP_JNLE:
4334 case OP_JECXZ:
4335 case OP_LOOP:
4336 case OP_LOOPNE:
4337 case OP_LOOPE:
4338 case OP_JMP:
4339 if (pPatchRec->patch.flags & PATMFL_JUMP_CONFLICT)
4340 {
4341 rc = patmPatchJump(pVM, pInstrGC, pInstrHC, &cpu, pPatchRec);
4342 break;
4343 }
4344 return VERR_NOT_IMPLEMENTED;
4345#endif
4346
4347 case OP_PUSHF:
4348 case OP_CLI:
4349 Log(("PATMR3InstallPatch %s %RRv code32=%d\n", patmGetInstructionString(pPatchRec->patch.opcode, pPatchRec->patch.flags), pInstrGC, (flags & PATMFL_CODE32) ? 1 : 0));
4350 rc = PATMR3PatchBlock(pVM, pInstrGC, pInstrHC, cpu.pCurInstr->opcode, opsize, pPatchRec);
4351 break;
4352
4353 case OP_STR:
4354 case OP_SGDT:
4355 case OP_SLDT:
4356 case OP_SIDT:
4357 case OP_CPUID:
4358 case OP_LSL:
4359 case OP_LAR:
4360 case OP_SMSW:
4361 case OP_VERW:
4362 case OP_VERR:
4363 case OP_IRET:
4364 rc = PATMR3PatchInstrInt3(pVM, pInstrGC, pInstrHC, &cpu, &pPatchRec->patch);
4365 break;
4366
4367 default:
4368 return VERR_NOT_IMPLEMENTED;
4369 }
4370 }
4371
4372 if (rc != VINF_SUCCESS)
4373 {
4374 if (pPatchRec && pPatchRec->patch.nrPatch2GuestRecs)
4375 {
4376 patmEmptyTreeU32(pVM, &pPatchRec->patch.Patch2GuestAddrTree);
4377 pPatchRec->patch.nrPatch2GuestRecs = 0;
4378 }
4379 pVM->patm.s.uCurrentPatchIdx--;
4380 }
4381 else
4382 {
4383 rc = patmInsertPatchPages(pVM, &pPatchRec->patch);
4384 AssertRCReturn(rc, rc);
4385
4386        /* Keep track of the upper and lower boundaries of patched instructions */
4387 if (pPatchRec->patch.pInstrGCLowest < pVM->patm.s.pPatchedInstrGCLowest)
4388 pVM->patm.s.pPatchedInstrGCLowest = pPatchRec->patch.pInstrGCLowest;
4389 if (pPatchRec->patch.pInstrGCHighest > pVM->patm.s.pPatchedInstrGCHighest)
4390 pVM->patm.s.pPatchedInstrGCHighest = pPatchRec->patch.pInstrGCHighest;
4391
4392 Log(("Patch lowest %RRv highest %RRv\n", pPatchRec->patch.pInstrGCLowest, pPatchRec->patch.pInstrGCHighest));
4393 Log(("Global lowest %RRv highest %RRv\n", pVM->patm.s.pPatchedInstrGCLowest, pVM->patm.s.pPatchedInstrGCHighest));
4394
4395 STAM_COUNTER_ADD(&pVM->patm.s.StatInstalled, 1);
4396 STAM_COUNTER_ADD(&pVM->patm.s.StatPATMMemoryUsed, pPatchRec->patch.cbPatchBlockSize);
4397
4398 rc = VINF_SUCCESS;
4399
4400        /* Patch hints are not enabled by default. Only when they are actually encountered. */
4401 if (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
4402 {
4403 rc = PATMR3DisablePatch(pVM, pInstrGC);
4404 AssertRCReturn(rc, rc);
4405 }
4406
4407#ifdef VBOX_WITH_STATISTICS
4408 /* Register statistics counter */
4409 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
4410 {
4411 STAMR3RegisterCallback(pVM, &pPatchRec->patch, STAMVISIBILITY_NOT_GUI, STAMUNIT_GOOD_BAD, patmResetStat, patmPrintStat, "Patch statistics",
4412 "/PATM/Stats/Patch/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4413#ifndef DEBUG_sandervl
4414 /* Full breakdown for the GUI. */
4415 STAMR3RegisterF(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx], STAMTYPE_RATIO_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_GOOD_BAD, PATMPatchType(pVM, &pPatchRec->patch),
4416 "/PATM/Stats/PatchBD/0x%RRv", pPatchRec->patch.pPrivInstrGC);
4417 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchBlockSize,STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchBlockSize", pPatchRec->patch.pPrivInstrGC);
4418 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPatchJump, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPatchJump", pPatchRec->patch.pPrivInstrGC);
4419 STAMR3RegisterF(pVM, &pPatchRec->patch.cbPrivInstr, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cbPrivInstr", pPatchRec->patch.pPrivInstrGC);
4420 STAMR3RegisterF(pVM, &pPatchRec->patch.cCodeWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cCodeWrites", pPatchRec->patch.pPrivInstrGC);
4421 STAMR3RegisterF(pVM, &pPatchRec->patch.cInvalidWrites, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cInvalidWrites", pPatchRec->patch.pPrivInstrGC);
4422 STAMR3RegisterF(pVM, &pPatchRec->patch.cTraps, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/cTraps", pPatchRec->patch.pPrivInstrGC);
4423 STAMR3RegisterF(pVM, &pPatchRec->patch.flags, STAMTYPE_X32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/flags", pPatchRec->patch.pPrivInstrGC);
4424 STAMR3RegisterF(pVM, &pPatchRec->patch.nrJumpRecs, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrJumpRecs", pPatchRec->patch.pPrivInstrGC);
4425 STAMR3RegisterF(pVM, &pPatchRec->patch.nrFixups, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/nrFixups", pPatchRec->patch.pPrivInstrGC);
4426 STAMR3RegisterF(pVM, &pPatchRec->patch.opcode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, NULL, "/PATM/Stats/PatchBD/0x%RRv/opcode", pPatchRec->patch.pPrivInstrGC);
4427 STAMR3RegisterF(pVM, &pPatchRec->patch.uOldState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOldState", pPatchRec->patch.pPrivInstrGC);
4428 STAMR3RegisterF(pVM, &pPatchRec->patch.uOpMode, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uOpMode", pPatchRec->patch.pPrivInstrGC);
4429 /// @todo change the state to be a callback so we can get a state mnemonic instead.
4430 STAMR3RegisterF(pVM, &pPatchRec->patch.uState, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_NONE, NULL, "/PATM/Stats/PatchBD/0x%RRv/uState", pPatchRec->patch.pPrivInstrGC);
4431#endif
4432 }
4433#endif
4434 }
4435 return rc;
4436}
4437
4438/**
4439 * Query instruction size
4440 *
4441 * @returns VBox status code.
4442 * @param pVM The VM to operate on.
4443 * @param pPatch Patch record
4444 * @param pInstrGC Instruction address
4445 */
4446static uint32_t patmGetInstrSize(PVM pVM, PPATCHINFO pPatch, RTRCPTR pInstrGC)
4447{
4448 uint8_t *pInstrHC;
4449
4450 int rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pInstrGC, (PRTR3PTR)&pInstrHC);
4451 if (rc == VINF_SUCCESS)
4452 {
4453 DISCPUSTATE cpu;
4454 bool disret;
4455 uint32_t opsize;
4456
4457 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
4458 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pInstrGC, pInstrHC, &opsize, NULL, PATMREAD_ORGCODE | PATMREAD_NOCHECK);
4459 if (disret)
4460 return opsize;
4461 }
4462 return 0;
4463}
4464
4465/**
4466 * Add patch to page record
4467 *
4468 * @returns VBox status code.
4469 * @param pVM The VM to operate on.
4470 * @param pPage Page address
4471 * @param pPatch Patch record
4472 */
4473int patmAddPatchToPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4474{
4475 PPATMPATCHPAGE pPatchPage;
4476 int rc;
4477
4478 Log(("patmAddPatchToPage: insert patch %RHv to page %RRv\n", pPatch, pPage));
4479
4480 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4481 if (pPatchPage)
4482 {
4483 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4484 if (pPatchPage->cCount == pPatchPage->cMaxPatches)
4485 {
4486 uint32_t cMaxPatchesOld = pPatchPage->cMaxPatches;
4487 PPATCHINFO *paPatchOld = pPatchPage->aPatch;
4488
4489 pPatchPage->cMaxPatches += PATMPATCHPAGE_PREALLOC_INCREMENT;
4490 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*pPatchPage->cMaxPatches, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4491 if (RT_FAILURE(rc))
4492 {
4493 Log(("Out of memory!!!!\n"));
4494 return VERR_NO_MEMORY;
4495 }
4496 memcpy(pPatchPage->aPatch, paPatchOld, cMaxPatchesOld*sizeof(PPATCHINFO));
4497 MMHyperFree(pVM, paPatchOld);
4498 }
4499 pPatchPage->aPatch[pPatchPage->cCount] = pPatch;
4500 pPatchPage->cCount++;
4501 }
4502 else
4503 {
4504 rc = MMHyperAlloc(pVM, sizeof(PATMPATCHPAGE), 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage);
4505 if (RT_FAILURE(rc))
4506 {
4507 Log(("Out of memory!!!!\n"));
4508 return VERR_NO_MEMORY;
4509 }
4510 pPatchPage->Core.Key = pPage;
4511 pPatchPage->cCount = 1;
4512 pPatchPage->cMaxPatches = PATMPATCHPAGE_PREALLOC_INCREMENT;
4513
4514 rc = MMHyperAlloc(pVM, sizeof(PPATCHINFO)*PATMPATCHPAGE_PREALLOC_INCREMENT, 0, MM_TAG_PATM_PATCH, (void **)&pPatchPage->aPatch);
4515 if (RT_FAILURE(rc))
4516 {
4517 Log(("Out of memory!!!!\n"));
4518 MMHyperFree(pVM, pPatchPage);
4519 return VERR_NO_MEMORY;
4520 }
4521 pPatchPage->aPatch[0] = pPatch;
4522
4523 rc = RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, &pPatchPage->Core);
4524 Assert(rc);
4525 pVM->patm.s.cPageRecords++;
4526
4527 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageInserted);
4528 }
4529 CSAMR3MonitorPage(pVM, pPage, CSAM_TAG_PATM);
4530
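    /* Track the lowest and highest patched guest addresses within this page; PATMR3PatchWrite
     * uses these bounds to quickly dismiss writes that only touch unpatched parts of the page. */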
4531 /* Get the closest guest instruction (from below) */
4532 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4533 Assert(pGuestToPatchRec);
4534 if (pGuestToPatchRec)
4535 {
4536 LogFlow(("patmAddPatchToPage: lowest patch page address %RRv current lowest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pLowestAddrGC));
4537 if ( pPatchPage->pLowestAddrGC == 0
4538 || pPatchPage->pLowestAddrGC > (RTRCPTR)pGuestToPatchRec->Core.Key)
4539 {
4540 RTRCUINTPTR offset;
4541
4542 pPatchPage->pLowestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4543
4544 offset = pPatchPage->pLowestAddrGC & PAGE_OFFSET_MASK;
4545 /* If we're too close to the page boundary, then make sure an instruction from the previous page doesn't cross the boundary itself. */
4546 if (offset && offset < MAX_INSTR_SIZE)
4547 {
4548 /* Get the closest guest instruction (from above) */
4549 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage-1, false);
4550
4551 if (pGuestToPatchRec)
4552 {
4553 uint32_t size = patmGetInstrSize(pVM, pPatch, (RTRCPTR)pGuestToPatchRec->Core.Key);
4554 if ((RTRCUINTPTR)pGuestToPatchRec->Core.Key + size > pPage)
4555 {
4556 pPatchPage->pLowestAddrGC = pPage;
4557 LogFlow(("patmAddPatchToPage: new lowest %RRv\n", pPatchPage->pLowestAddrGC));
4558 }
4559 }
4560 }
4561 }
4562 }
4563
4564 /* Get the closest guest instruction (from above) */
4565 pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage+PAGE_SIZE-1, false);
4566 Assert(pGuestToPatchRec);
4567 if (pGuestToPatchRec)
4568 {
4569        LogFlow(("patmAddPatchToPage: highest patch page address %RRv current highest %RRv\n", pGuestToPatchRec->Core.Key, pPatchPage->pHighestAddrGC));
4570 if ( pPatchPage->pHighestAddrGC == 0
4571 || pPatchPage->pHighestAddrGC <= (RTRCPTR)pGuestToPatchRec->Core.Key)
4572 {
4573 pPatchPage->pHighestAddrGC = (RTRCPTR)pGuestToPatchRec->Core.Key;
4574 /* Increase by instruction size. */
4575 uint32_t size = patmGetInstrSize(pVM, pPatch, pPatchPage->pHighestAddrGC);
4576//// Assert(size);
4577 pPatchPage->pHighestAddrGC += size;
4578 LogFlow(("patmAddPatchToPage: new highest %RRv\n", pPatchPage->pHighestAddrGC));
4579 }
4580 }
4581
4582 return VINF_SUCCESS;
4583}
4584
4585/**
4586 * Remove patch from page record
4587 *
4588 * @returns VBox status code.
4589 * @param pVM The VM to operate on.
4590 * @param pPage Page address
4591 * @param pPatch Patch record
4592 */
4593int patmRemovePatchFromPage(PVM pVM, RTRCUINTPTR pPage, PPATCHINFO pPatch)
4594{
4595 PPATMPATCHPAGE pPatchPage;
4596 int rc;
4597
4598 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4599 Assert(pPatchPage);
4600
4601 if (!pPatchPage)
4602 return VERR_INVALID_PARAMETER;
4603
4604 Assert(pPatchPage->cCount <= pPatchPage->cMaxPatches);
4605
4606    Log(("patmRemovePatchFromPage: remove patch %RHv from page %RRv\n", pPatch, pPage));
4607 if (pPatchPage->cCount > 1)
4608 {
4609 uint32_t i;
4610
4611 /* Used by multiple patches */
4612 for (i=0;i<pPatchPage->cCount;i++)
4613 {
4614 if (pPatchPage->aPatch[i] == pPatch)
4615 {
4616 pPatchPage->aPatch[i] = 0;
4617 break;
4618 }
4619 }
4620 /* close the gap between the remaining pointers. */
4621 if (i < pPatchPage->cCount - 1)
4622 {
4623 memcpy(&pPatchPage->aPatch[i], &pPatchPage->aPatch[i+1], sizeof(PPATCHINFO)*(pPatchPage->cCount - (i+1)));
4624 }
4625 AssertMsg(i < pPatchPage->cCount, ("Unable to find patch %RHv in page %RRv\n", pPatch, pPage));
4626
4627 pPatchPage->cCount--;
4628 }
4629 else
4630 {
4631 PPATMPATCHPAGE pPatchNode;
4632
4633 Log(("patmRemovePatchFromPage %RRv\n", pPage));
4634
4635 STAM_COUNTER_INC(&pVM->patm.s.StatPatchPageRemoved);
4636 pPatchNode = (PPATMPATCHPAGE)RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, pPage);
4637 Assert(pPatchNode && pPatchNode == pPatchPage);
4638
4639 Assert(pPatchPage->aPatch);
4640 rc = MMHyperFree(pVM, pPatchPage->aPatch);
4641 AssertRC(rc);
4642 rc = MMHyperFree(pVM, pPatchPage);
4643 AssertRC(rc);
4644 pVM->patm.s.cPageRecords--;
4645 }
4646 return VINF_SUCCESS;
4647}
4648
4649/**
4650 * Insert page records for all guest pages that contain instructions that were recompiled for this patch
4651 *
4652 * @returns VBox status code.
4653 * @param pVM The VM to operate on.
4654 * @param pPatch Patch record
4655 */
4656int patmInsertPatchPages(PVM pVM, PPATCHINFO pPatch)
4657{
4658 int rc;
4659 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4660
4661 /* Insert the pages that contain patched instructions into a lookup tree for detecting self-modifying code. */
4662 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4663 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4664
4665 /** @todo optimize better (large gaps between current and next used page) */
4666 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4667 {
4668 /* Get the closest guest instruction (from above) */
4669 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4670 if ( pGuestToPatchRec
4671 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage)
4672 )
4673 {
4674 /* Code in page really patched -> add record */
4675 rc = patmAddPatchToPage(pVM, pPage, pPatch);
4676 AssertRC(rc);
4677 }
4678 }
4679 pPatch->flags |= PATMFL_CODE_MONITORED;
4680 return VINF_SUCCESS;
4681}
4682
4683/**
4684 * Remove page records for all guest pages that contain instructions that were recompiled for this patch
4685 *
4686 * @returns VBox status code.
4687 * @param pVM The VM to operate on.
4688 * @param pPatch Patch record
4689 */
4690int patmRemovePatchPages(PVM pVM, PPATCHINFO pPatch)
4691{
4692 int rc;
4693 RTRCUINTPTR pPatchPageStart, pPatchPageEnd, pPage;
4694
4695 /* Remove the page records for all guest pages that contain patched instructions. */
4696 pPatchPageStart = (RTRCUINTPTR)pPatch->pInstrGCLowest & PAGE_BASE_GC_MASK;
4697 pPatchPageEnd = (RTRCUINTPTR)pPatch->pInstrGCHighest & PAGE_BASE_GC_MASK;
4698
4699 for(pPage = pPatchPageStart; pPage <= pPatchPageEnd; pPage += PAGE_SIZE)
4700 {
4701 /* Get the closest guest instruction (from above) */
4702 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pPage, true);
4703 if ( pGuestToPatchRec
4704 && PAGE_ADDRESS(pGuestToPatchRec->Core.Key) == PAGE_ADDRESS(pPage) /** @todo bird: PAGE_ADDRESS is for the current context really. check out these. */
4705 )
4706 {
4707 /* Code in page really patched -> remove record */
4708 rc = patmRemovePatchFromPage(pVM, pPage, pPatch);
4709 AssertRC(rc);
4710 }
4711 }
4712 pPatch->flags &= ~PATMFL_CODE_MONITORED;
4713 return VINF_SUCCESS;
4714}
4715
4716/**
4717 * Notifies PATM about a (potential) write to code that has been patched.
4718 *
4719 * @returns VBox status code.
4720 * @param pVM The VM to operate on.
4721 * @param GCPtr GC pointer to write address
4722 * @param cbWrite Number of bytes to write
4723 *
4724 */
4725VMMR3DECL(int) PATMR3PatchWrite(PVM pVM, RTRCPTR GCPtr, uint32_t cbWrite)
4726{
4727 RTRCUINTPTR pWritePageStart, pWritePageEnd, pPage;
4728
4729 Log(("PATMR3PatchWrite %RRv %x\n", GCPtr, cbWrite));
4730
4731 Assert(VM_IS_EMT(pVM));
4732
4733 /* Quick boundary check */
4734 if ( GCPtr < pVM->patm.s.pPatchedInstrGCLowest
4735 || GCPtr > pVM->patm.s.pPatchedInstrGCHighest
4736 )
4737 return VINF_SUCCESS;
4738
4739 STAM_PROFILE_ADV_START(&pVM->patm.s.StatPatchWrite, a);
4740
4741 pWritePageStart = (RTRCUINTPTR)GCPtr & PAGE_BASE_GC_MASK;
4742 pWritePageEnd = ((RTRCUINTPTR)GCPtr + cbWrite - 1) & PAGE_BASE_GC_MASK;
4743
4744 for (pPage = pWritePageStart; pPage <= pWritePageEnd; pPage += PAGE_SIZE)
4745 {
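/* Note: the loop_start label below lets us restart processing of the current page after a patch was
 * removed or marked dirty, since that may delete or reshuffle the PATMPATCHPAGE record we iterate over. */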
4746loop_start:
4747 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4748 if (pPatchPage)
4749 {
4750 uint32_t i;
4751 bool fValidPatchWrite = false;
4752
4753 /* Quick check to see if the write is in the patched part of the page */
4754 if ( pPatchPage->pLowestAddrGC > (RTRCPTR)((RTRCUINTPTR)GCPtr + cbWrite - 1)
4755 || pPatchPage->pHighestAddrGC < GCPtr)
4756 {
4757 break;
4758 }
4759
4760 for (i=0;i<pPatchPage->cCount;i++)
4761 {
4762 if (pPatchPage->aPatch[i])
4763 {
4764 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4765 RTRCPTR pPatchInstrGC;
4766 //unused: bool fForceBreak = false;
4767
4768 Assert(pPatchPage->aPatch[i]->flags & PATMFL_CODE_MONITORED);
4769 /** @todo inefficient and includes redundant checks for multiple pages. */
4770 for (uint32_t j=0; j<cbWrite; j++)
4771 {
4772 RTRCPTR pGuestPtrGC = (RTRCPTR)((RTRCUINTPTR)GCPtr + j);
4773
4774 if ( pPatch->cbPatchJump
4775 && pGuestPtrGC >= pPatch->pPrivInstrGC
4776 && pGuestPtrGC < pPatch->pPrivInstrGC + pPatch->cbPatchJump)
4777 {
4778 /* The guest is about to overwrite the 5 byte jump to patch code. Remove the patch. */
4779 Log(("PATMR3PatchWrite: overwriting jump to patch code -> remove patch.\n"));
4780 int rc = PATMR3RemovePatch(pVM, pPatch->pPrivInstrGC);
4781 if (rc == VINF_SUCCESS)
4782 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4783 goto loop_start;
4784
4785 continue;
4786 }
4787
4788 /* Find the closest instruction from below; the above quick check ensured that we are indeed in patched code */
4789 pPatchInstrGC = patmGuestGCPtrToPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4790 if (!pPatchInstrGC)
4791 {
4792 RTRCPTR pClosestInstrGC;
4793 uint32_t size;
4794
4795 pPatchInstrGC = patmGuestGCPtrToClosestPatchGCPtr(pVM, pPatch, pGuestPtrGC);
4796 if (pPatchInstrGC)
4797 {
4798 pClosestInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, pPatch, pPatchInstrGC);
4799 Assert(pClosestInstrGC <= pGuestPtrGC);
4800 size = patmGetInstrSize(pVM, pPatch, pClosestInstrGC);
4801 /* Check if this is not a write into a gap between two patches */
4802 if (pClosestInstrGC + size - 1 < pGuestPtrGC)
4803 pPatchInstrGC = 0;
4804 }
4805 }
4806 if (pPatchInstrGC)
4807 {
4808 uint32_t PatchOffset = pPatchInstrGC - pVM->patm.s.pPatchMemGC; /* Offset in memory reserved for PATM. */
4809
4810 fValidPatchWrite = true;
4811
4812 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32Get(&pPatch->Patch2GuestAddrTree, PatchOffset);
4813 Assert(pPatchToGuestRec);
4814 if (pPatchToGuestRec && !pPatchToGuestRec->fDirty)
4815 {
4816 Log(("PATMR3PatchWrite: Found patched instruction %RRv -> %RRv\n", pGuestPtrGC, pPatchInstrGC));
4817
4818 if (++pPatch->cCodeWrites > PATM_MAX_CODE_WRITES)
4819 {
4820 LogRel(("PATM: Disable block at %RRv - write %RRv-%RRv\n", pPatch->pPrivInstrGC, pGuestPtrGC, pGuestPtrGC+cbWrite));
4821
4822 PATMR3MarkDirtyPatch(pVM, pPatch);
4823
4824 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4825 goto loop_start;
4826 }
4827 else
4828 {
4829 /* Replace the patch instruction with a breakpoint; when it's hit, we'll attempt to recompile the instruction again. */
4830 uint8_t *pInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pPatchInstrGC);
4831
4832 pPatchToGuestRec->u8DirtyOpcode = *pInstrHC;
4833 pPatchToGuestRec->fDirty = true;
4834
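/* Writing int3 into the patch code means the next execution of this instruction traps into
 * PATMR3HandleTrap, which then calls patmR3HandleDirtyInstr (below) to re-examine and re-copy
 * the modified guest instruction. */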
4835 *pInstrHC = 0xCC;
4836
4837 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirty);
4838 }
4839 }
4840 /* else already marked dirty */
4841 }
4842 }
4843 }
4844 } /* for each patch */
4845
4846 if (fValidPatchWrite == false)
4847 {
4848 /* Write to a part of the page that either:
4849 * - doesn't contain any code (shared code/data page); rather unlikely
4850 * - contains old code that is no longer in active use.
4851 */
4852invalid_write_loop_start:
4853 pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, (RTRCPTR)pPage);
4854
4855 if (pPatchPage)
4856 {
4857 for (i=0;i<pPatchPage->cCount;i++)
4858 {
4859 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4860
4861 if (pPatch->cInvalidWrites > PATM_MAX_INVALID_WRITES)
4862 {
4863 /** @note possibly dangerous assumption that all future writes will be harmless. */
4864 if (pPatch->flags & PATMFL_IDTHANDLER)
4865 {
4866 LogRel(("PATM: Stop monitoring IDT handler pages at %RRv - invalid write %RRv-%RRv (this is not a fatal error)\n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4867
4868 Assert(pPatch->flags & PATMFL_CODE_MONITORED);
4869 int rc = patmRemovePatchPages(pVM, pPatch);
4870 AssertRC(rc);
4871 }
4872 else
4873 {
4874 LogRel(("PATM: Disable block at %RRv - invalid write %RRv-%RRv \n", pPatch->pPrivInstrGC, GCPtr, GCPtr+cbWrite));
4875 PATMR3MarkDirtyPatch(pVM, pPatch);
4876 }
4877 /** @note jump back to the start as the pPatchPage has been deleted or changed */
4878 goto invalid_write_loop_start;
4879 }
4880 } /* for */
4881 }
4882 }
4883 }
4884 }
4885 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatPatchWrite, a);
4886 return VINF_SUCCESS;
4887
4888}
4889
4890/**
4891 * Disable all patches in a flushed page
4892 *
4893 * @returns VBox status code
4894 * @param pVM The VM to operate on.
4895 * @param addr GC address of the page to flush
4896 */
4897/** @note Currently only called by CSAMR3FlushPage; optimization to avoid having to double check if the physical address has changed
4898 */
4899VMMR3DECL(int) PATMR3FlushPage(PVM pVM, RTRCPTR addr)
4900{
4901 addr &= PAGE_BASE_GC_MASK;
4902
4903 PPATMPATCHPAGE pPatchPage = (PPATMPATCHPAGE)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPage, addr);
4904 if (pPatchPage)
4905 {
4906 int i;
4907
4908 /* From top to bottom as the array is modified by PATMR3MarkDirtyPatch. */
4909 for (i=(int)pPatchPage->cCount-1;i>=0;i--)
4910 {
4911 if (pPatchPage->aPatch[i])
4912 {
4913 PPATCHINFO pPatch = pPatchPage->aPatch[i];
4914
4915 Log(("PATMR3FlushPage %RRv remove patch at %RRv\n", addr, pPatch->pPrivInstrGC));
4916 PATMR3MarkDirtyPatch(pVM, pPatch);
4917 }
4918 }
4919 STAM_COUNTER_INC(&pVM->patm.s.StatFlushed);
4920 }
4921 return VINF_SUCCESS;
4922}
4923
4924/**
4925 * Checks if the instruction at the specified address has already been patched.
4926 *
4927 * @returns boolean, patched or not
4928 * @param pVM The VM to operate on.
4929 * @param pInstrGC Guest context pointer to instruction
4930 */
4931VMMR3DECL(bool) PATMR3HasBeenPatched(PVM pVM, RTRCPTR pInstrGC)
4932{
4933 PPATMPATCHREC pPatchRec;
4934 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
4935 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
4936 return true;
4937 return false;
4938}
4939
4940/**
4941 * Query the opcode of the original code that was overwritten by the 5-byte patch jump.
4942 *
4943 * @returns VBox status code.
4944 * @param pVM The VM to operate on.
4945 * @param pInstrGC GC address of the instruction
4946 * @param pByte Opcode byte pointer (OUT)
4947 *
4948 */
4949VMMR3DECL(int) PATMR3QueryOpcode(PVM pVM, RTRCPTR pInstrGC, uint8_t *pByte)
4950{
4951 PPATMPATCHREC pPatchRec;
4952
4953 /** @todo this will not work for aliased pages! (never has, but so far not a problem for us) */
4954
4955 /* Shortcut. */
4956 if ( !PATMIsEnabled(pVM)
4957 || pInstrGC < pVM->patm.s.pPatchedInstrGCLowest
4958 || pInstrGC > pVM->patm.s.pPatchedInstrGCHighest)
4959 {
4960 return VERR_PATCH_NOT_FOUND;
4961 }
4962
4963 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
4964 // if the patch is enabled and the pointer lies within 5 bytes of this priv instr ptr, then we've got a hit!
4965 if ( pPatchRec
4966 && pPatchRec->patch.uState == PATCH_ENABLED
4967 && pInstrGC >= pPatchRec->patch.pPrivInstrGC
4968 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
4969 {
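/* aPrivInstr holds the original guest bytes that the patch jump overwrote, so we can hand back
 * the pre-patch opcode byte at the requested offset. */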
4970 RTRCPTR offset = pInstrGC - pPatchRec->patch.pPrivInstrGC;
4971 *pByte = pPatchRec->patch.aPrivInstr[offset];
4972
4973 if (pPatchRec->patch.cbPatchJump == 1)
4974 {
4975 Log(("PATMR3QueryOpcode: returning opcode %2X for instruction at %RRv\n", *pByte, pInstrGC));
4976 }
4977 STAM_COUNTER_ADD(&pVM->patm.s.StatNrOpcodeRead, 1);
4978 return VINF_SUCCESS;
4979 }
4980 return VERR_PATCH_NOT_FOUND;
4981}
4982
4983/**
4984 * Disable patch for privileged instruction at specified location
4985 *
4986 * @returns VBox status code.
4987 * @param pVM The VM to operate on.
4988 * @param pInstrGC Guest context pointer to the privileged instruction
4989 *
4990 * @note returns failure if patching is not allowed or possible
4991 *
4992 */
4993VMMR3DECL(int) PATMR3DisablePatch(PVM pVM, RTRCPTR pInstrGC)
4994{
4995 PPATMPATCHREC pPatchRec;
4996 PPATCHINFO pPatch;
4997
4998 Log(("PATMR3DisablePatch: %RRv\n", pInstrGC));
4999 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5000 if (pPatchRec)
5001 {
5002 int rc = VINF_SUCCESS;
5003
5004 pPatch = &pPatchRec->patch;
5005
5006 /* Already disabled? */
5007 if (pPatch->uState == PATCH_DISABLED)
5008 return VINF_SUCCESS;
5009
5010 /* Clear the IDT entries for the patch we're disabling. */
5011 /** @note very important as we clear IF in the patch itself */
5012 /** @todo this needs to be changed */
5013 if (pPatch->flags & PATMFL_IDTHANDLER)
5014 {
5015 uint32_t iGate;
5016
5017 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
5018 if (iGate != (uint32_t)~0)
5019 {
5020 TRPMR3SetGuestTrapHandler(pVM, iGate, TRPM_INVALID_HANDLER);
5021 if (++cIDTHandlersDisabled < 256)
5022 LogRel(("PATM: Disabling IDT %x patch handler %RRv\n", iGate, pInstrGC));
5023 }
5024 }
5025
5026 /* Mark the entry with a breakpoint in case somebody else calls it later on (cli patch used as a function, duplicated function, trampoline or IDT patches). */
5027 if ( pPatch->pPatchBlockOffset
5028 && pPatch->uState == PATCH_ENABLED)
5029 {
5030 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5031 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5032 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5033 }
5034
5035 /* IDT or function patches haven't changed any guest code. */
5036 if (pPatch->flags & PATMFL_PATCHED_GUEST_CODE)
5037 {
5038 Assert(pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP);
5039 Assert(!(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE|PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK)));
5040
5041 if (pPatch->uState != PATCH_REFUSED)
5042 {
5043 AssertMsg(pPatch->pPrivInstrHC, ("Invalid HC pointer?!? (%RRv)\n", pInstrGC));
5044 Assert(pPatch->cbPatchJump);
5045
5046 /** pPrivInstrHC is probably not valid anymore */
5047 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatchRec->patch.pPrivInstrGC, (PRTR3PTR)&pPatchRec->patch.pPrivInstrHC);
5048 if (rc == VINF_SUCCESS)
5049 {
5050 uint8_t temp[16];
5051
5052 Assert(pPatch->cbPatchJump < sizeof(temp));
5053
5054 /* Let's first check if the guest code is still the same. */
5055 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5056 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5057 if (rc == VINF_SUCCESS)
5058 {
5059 RTRCINTPTR displ = (RTRCUINTPTR)PATCHCODE_PTR_GC(pPatch) - ((RTRCUINTPTR)pPatch->pPrivInstrGC + SIZEOF_NEARJUMP32);
5060
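/* The patch jump is a near 'jmp rel32' (0xE9 followed by a 32-bit displacement relative to the
 * instruction following the jump, i.e. pPrivInstrGC + SIZEOF_NEARJUMP32). If the bytes in guest
 * memory no longer match, the guest has overwritten our jump. */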
5061 if ( temp[0] != 0xE9 /* jmp opcode */
5062 || *(RTRCINTPTR *)(&temp[1]) != displ
5063 )
5064 {
5065 Log(("PATMR3DisablePatch: Can't disable a patch who's guest code has changed!!\n"));
5066 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5067 /* Remove it completely */
5068 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5069 rc = PATMR3RemovePatch(pVM, pInstrGC);
5070 AssertRC(rc);
5071 return VWRN_PATCH_REMOVED;
5072 }
5073 }
5074 patmRemoveJumpToPatch(pVM, pPatch);
5075
5076 }
5077 else
5078 {
5079 Log(("PATMR3DisablePatch: unable to disable patch -> mark PATCH_DISABLE_PENDING\n"));
5080 pPatch->uState = PATCH_DISABLE_PENDING;
5081 }
5082 }
5083 else
5084 {
5085 AssertMsgFailed(("Patch was refused!\n"));
5086 return VERR_PATCH_ALREADY_DISABLED;
5087 }
5088 }
5089 else
5090 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5091 {
5092 uint8_t temp[16];
5093
5094 Assert(pPatch->cbPatchJump < sizeof(temp));
5095
5096 /* Let's first check if the guest code is still the same. */
5097 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5098 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_TABLE_NOT_PRESENT || rc == VERR_PAGE_NOT_PRESENT);
5099 if (rc == VINF_SUCCESS)
5100 {
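/* For int3 replacement patches the first guest byte was replaced with 0xCC; if that byte is gone,
 * the guest has rewritten the code and the patch must be removed completely. */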
5101 if (temp[0] != 0xCC)
5102 {
5103 Log(("PATMR3DisablePatch: Can't disable a patch who's guest code has changed!!\n"));
5104 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5105 /* Remove it completely */
5106 pPatch->uState = PATCH_DISABLED; /* don't call PATMR3DisablePatch again */
5107 rc = PATMR3RemovePatch(pVM, pInstrGC);
5108 AssertRC(rc);
5109 return VWRN_PATCH_REMOVED;
5110 }
5111 patmDeactivateInt3Patch(pVM, pPatch);
5112 }
5113 }
5114
5115 if (rc == VINF_SUCCESS)
5116 {
5117 /* Save old state and mark this one as disabled (so it can be enabled later on). */
5118 if (pPatch->uState == PATCH_DISABLE_PENDING)
5119 {
5120 /* Just to be safe, let's make sure this one can never be reused; the patch might be marked dirty already (int3 at start) */
5121 pPatch->uState = PATCH_UNUSABLE;
5122 }
5123 else
5124 if (pPatch->uState != PATCH_DIRTY)
5125 {
5126 pPatch->uOldState = pPatch->uState;
5127 pPatch->uState = PATCH_DISABLED;
5128 }
5129 STAM_COUNTER_ADD(&pVM->patm.s.StatDisabled, 1);
5130 }
5131
5132 Log(("PATMR3DisablePatch: disabled patch at %RRv\n", pInstrGC));
5133 return VINF_SUCCESS;
5134 }
5135 Log(("Patch not found!\n"));
5136 return VERR_PATCH_NOT_FOUND;
5137}
5138
5139/**
5140 * Permanently disable patch for privileged instruction at specified location
5141 *
5142 * @returns VBox status code.
5143 * @param pVM The VM to operate on.
5144 * @param pInstrGC Guest context pointer to the instruction
5145 * @param pConflictAddr Guest context pointer which conflicts with specified patch
5146 * @param pConflictPatch Conflicting patch
5147 *
5148 */
5149static int patmDisableUnusablePatch(PVM pVM, RTRCPTR pInstrGC, RTRCPTR pConflictAddr, PPATCHINFO pConflictPatch)
5150{
5151#ifdef PATM_RESOLVE_CONFLICTS_WITH_JUMP_PATCHES
5152 PATCHINFO patch = {0};
5153 DISCPUSTATE cpu;
5154 R3PTRTYPE(uint8_t *) pInstrHC;
5155 uint32_t opsize;
5156 bool disret;
5157 int rc;
5158
5159 pInstrHC = PATMGCVirtToHCVirt(pVM, &patch, pInstrGC);
5160 cpu.mode = (pConflictPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5161 disret = PATMR3DISInstr(pVM, &patch, &cpu, pInstrGC, pInstrHC, &opsize, NULL);
5162 /*
5163 * If it's a 5 byte relative jump, then we can work around the problem by replacing the 32 bits relative offset
5164 * with one that jumps right into the conflict patch.
5165 * Otherwise we must disable the conflicting patch to avoid serious problems.
5166 */
5167 if ( disret == true
5168 && (pConflictPatch->flags & PATMFL_CODE32)
5169 && (cpu.pCurInstr->opcode == OP_JMP || (cpu.pCurInstr->optype & OPTYPE_COND_CONTROLFLOW))
5170 && (cpu.param1.flags & USE_IMMEDIATE32_REL))
5171 {
5172 /* Hint patches must be enabled first. */
5173 if (pConflictPatch->flags & PATMFL_INSTR_HINT)
5174 {
5175 Log(("Enabling HINTED patch %RRv\n", pConflictPatch->pPrivInstrGC));
5176 pConflictPatch->flags &= ~PATMFL_INSTR_HINT;
5177 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5178 Assert(rc == VINF_SUCCESS || rc == VERR_PATCH_NOT_FOUND);
5179 /* Enabling might fail if the patched code has changed in the meantime. */
5180 if (rc != VINF_SUCCESS)
5181 return rc;
5182 }
5183
5184 rc = PATMR3InstallPatch(pVM, pInstrGC, PATMFL_CODE32 | PATMFL_JUMP_CONFLICT);
5185 if (RT_SUCCESS(rc))
5186 {
5187 Log(("PATM -> CONFLICT: Installed JMP patch for patch conflict at %RRv\n", pInstrGC));
5188 STAM_COUNTER_INC(&pVM->patm.s.StatFixedConflicts);
5189 return VINF_SUCCESS;
5190 }
5191 }
5192#endif
5193
5194 if (pConflictPatch->opcode == OP_CLI)
5195 {
5196 /* Turn it into an int3 patch; our GC trap handler will call the generated code manually. */
5197 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> turn into int 3 patch!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5198 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5199 if (rc == VWRN_PATCH_REMOVED)
5200 return VINF_SUCCESS;
5201 if (RT_SUCCESS(rc))
5202 {
5203 pConflictPatch->flags &= ~(PATMFL_MUST_INSTALL_PATCHJMP|PATMFL_INSTR_HINT);
5204 pConflictPatch->flags |= PATMFL_INT3_REPLACEMENT_BLOCK;
5205 rc = PATMR3EnablePatch(pVM, pConflictPatch->pPrivInstrGC);
5206 if (rc == VERR_PATCH_NOT_FOUND)
5207 return VINF_SUCCESS; /* removed already */
5208
5209 AssertRC(rc);
5210 if (RT_SUCCESS(rc))
5211 {
5212 STAM_COUNTER_INC(&pVM->patm.s.StatInt3Callable);
5213 return VINF_SUCCESS;
5214 }
5215 }
5216 /* else turned into unusable patch (see below) */
5217 }
5218 else
5219 {
5220 Log(("PATM -> CONFLICT: Found active patch at instruction %RRv with target %RRv -> DISABLING it!!\n", pInstrGC, pConflictPatch->pPrivInstrGC));
5221 int rc = PATMR3DisablePatch(pVM, pConflictPatch->pPrivInstrGC);
5222 if (rc == VWRN_PATCH_REMOVED)
5223 return VINF_SUCCESS;
5224 }
5225
5226 /* No need to monitor the code anymore. */
5227 if (pConflictPatch->flags & PATMFL_CODE_MONITORED)
5228 {
5229 int rc = patmRemovePatchPages(pVM, pConflictPatch);
5230 AssertRC(rc);
5231 }
5232 pConflictPatch->uState = PATCH_UNUSABLE;
5233 STAM_COUNTER_INC(&pVM->patm.s.StatUnusable);
5234 return VERR_PATCH_DISABLED;
5235}
5236
5237/**
5238 * Enable patch for privileged instruction at specified location
5239 *
5240 * @returns VBox status code.
5241 * @param pVM The VM to operate on.
5242 * @param pInstrGC Guest context pointer to the privileged instruction
5243 *
5244 * @note returns failure if patching is not allowed or possible
5245 *
5246 */
5247VMMR3DECL(int) PATMR3EnablePatch(PVM pVM, RTRCPTR pInstrGC)
5248{
5249 PPATMPATCHREC pPatchRec;
5250 PPATCHINFO pPatch;
5251
5252 Log(("PATMR3EnablePatch %RRv\n", pInstrGC));
5253 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5254 if (pPatchRec)
5255 {
5256 int rc = VINF_SUCCESS;
5257
5258 pPatch = &pPatchRec->patch;
5259
5260 if (pPatch->uState == PATCH_DISABLED)
5261 {
5262 if (pPatch->flags & PATMFL_MUST_INSTALL_PATCHJMP)
5263 {
5264 Assert(!(pPatch->flags & PATMFL_PATCHED_GUEST_CODE));
5265 /** @todo -> pPrivInstrHC is probably not valid anymore */
5266 rc = PGMPhysGCPtr2R3Ptr(VMMGetCpu0(pVM), pPatchRec->patch.pPrivInstrGC, (PRTR3PTR)&pPatchRec->patch.pPrivInstrHC);
5267 if (rc == VINF_SUCCESS)
5268 {
5269#ifdef DEBUG
5270 DISCPUSTATE cpu;
5271 char szOutput[256];
5272 uint32_t opsize, i = 0;
5273#endif
5274 uint8_t temp[16];
5275
5276 Assert(pPatch->cbPatchJump < sizeof(temp));
5277
5278 // let's first check if the guest code is still the same
5279 int rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5280 AssertRC(rc);
5281
5282 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5283 {
5284 Log(("PATMR3EnablePatch: Can't enable a patch who's guest code has changed!!\n"));
5285 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5286 /* Remove it completely */
5287 rc = PATMR3RemovePatch(pVM, pInstrGC);
5288 AssertRC(rc);
5289 return VERR_PATCH_NOT_FOUND;
5290 }
5291
5292 rc = patmGenJumpToPatch(pVM, pPatch, false);
5293 AssertRC(rc);
5294 if (RT_FAILURE(rc))
5295 return rc;
5296
5297#ifdef DEBUG
5298 bool disret;
5299 i = 0;
5300 while(i < pPatch->cbPatchJump)
5301 {
5302 cpu.mode = (pPatch->flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
5303 disret = PATMR3DISInstr(pVM, pPatch, &cpu, pPatch->pPrivInstrGC + i, &pPatch->pPrivInstrHC[i], &opsize, szOutput);
5304 Log(("Renewed patch instr: %s", szOutput));
5305 i += opsize;
5306 }
5307#endif
5308 }
5309 }
5310 else
5311 if (pPatch->flags & (PATMFL_INT3_REPLACEMENT|PATMFL_INT3_REPLACEMENT_BLOCK))
5312 {
5313 uint8_t temp[16];
5314
5315 Assert(pPatch->cbPatchJump < sizeof(temp));
5316
5317 /* Let's first check if the guest code is still the same. */
5318 int rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), temp, pPatch->pPrivInstrGC, pPatch->cbPatchJump);
5319 AssertRC(rc);
5320
5321 if (memcmp(temp, pPatch->aPrivInstr, pPatch->cbPatchJump))
5322 {
5323 Log(("PATMR3EnablePatch: Can't enable a patch who's guest code has changed!!\n"));
5324 STAM_COUNTER_INC(&pVM->patm.s.StatOverwritten);
5325 rc = PATMR3RemovePatch(pVM, pInstrGC);
5326 AssertRC(rc);
5327 return VERR_PATCH_NOT_FOUND;
5328 }
5329
5330 rc = patmActivateInt3Patch(pVM, pPatch);
5331 if (RT_FAILURE(rc))
5332 return rc;
5333 }
5334
5335 pPatch->uState = pPatch->uOldState; //restore state
5336
5337 /* Restore the entry breakpoint with the original opcode (see PATMR3DisablePatch). */
5338 if (pPatch->pPatchBlockOffset)
5339 {
5340 *PATCHCODE_PTR_HC(pPatch) = pPatch->bDirtyOpcode;
5341 }
5342
5343 STAM_COUNTER_ADD(&pVM->patm.s.StatEnabled, 1);
5344 }
5345 else
5346 Log(("PATMR3EnablePatch: Unable to enable patch %RRv with state %d\n", pInstrGC, pPatch->uState));
5347
5348 return rc;
5349 }
5350 return VERR_PATCH_NOT_FOUND;
5351}
5352
5353/**
5354 * Remove patch for privileged instruction at specified location
5355 *
5356 * @returns VBox status code.
5357 * @param pVM The VM to operate on.
5358 * @param pPatchRec Patch record
5359 * @param fForceRemove Force removal even for referenced or duplicated function patches
5360 */
5361int PATMRemovePatch(PVM pVM, PPATMPATCHREC pPatchRec, bool fForceRemove)
5362{
5363 PPATCHINFO pPatch;
5364
5365 pPatch = &pPatchRec->patch;
5366
5367 /* Strictly forbidden to remove such patches. There can be dependencies!! */
5368 if (!fForceRemove && (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED)))
5369 {
5370 Log(("PATMRemovePatch %RRv REFUSED!\n", pPatch->pPrivInstrGC));
5371 return VERR_ACCESS_DENIED;
5372 }
5373 Log(("PATMRemovePatch %RRv\n", pPatch->pPrivInstrGC));
5374
5375 /** @note NEVER EVER REUSE PATCH MEMORY */
5376 /** @note PATMR3DisablePatch put a breakpoint (0xCC) at the entry of this patch */
5377
5378 if (pPatchRec->patch.pPatchBlockOffset)
5379 {
5380 PAVLOU32NODECORE pNode;
5381
5382 pNode = RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchRec->patch.pPatchBlockOffset);
5383 Assert(pNode);
5384 }
5385
5386 if (pPatchRec->patch.flags & PATMFL_CODE_MONITORED)
5387 {
5388 int rc = patmRemovePatchPages(pVM, &pPatchRec->patch);
5389 AssertRC(rc);
5390 }
5391
5392#ifdef VBOX_WITH_STATISTICS
5393 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5394 {
5395 STAMR3Deregister(pVM, &pPatchRec->patch);
5396#ifndef DEBUG_sandervl
5397 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5398 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5399 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5400 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5401 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5402 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5403 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5404 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5405 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5406 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5407 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5408 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5409 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5410 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5411#endif
5412 }
5413#endif
5414
5415 /** @note no need to free Guest2PatchAddrTree as those records share memory with Patch2GuestAddrTree records. */
5416 patmEmptyTreeU32(pVM, &pPatch->Patch2GuestAddrTree);
5417 pPatch->nrPatch2GuestRecs = 0;
5418 Assert(pPatch->Patch2GuestAddrTree == 0);
5419
5420 patmEmptyTree(pVM, &pPatch->FixupTree);
5421 pPatch->nrFixups = 0;
5422 Assert(pPatch->FixupTree == 0);
5423
5424 if (pPatchRec->patch.pTempInfo)
5425 MMR3HeapFree(pPatchRec->patch.pTempInfo);
5426
5427 /** @note might fail, because it has already been removed (e.g. during reset). */
5428 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5429
5430 /* Free the patch record */
5431 MMHyperFree(pVM, pPatchRec);
5432 return VINF_SUCCESS;
5433}
5434
5435/**
5436 * Attempt to refresh the patch by recompiling its entire code block
5437 *
5438 * @returns VBox status code.
5439 * @param pVM The VM to operate on.
5440 * @param pPatchRec Patch record
5441 */
5442int patmR3RefreshPatch(PVM pVM, PPATMPATCHREC pPatchRec)
5443{
5444 PPATCHINFO pPatch;
5445 int rc;
5446 RTRCPTR pInstrGC = pPatchRec->patch.pPrivInstrGC;
5447
5448 Log(("patmR3RefreshPatch: attempt to refresh patch at %RRv\n", pInstrGC));
5449
5450 pPatch = &pPatchRec->patch;
5451 AssertReturn(pPatch->flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER), VERR_PATCHING_REFUSED);
5452 if (pPatch->flags & PATMFL_EXTERNAL_JUMP_INSIDE)
5453 {
5454 Log(("patmR3RefreshPatch: refused because external jumps to this patch exist\n"));
5455 return VERR_PATCHING_REFUSED;
5456 }
5457
5458 /** Note: quite ugly to enable/disable/remove/insert old and new patches, but there's no easy way around it. */
5459
5460 rc = PATMR3DisablePatch(pVM, pInstrGC);
5461 AssertRC(rc);
5462
5463 /** Kick it out of the lookup tree to make sure PATMR3InstallPatch doesn't fail (hack alert) */
5464 RTAvloU32Remove(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pPatchRec->Core.Key);
5465#ifdef VBOX_WITH_STATISTICS
5466 if (PATM_STAT_INDEX_IS_VALID(pPatchRec->patch.uPatchIdx))
5467 {
5468 STAMR3Deregister(pVM, &pPatchRec->patch);
5469#ifndef DEBUG_sandervl
5470 STAMR3Deregister(pVM, &pVM->patm.s.pStatsHC[pPatchRec->patch.uPatchIdx]);
5471 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchBlockSize);
5472 STAMR3Deregister(pVM, &pPatchRec->patch.cbPatchJump);
5473 STAMR3Deregister(pVM, &pPatchRec->patch.cbPrivInstr);
5474 STAMR3Deregister(pVM, &pPatchRec->patch.cCodeWrites);
5475 STAMR3Deregister(pVM, &pPatchRec->patch.cInvalidWrites);
5476 STAMR3Deregister(pVM, &pPatchRec->patch.cTraps);
5477 STAMR3Deregister(pVM, &pPatchRec->patch.flags);
5478 STAMR3Deregister(pVM, &pPatchRec->patch.nrJumpRecs);
5479 STAMR3Deregister(pVM, &pPatchRec->patch.nrFixups);
5480 STAMR3Deregister(pVM, &pPatchRec->patch.opcode);
5481 STAMR3Deregister(pVM, &pPatchRec->patch.uState);
5482 STAMR3Deregister(pVM, &pPatchRec->patch.uOldState);
5483 STAMR3Deregister(pVM, &pPatchRec->patch.uOpMode);
5484#endif
5485 }
5486#endif
5487
5488 /** Note: We don't attempt to reuse patch memory here as it's quite common that the new code block requires more memory. */
5489
5490 /* Attempt to install a new patch. */
5491 rc = PATMR3InstallPatch(pVM, pInstrGC, pPatch->flags & (PATMFL_CODE32|PATMFL_IDTHANDLER|PATMFL_INTHANDLER|PATMFL_TRAPHANDLER|PATMFL_DUPLICATE_FUNCTION|PATMFL_TRAPHANDLER_WITH_ERRORCODE|PATMFL_IDTHANDLER_WITHOUT_ENTRYPOINT));
5492 if (RT_SUCCESS(rc))
5493 {
5494 RTRCPTR pPatchTargetGC;
5495 PPATMPATCHREC pNewPatchRec;
5496
5497 /* Determine target address in new patch */
5498 pPatchTargetGC = PATMR3QueryPatchGCPtr(pVM, pInstrGC);
5499 Assert(pPatchTargetGC);
5500 if (!pPatchTargetGC)
5501 {
5502 rc = VERR_PATCHING_REFUSED;
5503 goto failure;
5504 }
5505
5506 /* Reset offset into patch memory to put the next code blocks right at the beginning. */
5507 pPatch->uCurPatchOffset = 0;
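/* The old patch block is left in place (patch memory is never reused); the jump generated below
 * redirects anything that still enters the old block at its start into the freshly compiled code. */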
5508
5509 /* insert jump to new patch in old patch block */
5510 rc = patmPatchGenPatchJump(pVM, pPatch, pInstrGC, pPatchTargetGC, false /* no lookup record */);
5511 if (RT_FAILURE(rc))
5512 goto failure;
5513
5514 pNewPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5515 Assert(pNewPatchRec); /* can't fail */
5516
5517 /* Remove old patch (only do that when everything is finished) */
5518 int rc2 = PATMRemovePatch(pVM, pPatchRec, true /* force removal */);
5519 AssertRC(rc2);
5520
5521 /* Put the new patch back into the tree, because removing the old one kicked this one out. (hack alert) */
5522 RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pNewPatchRec->Core);
5523
5524 LogRel(("PATM: patmR3RefreshPatch: succeeded to refresh patch at %RRv \n", pInstrGC));
5525 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshSuccess);
5526
5527 /* Used by another patch, so don't remove it! */
5528 pNewPatchRec->patch.flags |= PATMFL_CODE_REFERENCED;
5529 }
5530
5531failure:
5532 if (RT_FAILURE(rc))
5533 {
5534 LogRel(("PATM: patmR3RefreshPatch: failed to refresh patch at %RRv. Reactiving old one. \n", pInstrGC));
5535
5536 /* Remove the new inactive patch */
5537 rc = PATMR3RemovePatch(pVM, pInstrGC);
5538 AssertRC(rc);
5539
5540 /* Put the old patch back into the tree (or else it won't be saved) (hack alert) */
5541 RTAvloU32Insert(&pVM->patm.s.PatchLookupTreeHC->PatchTree, &pPatchRec->Core);
5542
5543 /* Enable again in case the dirty instruction is near the end and there are safe code paths. */
5544 int rc2 = PATMR3EnablePatch(pVM, pInstrGC);
5545 AssertRC(rc2);
5546
5547 STAM_COUNTER_INC(&pVM->patm.s.StatPatchRefreshFailed);
5548 }
5549 return rc;
5550}
5551
5552/**
5553 * Find patch for privileged instruction at specified location
5554 *
5555 * @returns Patch structure pointer if found; else NULL
5556 * @param pVM The VM to operate on.
5557 * @param pInstrGC Guest context pointer to an instruction that might lie within 5 bytes of an existing patch jump
5558 * @param fIncludeHints Include hinted patches or not
5559 *
5560 */
5561PPATCHINFO PATMFindActivePatchByEntrypoint(PVM pVM, RTRCPTR pInstrGC, bool fIncludeHints)
5562{
5563 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5564 /* If the patch is enabled, the pointer is not identical to the privileged instruction pointer, and it lies within 5 bytes of that pointer, then we've got a hit! */
5565 if (pPatchRec)
5566 {
5567 if ( pPatchRec->patch.uState == PATCH_ENABLED
5568 && (pPatchRec->patch.flags & PATMFL_PATCHED_GUEST_CODE)
5569 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5570 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5571 {
5572 Log(("Found active patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5573 return &pPatchRec->patch;
5574 }
5575 else
5576 if ( fIncludeHints
5577 && pPatchRec->patch.uState == PATCH_DISABLED
5578 && (pPatchRec->patch.flags & PATMFL_INSTR_HINT)
5579 && pInstrGC > pPatchRec->patch.pPrivInstrGC
5580 && pInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5581 {
5582 Log(("Found HINT patch at %RRv (org %RRv)\n", pInstrGC, pPatchRec->patch.pPrivInstrGC));
5583 return &pPatchRec->patch;
5584 }
5585 }
5586 return NULL;
5587}
5588
5589/**
5590 * Checks whether the GC address is inside a generated patch jump
5591 *
5592 * @returns true -> yes, false -> no
5593 * @param pVM The VM to operate on.
5594 * @param pAddr Guest context address
5595 * @param pPatchAddr Guest context patch address (if true)
5596 */
5597VMMR3DECL(bool) PATMR3IsInsidePatchJump(PVM pVM, RTRCPTR pAddr, PRTGCPTR32 pPatchAddr)
5598{
5599 RTRCPTR addr;
5600 PPATCHINFO pPatch;
5601
5602 if (PATMIsEnabled(pVM) == false)
5603 return false;
5604
5605 if (pPatchAddr == NULL)
5606 pPatchAddr = &addr;
5607
5608 *pPatchAddr = 0;
5609
5610 pPatch = PATMFindActivePatchByEntrypoint(pVM, pAddr);
5611 if (pPatch)
5612 {
5613 *pPatchAddr = pPatch->pPrivInstrGC;
5614 }
5615 return *pPatchAddr == 0 ? false : true;
5616}
5617
5618/**
5619 * Remove patch for privileged instruction at specified location
5620 *
5621 * @returns VBox status code.
5622 * @param pVM The VM to operate on.
5623 * @param pInstrGC Guest context pointer to the privileged instruction
5624 *
5625 * @note returns failure if patching is not allowed or possible
5626 *
5627 */
5628VMMR3DECL(int) PATMR3RemovePatch(PVM pVM, RTRCPTR pInstrGC)
5629{
5630 PPATMPATCHREC pPatchRec;
5631
5632 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC);
5633 if (pPatchRec)
5634 {
5635 int rc = PATMR3DisablePatch(pVM, pInstrGC);
5636 if (rc == VWRN_PATCH_REMOVED)
5637 return VINF_SUCCESS;
5638 return PATMRemovePatch(pVM, pPatchRec, false);
5639 }
5640 AssertFailed();
5641 return VERR_PATCH_NOT_FOUND;
5642}
5643
5644/**
5645 * Mark patch as dirty
5646 *
5647 * @returns VBox status code.
5648 * @param pVM The VM to operate on.
5649 * @param pPatch Patch record
5650 *
5651 * @note returns failure if patching is not allowed or possible
5652 *
5653 */
5654VMMR3DECL(int) PATMR3MarkDirtyPatch(PVM pVM, PPATCHINFO pPatch)
5655{
5656 if (pPatch->pPatchBlockOffset)
5657 {
5658 Log(("Invalidate patch at %RRv (HC=%RRv)\n", PATCHCODE_PTR_GC(pPatch), PATCHCODE_PTR_HC(pPatch)));
5659 pPatch->bDirtyOpcode = *PATCHCODE_PTR_HC(pPatch);
5660 *PATCHCODE_PTR_HC(pPatch) = 0xCC;
5661 }
5662
5663 STAM_COUNTER_INC(&pVM->patm.s.StatDirty);
5664 /* Put back the replaced instruction. */
5665 int rc = PATMR3DisablePatch(pVM, pPatch->pPrivInstrGC);
5666 if (rc == VWRN_PATCH_REMOVED)
5667 return VINF_SUCCESS;
5668
5669 /** @note we don't restore patch pages for patches that are not enabled! */
5670 /** @note be careful when changing this behaviour!! */
5671
5672 /* The patch pages are no longer marked for self-modifying code detection */
5673 if (pPatch->flags & PATMFL_CODE_MONITORED)
5674 {
5675 int rc = patmRemovePatchPages(pVM, pPatch);
5676 AssertRCReturn(rc, rc);
5677 }
5678 pPatch->uState = PATCH_DIRTY;
5679
5680 /* Paranoia: make sure this patch is not somewhere in the call chain; resetting the PATM stack prevents ret instructions inside patch code from succeeding. */
5681 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
5682
5683 return VINF_SUCCESS;
5684}
5685
5686/**
5687 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5688 *
5689 * @returns Corresponding guest GC instruction pointer, or 0 if not found.
5690 * @param pVM The VM to operate on.
5691 * @param pPatch Patch block structure pointer
5692 * @param pPatchGC GC address in patch block
5693 */
5694RTRCPTR patmPatchGCPtr2GuestGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t *) pPatchGC)
5695{
5696 Assert(pPatch->Patch2GuestAddrTree);
5697 /* Get the closest record from below. */
5698 PRECPATCHTOGUEST pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->Patch2GuestAddrTree, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5699 if (pPatchToGuestRec)
5700 return pPatchToGuestRec->pOrgInstrGC;
5701
5702 return 0;
5703}
5704
5705 /** Converts Guest code GC ptr to Patch code GC ptr (if found)
5706 *
5707 * @returns Corresponding GC pointer in patch block, or 0 if not found
5708 * @param pVM The VM to operate on.
5709 * @param pPatch Current patch block pointer
5710 * @param pInstrGC Guest context pointer to privileged instruction
5711 *
5712 */
5713RTRCPTR patmGuestGCPtrToPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5714{
5715 if (pPatch->Guest2PatchAddrTree)
5716 {
5717 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32Get(&pPatch->Guest2PatchAddrTree, pInstrGC);
5718 if (pGuestToPatchRec)
5719 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5720 }
5721
5722 return 0;
5723}
5724
5725 /** Converts Guest code GC ptr to Patch code GC ptr (or nearest from below if no identical match)
5726 *
5727 * @returns Corresponding GC pointer in patch block, or 0 if not found
5728 * @param pVM The VM to operate on.
5729 * @param pPatch Current patch block pointer
5730 * @param pInstrGC Guest context pointer to privileged instruction
5731 *
5732 */
5733RTRCPTR patmGuestGCPtrToClosestPatchGCPtr(PVM pVM, PPATCHINFO pPatch, RCPTRTYPE(uint8_t*) pInstrGC)
5734{
5735 PRECGUESTTOPATCH pGuestToPatchRec = (PRECGUESTTOPATCH)RTAvlU32GetBestFit(&pPatch->Guest2PatchAddrTree, pInstrGC, false);
5736 if (pGuestToPatchRec)
5737 return pVM->patm.s.pPatchMemGC + pGuestToPatchRec->PatchOffset;
5738
5739 return 0;
5740}
5741
5742 /** Converts Guest code GC ptr to Patch code GC ptr (if found)
5743 *
5744 * @returns Corresponding GC pointer in patch block, or 0 if not found
5745 * @param pVM The VM to operate on.
5746 * @param pInstrGC Guest context pointer to privileged instruction
5747 *
5748 */
5749VMMR3DECL(RTRCPTR) PATMR3GuestGCPtrToPatchGCPtr(PVM pVM, RCPTRTYPE(uint8_t*) pInstrGC)
5750{
5751 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pInstrGC, false);
5752 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && pInstrGC >= pPatchRec->patch.pPrivInstrGC)
5753 {
5754 return patmGuestGCPtrToPatchGCPtr(pVM, &pPatchRec->patch, pInstrGC);
5755 }
5756 return 0;
5757}
5758
5759/**
5760 * Query the corresponding GC instruction pointer from a pointer inside the patch block itself
5761 *
5762 * @returns original GC instruction pointer or 0 if not found
5763 * @param pVM The VM to operate on.
5764 * @param pPatchGC GC address in patch block
5765 * @param pEnmState State of the translated address (out)
5766 *
5767 */
5768VMMR3DECL(RTRCPTR) PATMR3PatchToGCPtr(PVM pVM, RTRCPTR pPatchGC, PATMTRANSSTATE *pEnmState)
5769{
5770 PPATMPATCHREC pPatchRec;
5771 void *pvPatchCoreOffset;
5772 RTRCPTR pPrivInstrGC;
5773
5774 Assert(PATMIsPatchGCAddr(pVM, pPatchGC));
5775 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, pPatchGC - pVM->patm.s.pPatchMemGC, false);
5776 if (pvPatchCoreOffset == 0)
5777 {
5778 Log(("PATMR3PatchToGCPtr failed for %RRv offset %x\n", pPatchGC, pPatchGC - pVM->patm.s.pPatchMemGC));
5779 return 0;
5780 }
5781 pPatchRec = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
5782 pPrivInstrGC = patmPatchGCPtr2GuestGCPtr(pVM, &pPatchRec->patch, pPatchGC);
5783 if (pEnmState)
5784 {
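/* Classify the translation so the caller knows whether it is safe to resume execution at the
 * returned guest address (e.g. PATMTRANS_OVERWRITTEN means the original instruction now lies
 * inside a 5-byte patch jump and must not be executed directly). */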
5785 AssertMsg(pPrivInstrGC && ( pPatchRec->patch.uState == PATCH_ENABLED
5786 || pPatchRec->patch.uState == PATCH_DIRTY
5787 || pPatchRec->patch.uState == PATCH_DISABLE_PENDING
5788 || pPatchRec->patch.uState == PATCH_UNUSABLE),
5789 ("pPrivInstrGC=%RRv uState=%d\n", pPrivInstrGC, pPatchRec->patch.uState));
5790
5791 if ( !pPrivInstrGC
5792 || pPatchRec->patch.uState == PATCH_UNUSABLE
5793 || pPatchRec->patch.uState == PATCH_REFUSED)
5794 {
5795 pPrivInstrGC = 0;
5796 *pEnmState = PATMTRANS_FAILED;
5797 }
5798 else
5799 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pPrivInstrGC)
5800 {
5801 *pEnmState = PATMTRANS_INHIBITIRQ;
5802 }
5803 else
5804 if ( pPatchRec->patch.uState == PATCH_ENABLED
5805 && !(pPatchRec->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAMPOLINE))
5806 && pPrivInstrGC > pPatchRec->patch.pPrivInstrGC
5807 && pPrivInstrGC < pPatchRec->patch.pPrivInstrGC + pPatchRec->patch.cbPatchJump)
5808 {
5809 *pEnmState = PATMTRANS_OVERWRITTEN;
5810 }
5811 else
5812 if (PATMFindActivePatchByEntrypoint(pVM, pPrivInstrGC))
5813 {
5814 *pEnmState = PATMTRANS_OVERWRITTEN;
5815 }
5816 else
5817 if (pPrivInstrGC == pPatchRec->patch.pPrivInstrGC)
5818 {
5819 *pEnmState = PATMTRANS_PATCHSTART;
5820 }
5821 else
5822 *pEnmState = PATMTRANS_SAFE;
5823 }
5824 return pPrivInstrGC;
5825}
5826
5827/**
5828 * Returns the GC pointer of the patch for the specified GC address
5829 *
5830 * @returns Patch code GC pointer, or 0 if no enabled/dirty patch is found.
5831 * @param pVM The VM to operate on.
5832 * @param pAddrGC Guest context address
5833 */
5834VMMR3DECL(RTRCPTR) PATMR3QueryPatchGCPtr(PVM pVM, RTRCPTR pAddrGC)
5835{
5836 PPATMPATCHREC pPatchRec;
5837
5838 // Find the patch record
5839 pPatchRec = (PPATMPATCHREC)RTAvloU32Get(&pVM->patm.s.PatchLookupTreeHC->PatchTree, pAddrGC);
5840 /** @todo We should only use patches that are enabled; we have always included dirty ones as well, but that's incorrect! */
5841 if (pPatchRec && (pPatchRec->patch.uState == PATCH_ENABLED || pPatchRec->patch.uState == PATCH_DIRTY))
5842 return PATCHCODE_PTR_GC(&pPatchRec->patch);
5843
5844 return 0;
5845}
5846
5847/**
5848 * Attempt to recover dirty instructions
5849 *
5850 * @returns VBox status code.
5851 * @param pVM The VM to operate on.
5852 * @param pCtx CPU context
5853 * @param pPatch Patch record
5854 * @param pPatchToGuestRec Patch to guest address record
5855 * @param pEip GC pointer of trapping instruction
5856 */
5857static int patmR3HandleDirtyInstr(PVM pVM, PCPUMCTX pCtx, PPATMPATCHREC pPatch, PRECPATCHTOGUEST pPatchToGuestRec, RTRCPTR pEip)
5858{
5859 DISCPUSTATE CpuOld, CpuNew;
5860 uint8_t *pPatchInstrHC, *pCurPatchInstrHC;
5861 int rc;
5862 RTRCPTR pCurInstrGC, pCurPatchInstrGC;
5863 uint32_t cbDirty;
5864 PRECPATCHTOGUEST pRec;
5865 PVMCPU pVCpu = VMMGetCpu0(pVM);
5866
5867 Log(("patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv)\n", pEip, pPatchToGuestRec->pOrgInstrGC));
5868
5869 pRec = pPatchToGuestRec;
5870 pCurInstrGC = pPatchToGuestRec->pOrgInstrGC;
5871 pCurPatchInstrGC = pEip;
5872 cbDirty = 0;
5873 pPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
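/* Recovery strategy (what the two loops below do): first restore the saved opcode bytes and
 * disassemble the old patch instructions to determine the size of the dirty region; then, if the
 * new guest instructions are still harmless and fit into that region, copy them 1:1 over the
 * patch code and rebuild the lookup records. */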
5874
5875 /* Find all adjacent dirty instructions */
5876 while (true)
5877 {
5878 if (pRec->fJumpTarget)
5879 {
5880 LogRel(("PATM: patmR3HandleDirtyInstr: dirty instruction at %RRv (%RRv) ignored, because instruction in function was reused as target of jump\n", pEip, pPatchToGuestRec->pOrgInstrGC));
5881 pRec->fDirty = false;
5882 return VERR_PATCHING_REFUSED;
5883 }
5884
5885 /* Restore original instruction opcode byte so we can check if the write was indeed safe. */
5886 pCurPatchInstrHC = patmPatchGCPtr2PatchHCPtr(pVM, pCurPatchInstrGC);
5887 *pCurPatchInstrHC = pRec->u8DirtyOpcode;
5888
5889 /* Only harmless instructions are acceptable. */
5890 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurPatchInstrGC, &CpuOld, 0);
5891 if ( RT_FAILURE(rc)
5892 || !(CpuOld.pCurInstr->optype & OPTYPE_HARMLESS))
5893 {
5894 if (RT_SUCCESS(rc))
5895 cbDirty += CpuOld.opsize;
5896 else
5897 if (!cbDirty)
5898 cbDirty = 1;
5899 break;
5900 }
5901
5902#ifdef DEBUG
5903 char szBuf[256];
5904 szBuf[0] = '\0';
5905 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurPatchInstrGC, 0, szBuf, sizeof(szBuf), NULL);
5906 Log(("DIRTY: %s\n", szBuf));
5907#endif
5908 /* Mark as clean; if we fail we'll let it always fault. */
5909 pRec->fDirty = false;
5910
5911 /** Remove old lookup record. */
5912 patmr3RemoveP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrGC);
5913
5914 pCurPatchInstrGC += CpuOld.opsize;
5915 cbDirty += CpuOld.opsize;
5916
5917 /* Let's see if there's another dirty instruction right after. */
5918 pRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, pCurPatchInstrGC - pVM->patm.s.pPatchMemGC, true);
5919 if (!pRec || !pRec->fDirty)
5920 break; /* no more dirty instructions */
5921
5922 /* In case of complex instructions the next guest instruction could be quite far off. */
5923 pCurPatchInstrGC = pRec->Core.Key + pVM->patm.s.pPatchMemGC;
5924 }
5925
5926 if ( RT_SUCCESS(rc)
5927 && (CpuOld.pCurInstr->optype & OPTYPE_HARMLESS)
5928 )
5929 {
5930 uint32_t cbLeft;
5931
5932 pCurPatchInstrHC = pPatchInstrHC;
5933 pCurPatchInstrGC = pEip;
5934 cbLeft = cbDirty;
5935
5936 while (cbLeft && RT_SUCCESS(rc))
5937 {
5938 bool fValidInstr;
5939
5940 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCurInstrGC, &CpuNew, 0);
5941
5942 fValidInstr = !!(CpuNew.pCurInstr->optype & OPTYPE_HARMLESS);
5943 if ( !fValidInstr
5944 && (CpuNew.pCurInstr->optype & OPTYPE_RELATIVE_CONTROLFLOW)
5945 )
5946 {
5947 RTRCPTR pTargetGC = PATMResolveBranch(&CpuNew, pCurInstrGC);
5948
5949 if ( pTargetGC >= pPatchToGuestRec->pOrgInstrGC
5950 && pTargetGC <= pPatchToGuestRec->pOrgInstrGC + cbDirty
5951 )
5952 {
5953 /* A relative jump to an instruction inside or to the end of the dirty block is acceptable. */
5954 fValidInstr = true;
5955 }
5956 }
5957
5958 /* If the instruction is completely harmless (which implies a 1:1 patch copy). */
5959 if ( rc == VINF_SUCCESS
5960 && CpuNew.opsize <= cbLeft /* must still fit */
5961 && fValidInstr
5962 )
5963 {
5964#ifdef DEBUG
5965 char szBuf[256];
5966 szBuf[0] = '\0';
5967 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, 0, szBuf, sizeof(szBuf), NULL);
5968 Log(("NEW: %s\n", szBuf));
5969#endif
5970
5971 /* Copy the new instruction. */
5972 rc = PGMPhysSimpleReadGCPtr(VMMGetCpu0(pVM), pCurPatchInstrHC, pCurInstrGC, CpuNew.opsize);
5973 AssertRC(rc);
5974
5975 /* Add a new lookup record for the duplicated instruction. */
5976 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
5977 }
5978 else
5979 {
5980#ifdef DEBUG
5981 char szBuf[256];
5982 szBuf[0] = '\0';
5983 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pCurInstrGC, 0, szBuf, sizeof(szBuf), NULL);
5984 Log(("NEW: %s (FAILED)\n", szBuf));
5985#endif
5986 /* Restore the old lookup record for the duplicated instruction. */
5987 patmr3AddP2GLookupRecord(pVM, &pPatch->patch, pCurPatchInstrHC, pCurInstrGC, PATM_LOOKUP_BOTHDIR);
5988
5989 /** @todo in theory we need to restore the lookup records for the remaining dirty instructions too! */
5990 rc = VERR_PATCHING_REFUSED;
5991 break;
5992 }
5993 pCurInstrGC += CpuNew.opsize;
5994 pCurPatchInstrHC += CpuNew.opsize;
5995 pCurPatchInstrGC += CpuNew.opsize;
5996 cbLeft -= CpuNew.opsize;
5997 }
5998 }
5999 else
6000 rc = VERR_PATCHING_REFUSED;
6001
6002 if (RT_SUCCESS(rc))
6003 {
6004 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyGood);
6005 }
6006 else
6007 {
6008 STAM_COUNTER_INC(&pVM->patm.s.StatInstrDirtyBad);
6009 Assert(cbDirty);
6010
6011 /* Mark the whole instruction stream with breakpoints. */
6012 if (cbDirty)
6013 memset(pPatchInstrHC, 0xCC, cbDirty);
6014
6015 if ( pVM->patm.s.fOutOfMemory == false
6016 && (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_IDTHANDLER|PATMFL_TRAPHANDLER)))
6017 {
6018 rc = patmR3RefreshPatch(pVM, pPatch);
6019 if (RT_FAILURE(rc))
6020 {
6021 LogRel(("PATM: Failed to refresh dirty patch at %RRv. Disabling it.\n", pPatch->patch.pPrivInstrGC));
6022 }
6023 /* Even if we succeed, we must go back to the original instruction as the patched one could be invalid. */
6024 rc = VERR_PATCHING_REFUSED;
6025 }
6026 }
6027 return rc;
6028}
6029
6030/**
6031 * Handle trap inside patch code
6032 *
6033 * @returns VBox status code.
6034 * @param pVM The VM to operate on.
6035 * @param pCtx CPU context
6036 * @param pEip GC pointer of trapping instruction
6037 * @param ppNewEip GC pointer to new instruction
6038 */
6039VMMR3DECL(int) PATMR3HandleTrap(PVM pVM, PCPUMCTX pCtx, RTRCPTR pEip, RTGCPTR *ppNewEip)
6040{
6041 PPATMPATCHREC pPatch = 0;
6042 void *pvPatchCoreOffset;
6043 RTRCUINTPTR offset;
6044 RTRCPTR pNewEip;
6045 int rc;
6046 PRECPATCHTOGUEST pPatchToGuestRec = 0;
6047 PVMCPU pVCpu = VMMGetCpu0(pVM);
6048
6049 Assert(pVM->cCpus == 1);
6050
6051 pNewEip = 0;
6052 *ppNewEip = 0;
6053
6054 STAM_PROFILE_ADV_START(&pVM->patm.s.StatHandleTrap, a);
6055
6056 /* Find the patch record. */
6057 /** @note there might not be a patch to guest translation record (global function) */
6058 offset = pEip - pVM->patm.s.pPatchMemGC;
6059 pvPatchCoreOffset = RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTreeByPatchAddr, offset, false);
6060 if (pvPatchCoreOffset)
6061 {
6062 pPatch = PATM_PATCHREC_FROM_COREOFFSET(pvPatchCoreOffset);
6063
6064 Assert(offset >= pPatch->patch.pPatchBlockOffset && offset < pPatch->patch.pPatchBlockOffset + pPatch->patch.cbPatchBlockSize);
6065
6066 if (pPatch->patch.uState == PATCH_DIRTY)
6067 {
6068 Log(("PATMR3HandleTrap: trap in dirty patch at %RRv\n", pEip));
6069 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6070 {
6071 /* Function duplication patches set fPIF to 1 on entry */
6072 pVM->patm.s.pGCStateHC->fPIF = 1;
6073 }
6074 }
6075 else
6076 if (pPatch->patch.uState == PATCH_DISABLED)
6077 {
6078 Log(("PATMR3HandleTrap: trap in disabled patch at %RRv\n", pEip));
6079 if (pPatch->patch.flags & (PATMFL_DUPLICATE_FUNCTION|PATMFL_CODE_REFERENCED))
6080 {
6081 /* Function duplication patches set fPIF to 1 on entry */
6082 pVM->patm.s.pGCStateHC->fPIF = 1;
6083 }
6084 }
6085 else
6086 if (pPatch->patch.uState == PATCH_DISABLE_PENDING)
6087 {
6088 RTRCPTR pPrivInstrGC = pPatch->patch.pPrivInstrGC;
6089
6090 Log(("PATMR3HandleTrap: disable operation is pending for patch at %RRv\n", pPatch->patch.pPrivInstrGC));
6091 rc = PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6092 AssertReleaseMsg(rc != VWRN_PATCH_REMOVED, ("PATMR3DisablePatch removed patch at %RRv\n", pPrivInstrGC));
6093 AssertMsg(pPatch->patch.uState == PATCH_DISABLED || pPatch->patch.uState == PATCH_UNUSABLE, ("Unexpected failure to disable patch state=%d rc=%Rrc\n", pPatch->patch.uState, rc));
6094 }
6095
6096 pPatchToGuestRec = (PRECPATCHTOGUEST)RTAvlU32GetBestFit(&pPatch->patch.Patch2GuestAddrTree, offset, false);
6097 AssertReleaseMsg(pPatchToGuestRec, ("PATMR3HandleTrap: Unable to find corresponding guest address for %RRv (offset %x)\n", pEip, offset));
6098
6099 pNewEip = pPatchToGuestRec->pOrgInstrGC;
6100 pPatch->patch.cTraps++;
6101 PATM_STAT_FAULT_INC(&pPatch->patch);
6102 }
6103 else
6104 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 0, ("PATMR3HandleTrap: Unable to find translation record for %RRv (PIF=0)\n", pEip));
6105
6106 /* Check if we were interrupted in PATM generated instruction code. */
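/* fPIF is cleared while PATM-generated instruction sequences that must complete as a unit are
 * executing; trapping with fPIF == 0 therefore usually means a pushf/push/call emitted by PATM
 * faulted, typically on a not-present or read-only guest stack page (handled below). */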
6107 if (pVM->patm.s.pGCStateHC->fPIF == 0)
6108 {
6109 DISCPUSTATE Cpu;
6110 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pEip, &Cpu, "PIF Trap: ");
6111 AssertRC(rc);
6112
6113 if ( rc == VINF_SUCCESS
6114 && ( Cpu.pCurInstr->opcode == OP_PUSHF
6115 || Cpu.pCurInstr->opcode == OP_PUSH
6116 || Cpu.pCurInstr->opcode == OP_CALL)
6117 )
6118 {
6119 uint64_t fFlags;
6120
6121 STAM_COUNTER_INC(&pVM->patm.s.StatPushTrap);
6122
6123 if (Cpu.pCurInstr->opcode == OP_PUSH)
6124 {
6125 rc = PGMShwGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6126 if ( rc == VINF_SUCCESS
6127 && ((fFlags & (X86_PTE_P|X86_PTE_RW)) == (X86_PTE_P|X86_PTE_RW)) )
6128 {
6129 /* The stack address is fine, so the push argument is a pointer -> emulate this instruction */
6130
6131 /* Reset the PATM stack. */
6132 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6133
6134 pVM->patm.s.pGCStateHC->fPIF = 1;
6135
6136 Log(("Faulting push -> go back to the original instruction\n"));
6137
6138 /* continue at the original instruction */
6139 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6140 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6141 return VINF_SUCCESS;
6142 }
6143 }
6144
6145 /* Typical pushf (most patches)/push (call patch) trap because of a monitored page. */
6146 rc = PGMShwModifyPage(pVCpu, pCtx->esp, 1, X86_PTE_RW, ~(uint64_t)X86_PTE_RW);
6147 AssertMsgRC(rc, ("PGMShwModifyPage -> rc=%Rrc\n", rc));
6148 if (rc == VINF_SUCCESS)
6149 {
6150
6151 /* The guest page *must* be present. */
6152 rc = PGMGstGetPage(pVCpu, pCtx->esp, &fFlags, NULL);
6153 if (rc == VINF_SUCCESS && (fFlags & X86_PTE_P))
6154 {
6155 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6156 return VINF_PATCH_CONTINUE;
6157 }
6158 }
6159 }
6160 else
6161 if (pPatch && pPatch->patch.pPrivInstrGC == pNewEip) /* pPatch can be NULL when no patch record was found above. */
6162 {
6163 /* Invalidated patch or first instruction overwritten.
6164 * We can ignore the fPIF state in this case.
6165 */
6166 /* Reset the PATM stack. */
6167 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6168
6169 Log(("Call to invalidated patch -> go back to the original instruction\n"));
6170
6171 pVM->patm.s.pGCStateHC->fPIF = 1;
6172
6173 /* continue at the original instruction */
6174 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6175 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6176 return VINF_SUCCESS;
6177 }
6178
6179 char szBuf[256];
6180 szBuf[0] = '\0';
6181 DBGFR3DisasInstrEx(pVM, pVCpu->idCpu, pCtx->cs, pEip, 0, szBuf, sizeof(szBuf), NULL);
6182
6183 /* Very bad. We crashed in emitted code. Probably stack? */
6184 if (pPatch)
6185 {
6186 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6187 ("Crash in patch code %RRv (%RRv) esp=%RX32\nPatch state=%x flags=%RX64 fDirty=%d\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), pPatch->patch.uState, pPatch->patch.flags, pPatchToGuestRec->fDirty, szBuf));
6188 }
6189 else
6190 AssertReleaseMsg(pVM->patm.s.pGCStateHC->fPIF == 1,
6191 ("Crash in patch code %RRv (%RRv) esp=%RX32\n%s\n", pEip, pNewEip, CPUMGetGuestESP(pVCpu), szBuf));
6192 EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
6193 }
6194
6195 /* From here on, we must have a valid patch to guest translation. */
6196 if (pvPatchCoreOffset == 0)
6197 {
6198 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6199 AssertMsgFailed(("PATMR3HandleTrap: patch not found at address %RRv!!\n", pEip));
6200 return VERR_PATCH_NOT_FOUND; //fatal error
6201 }
6202
6203 /* Take care of dirty/changed instructions. */
6204 if (pPatchToGuestRec->fDirty)
6205 {
6206 Assert(pPatchToGuestRec->Core.Key == offset);
6207 Assert(pVM->patm.s.pGCStateHC->fPIF == 1);
6208
6209 rc = patmR3HandleDirtyInstr(pVM, pCtx, pPatch, pPatchToGuestRec, pEip);
6210 if (RT_SUCCESS(rc))
6211 {
6212 /* Retry the current instruction. */
6213 pNewEip = pEip;
6214 rc = VINF_PATCH_CONTINUE; /* Continue at current patch instruction. */
6215 }
6216 else
6217 {
6218 /* Reset the PATM stack. */
6219 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6220
6221 rc = VINF_SUCCESS; /* Continue at original instruction. */
6222 }
6223
6224 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6225 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6226 return rc;
6227 }
6228
6229#ifdef VBOX_STRICT
6230 if (pPatch->patch.flags & PATMFL_DUPLICATE_FUNCTION)
6231 {
6232 DISCPUSTATE cpu;
6233 bool disret;
6234 uint32_t opsize;
6235
6236 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6237 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &pPatch->patch, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6238 if (disret && cpu.pCurInstr->opcode == OP_RETN)
6239 {
6240 RTRCPTR retaddr;
6241 PCPUMCTX pCtx;
6242
6243 pCtx = CPUMQueryGuestCtxPtr(pVCpu);
6244
6245 rc = PGMPhysSimpleReadGCPtr(pVCpu, &retaddr, pCtx->esp, sizeof(retaddr));
6246 AssertRC(rc);
6247
6248 Log(("Return failed at %RRv (%RRv)\n", pEip, pNewEip));
6249 Log(("Expected return address %RRv found address %RRv Psp=%x\n", pVM->patm.s.pGCStackHC[(pVM->patm.s.pGCStateHC->Psp+PATM_STACK_SIZE)/sizeof(RTRCPTR)], retaddr, pVM->patm.s.pGCStateHC->Psp));
6250 }
6251 }
6252#endif
6253
6254 /* Return the original address; correct it by subtracting the CS base address. */
6255 *ppNewEip = pNewEip - SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), 0);
6256
6257 /* Reset the PATM stack. */
6258 CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
6259
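    /* If the faulting instruction is the one recorded as directly following an
     * STI, re-arm the interrupt inhibition at the original guest address so the
     * guest's interrupt shadow is preserved when execution resumes there. */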
6260 if (pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts == pNewEip)
6261 {
6262 /* Must be a faulting instruction after sti; currently only sysexit, hlt or iret */
6263 Log(("PATMR3HandleTrap %RRv -> inhibit irqs set!\n", pEip));
6264#ifdef VBOX_STRICT
6265 DISCPUSTATE cpu;
6266 bool disret;
6267 uint32_t opsize;
6268
6269 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6270 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &pPatch->patch, pNewEip), &opsize, NULL, PATMREAD_ORGCODE);
6271
6272 if (disret && (cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_INT3))
6273 {
6274 cpu.mode = (pPatch->patch.flags & PATMFL_CODE32) ? CPUMODE_32BIT : CPUMODE_16BIT;
6275 disret = PATMR3DISInstr(pVM, &pPatch->patch, &cpu, pNewEip, PATMGCVirtToHCVirt(pVM, &pPatch->patch, pNewEip), &opsize, NULL, PATMREAD_RAWCODE);
6276
6277 Assert(cpu.pCurInstr->opcode == OP_SYSEXIT || cpu.pCurInstr->opcode == OP_HLT || cpu.pCurInstr->opcode == OP_IRET);
6278 }
6279#endif
6280 EMSetInhibitInterruptsPC(pVCpu, pNewEip);
6281 pVM->patm.s.pGCStateHC->GCPtrInhibitInterrupts = 0;
6282 }
6283
6284 Log2(("pPatchBlockGC %RRv - pEip %RRv corresponding GC address %RRv\n", PATCHCODE_PTR_GC(&pPatch->patch), pEip, pNewEip));
6285#ifdef LOG_ENABLED
6286 CPUMR3DisasmInstr(pVM, pVCpu, pCtx, pNewEip, "PATCHRET: ");
6287#endif
6288 if (pNewEip >= pPatch->patch.pPrivInstrGC && pNewEip < pPatch->patch.pPrivInstrGC + pPatch->patch.cbPatchJump)
6289 {
6290        /* We can't jump back to code that we've overwritten with a 5-byte jump! */
6291        Log(("Disabling patch at location %RRv due to a trap too close to the privileged instruction\n", pPatch->patch.pPrivInstrGC));
6292 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6293 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6294 return VERR_PATCH_DISABLED;
6295 }
6296
6297#ifdef PATM_REMOVE_PATCH_ON_TOO_MANY_TRAPS
6298    /** @todo Compare to the number of successful runs; add an aging algorithm to determine the best time to disable the patch. */
6299 if (pPatch->patch.cTraps > MAX_PATCH_TRAPS)
6300 {
6301 Log(("Disabling patch at location %RRv due to too many traps inside patch code\n", pPatch->patch.pPrivInstrGC));
6302        // We are only wasting time; back out the patch.
6303 PATMR3DisablePatch(pVM, pPatch->patch.pPrivInstrGC);
6304 pTrapRec->pNextPatchInstr = 0;
6305 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6306 return VERR_PATCH_DISABLED;
6307 }
6308#endif
6309
6310 STAM_PROFILE_ADV_STOP(&pVM->patm.s.StatHandleTrap, a);
6311 return VINF_SUCCESS;
6312}
6313
6314
6315/**
6316 * Handle page-fault in monitored page
6317 *
6318 * @returns VBox status code.
6319 * @param pVM The VM to operate on.
6320 */
6321VMMR3DECL(int) PATMR3HandleMonitoredPage(PVM pVM)
6322{
6323 RTRCPTR addr = pVM->patm.s.pvFaultMonitor;
6324
6325 addr &= PAGE_BASE_GC_MASK;
6326
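    /* Drop the virtual access handler for the faulting page; it is presumably
     * re-installed when the affected patches are re-enabled below. */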
6327 int rc = PGMHandlerVirtualDeregister(pVM, addr);
6328 AssertRC(rc); NOREF(rc);
6329
6330 PPATMPATCHREC pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, false);
6331 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED && PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) == PAGE_ADDRESS(addr))
6332 {
6333 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6334 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6335 rc = PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6336 if (rc == VWRN_PATCH_REMOVED)
6337 return VINF_SUCCESS;
6338
6339 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6340
6341 if (addr == pPatchRec->patch.pPrivInstrGC)
6342 addr++;
6343 }
6344
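    /* Walk the remaining patches whose privileged instruction lives on the same
     * guest page; the write that triggered this handler may have modified them,
     * so each enabled patch is disabled and re-enabled to refresh it. */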
6345 for(;;)
6346 {
6347 pPatchRec = (PPATMPATCHREC)RTAvloU32GetBestFit(&pVM->patm.s.PatchLookupTreeHC->PatchTree, addr, true);
6348
6349 if (!pPatchRec || PAGE_ADDRESS(pPatchRec->patch.pPrivInstrGC) != PAGE_ADDRESS(addr))
6350 break;
6351
6352 if (pPatchRec && pPatchRec->patch.uState == PATCH_ENABLED)
6353 {
6354 STAM_COUNTER_INC(&pVM->patm.s.StatMonitored);
6355 Log(("Renewing patch at %RRv\n", pPatchRec->patch.pPrivInstrGC));
6356 PATMR3DisablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6357 PATMR3EnablePatch(pVM, pPatchRec->patch.pPrivInstrGC);
6358 }
6359 addr = pPatchRec->patch.pPrivInstrGC + 1;
6360 }
6361
6362 pVM->patm.s.pvFaultMonitor = 0;
6363 return VINF_SUCCESS;
6364}
6365
6366
6367#ifdef VBOX_WITH_STATISTICS
6368
6369static const char *PATMPatchType(PVM pVM, PPATCHINFO pPatch)
6370{
6371 if (pPatch->flags & PATMFL_SYSENTER)
6372 {
6373 return "SYSENT";
6374 }
6375 else
6376 if (pPatch->flags & (PATMFL_TRAPHANDLER|PATMFL_INTHANDLER))
6377 {
6378 static char szTrap[16];
6379 uint32_t iGate;
6380
6381 iGate = TRPMR3QueryGateByHandler(pVM, PATCHCODE_PTR_GC(pPatch));
6382 if (iGate < 256)
6383 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-%2X" : "TRAP-%2X", iGate);
6384 else
6385 RTStrPrintf(szTrap, sizeof(szTrap), (pPatch->flags & PATMFL_INTHANDLER) ? "INT-??" : "TRAP-??");
6386 return szTrap;
6387 }
6388 else
6389 if (pPatch->flags & (PATMFL_DUPLICATE_FUNCTION))
6390 return "DUPFUNC";
6391 else
6392 if (pPatch->flags & PATMFL_REPLACE_FUNCTION_CALL)
6393 return "FUNCCALL";
6394 else
6395 if (pPatch->flags & PATMFL_TRAMPOLINE)
6396 return "TRAMP";
6397 else
6398 return patmGetInstructionString(pPatch->opcode, pPatch->flags);
6399}
6400
6401static const char *PATMPatchState(PVM pVM, PPATCHINFO pPatch)
6402{
6403 switch(pPatch->uState)
6404 {
6405 case PATCH_ENABLED:
6406 return "ENA";
6407 case PATCH_DISABLED:
6408 return "DIS";
6409 case PATCH_DIRTY:
6410 return "DIR";
6411 case PATCH_UNUSABLE:
6412 return "UNU";
6413 case PATCH_REFUSED:
6414 return "REF";
6415 case PATCH_DISABLE_PENDING:
6416 return "DIP";
6417 default:
6418 AssertFailed();
6419 return " ";
6420 }
6421}
6422
6423/**
6424 * Resets the sample.
6425 * @param pVM The VM handle.
6426 * @param pvSample The sample registered using STAMR3RegisterCallback.
6427 */
6428static void patmResetStat(PVM pVM, void *pvSample)
6429{
6430 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6431 Assert(pPatch);
6432
6433 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A = 0;
6434 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B = 0;
6435}
6436
6437/**
6438 * Prints the sample into the buffer.
6439 *
6440 * @param pVM The VM handle.
6441 * @param pvSample The sample registered using STAMR3RegisterCallback.
6442 * @param pszBuf The buffer to print into.
6443 * @param cchBuf The size of the buffer.
6444 */
6445static void patmPrintStat(PVM pVM, void *pvSample, char *pszBuf, size_t cchBuf)
6446{
6447 PPATCHINFO pPatch = (PPATCHINFO)pvSample;
6448 Assert(pPatch);
6449
6450 Assert(pPatch->uState != PATCH_REFUSED);
6451 Assert(!(pPatch->flags & (PATMFL_REPLACE_FUNCTION_CALL|PATMFL_MMIO_ACCESS)));
6452
6453 RTStrPrintf(pszBuf, cchBuf, "size %04x ->%3s %8s - %08d - %08d",
6454 pPatch->cbPatchBlockSize, PATMPatchState(pVM, pPatch), PATMPatchType(pVM, pPatch),
6455 pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32A, pVM->patm.s.pStatsHC[pPatch->uPatchIdx].u32B);
6456}
6457
6458/**
6459 * Returns the GC address of the corresponding patch statistics counter
6460 *
6461 * @returns Stat address
6462 * @param pVM The VM to operate on.
6463 * @param pPatch Patch structure
6464 */
6465RTRCPTR patmPatchQueryStatAddress(PVM pVM, PPATCHINFO pPatch)
6466{
6467 Assert(pPatch->uPatchIdx != PATM_STAT_INDEX_NONE);
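    /* Each patch owns one STAMRATIOU32 slot in the pStats array; return the GC
     * address of its u32A member, presumably so the generated patch code can
     * update the counter directly from guest context. */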
6468 return pVM->patm.s.pStatsGC + sizeof(STAMRATIOU32) * pPatch->uPatchIdx + RT_OFFSETOF(STAMRATIOU32, u32A);
6469}
6470
6471#endif /* VBOX_WITH_STATISTICS */
6472
6473#ifdef VBOX_WITH_DEBUGGER
6474/**
6475 * The '.patmoff' command.
6476 *
6477 * @returns VBox status.
6478 * @param pCmd Pointer to the command descriptor (as registered).
6479 * @param pCmdHlp Pointer to command helper functions.
6480 * @param pVM Pointer to the current VM (if any).
6481 * @param paArgs Pointer to (readonly) array of arguments.
6482 * @param cArgs Number of arguments in the array.
6483 */
6484static DECLCALLBACK(int) patmr3CmdOff(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
6485{
6486 /*
6487 * Validate input.
6488 */
6489 if (!pVM)
6490        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6491
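    /* Disable every existing patch first, then forbid the creation of new ones. */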
6492 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, DisableAllPatches, pVM);
6493 PATMR3AllowPatching(pVM, false);
6494 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching disabled\n");
6495}
6496
6497/**
6498 * The '.patmon' command.
6499 *
6500 * @returns VBox status.
6501 * @param pCmd Pointer to the command descriptor (as registered).
6502 * @param pCmdHlp Pointer to command helper functions.
6503 * @param pVM Pointer to the current VM (if any).
6504 * @param paArgs Pointer to (readonly) array of arguments.
6505 * @param cArgs Number of arguments in the array.
6506 */
6507static DECLCALLBACK(int) patmr3CmdOn(PCDBGCCMD pCmd, PDBGCCMDHLP pCmdHlp, PVM pVM, PCDBGCVAR paArgs, unsigned cArgs, PDBGCVAR pResult)
6508{
6509 /*
6510 * Validate input.
6511 */
6512 if (!pVM)
6513        return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n");
6514
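    /* Allow patching again first, then walk the tree re-enabling every patch. */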
6515 PATMR3AllowPatching(pVM, true);
6516 RTAvloU32DoWithAll(&pVM->patm.s.PatchLookupTreeHC->PatchTree, true, EnableAllPatches, pVM);
6517 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Patching enabled\n");
6518}
6519#endif