From 54ea2285f05ef6f59a6f1c63df4a7bdd77d7b883 Mon Sep 17 00:00:00 2001
From: riperiperi
Date: Mon, 24 May 2021 21:52:44 +0100
Subject: [PATCH] POWER - Performance Optimizations With Extensive Ramifications (#2286)

* Refactoring of KMemoryManager class
* Replace some trivial uses of DRAM address with VA
* Get rid of GetDramAddressFromVa
* Abstracting more operations on derived page table class
* Run auto-format on KPageTableBase
* Managed to make TryConvertVaToPa private, a few uses remain now
* Implement guest physical pages ref counting, remove manual freeing
* Make DoMmuOperation private and call new abstract methods only from the base class
* Pass pages count rather than size on Map/UnmapMemory
* Change memory managers to take host pointers
* Fix a guest memory leak and simplify KPageTable
* Expose new methods for host range query and mapping
* Some refactoring of MapPagesFromClientProcess to allow proper page ref counting and mapping without KPageLists
* Remove more uses of AddVaRangeToPageList, now only one remains (shared memory page checking)
* Add a SharedMemoryStorage class, will be useful for host mapping
* Sayonara AddVaRangeToPageList, you served us well
* Start to implement host memory mapping (WIP)
* Support memory tracking through host exception handling
* Fix some access violations from HLE service guest memory access and CPU
* Fix memory tracking
* Fix mapping list bugs, including a race and an error adding mapping ranges
* Simple page table for memory tracking
* Simple "volatile" region handle mode
* Update UBOs directly (experimental, rough)
* Fix the overlap check
* Only set non-modified buffers as volatile
* Fix some memory tracking issues
* Fix possible race in MapBufferFromClientProcess (block list updates were not locked)
* Write uniform update to memory immediately, only defer the buffer set.
* Fix some memory tracking issues
* Pass correct pages count on shared memory unmap
* ARMeilleure Signal Handler v1 + Unix changes

  Unix currently behaves like Windows, rather than remapping physical
* Actually check if the host platform is Unix
* Fix decommit on Linux.
* Implement Windows 10 placeholder shared memory, fix a buffer issue.
* Make PTC version something that will never match with master
* Remove testing variable for block count
* Add reference count for memory manager, fix dispose

  Can still deadlock with OpenAL
* Add address validation, use page table for mapped check, add docs

  Might clean up the page table traversing routines.
* Implement batched mapping/tracking.
* Move documentation, fix tests.
* Cleanup uniform buffer update stuff.
* Remove unnecessary assignment.
* Add unsafe host mapped memory switch

  On by default. Would be good to turn this off for untrusted code (homebrew, exefs mods) and give the user the option to turn it on manually, though that requires some UI work.
* Remove C# exception handlers

  They have issues due to current .NET limitations, so the ARMeilleure one fully replaces them for now.
* Fix MapPhysicalMemory on the software MemoryManager.
* Null check for GetHostAddress, docs
* Add configuration for setting memory manager mode (not in UI yet)
* Add config to UI
* Fix type mismatch on Unix signal handler code emit
* Fix 6GB DRAM mode.

  The size can be greater than `uint.MaxValue` when the DRAM is >4GB.
* Address some feedback.
* More detailed error if backing memory cannot be mapped.
* SetLastError on all OS functions for consistency
* Force pages dirty with UBO update instead of setting them directly.

  Seems to be much faster across a few games.
  Need retesting.
* Rebase, configuration rework, fix mem tracking regression
* Fix race in FreePages
* Set memory managers null after decrementing ref count
* Remove readonly keyword, as this is now modified.
* Use a local variable for the signal handler rather than a register.
* Fix bug with buffer resize, and index/uniform buffer binding.

  Should fix flickering in games.
* Add InvalidAccessHandler to MemoryTracking

  Doesn't do anything yet
* Call invalid access handler on unmapped read/write.

  Same rules as the regular memory manager.
* Make unsafe mapped memory its own MemoryManagerType
* Move FlushUboDirty into UpdateState.
* Buffer dirty cache, rather than UBO cache

  Much cleaner, may be reusable for Inline2Memory updates.
* This doesn't return anything anymore.
* Add sigaction remove methods, correct a few function signatures.
* Return empty list of physical regions for size 0.
* Also on AddressSpaceManager

Co-authored-by: gdkchan
---
 .../Instructions/InstEmitMemoryHelper.cs      |   70 +-
 ARMeilleure/Memory/IMemoryManager.cs          |   52 +
 ARMeilleure/Memory/MemoryManagerType.cs       |   41 +
 ARMeilleure/Signal/NativeSignalHandler.cs     |  327 ++
 .../Signal/UnixSignalHandlerRegistration.cs   |   57 +
 .../WindowsSignalHandlerRegistration.cs       |   24 +
 ARMeilleure/Translation/PTC/Ptc.cs            |   23 +-
 ARMeilleure/Translation/Translator.cs         |    6 +
 .../Renderer/Server/AudioRenderSystem.cs      |   12 +
 .../Renderer/Server/AudioRendererManager.cs   |   14 +
 .../Configuration/MemoryManagerMode.cs        |    9 +
 Ryujinx.Cpu/CpuContext.cs                     |    5 +-
 Ryujinx.Cpu/IVirtualMemoryManagerTracked.cs   |   42 +
 Ryujinx.Cpu/MemoryEhMeilleure.cs              |   41 +
 Ryujinx.Cpu/MemoryManager.cs                  |  293 +-
 Ryujinx.Cpu/MemoryManagerBase.cs              |   32 +
 Ryujinx.Cpu/MemoryManagerHostMapped.cs        |  692 ++++
 Ryujinx.Cpu/Tracking/CpuMultiRegionHandle.cs  |    1 +
 Ryujinx.Cpu/Tracking/CpuRegionHandle.cs       |    2 +
 .../Tracking/CpuSmartMultiRegionHandle.cs     |    1 +
 Ryujinx.Graphics.Gpu/Engine/Compute.cs        |    2 +
 .../Engine/MethodCopyBuffer.cs                |    2 +
 .../Engine/MethodUniformBufferBind.cs         |    2 +
 .../Engine/MethodUniformBufferUpdate.cs       |   56 +-
 Ryujinx.Graphics.Gpu/Engine/Methods.cs        |    2 +
 Ryujinx.Graphics.Gpu/GpuContext.cs            |    4 +-
 Ryujinx.Graphics.Gpu/Image/Pool.cs            |    2 -
 Ryujinx.Graphics.Gpu/Image/TextureGroup.cs    |    3 +-
 Ryujinx.Graphics.Gpu/Memory/Buffer.cs         |   83 +-
 .../Memory/BufferCacheEntry.cs                |   43 +
 Ryujinx.Graphics.Gpu/Memory/BufferManager.cs  |   29 +-
 .../Memory/GpuRegionHandle.cs                 |    8 +
 Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs |   24 +-
 Ryujinx.HLE/HLEConfiguration.cs               |    7 +
 Ryujinx.HLE/HOS/ApplicationLoader.cs          |    2 +-
 Ryujinx.HLE/HOS/ArmProcessContext.cs          |   31 +-
 Ryujinx.HLE/HOS/ArmProcessContextFactory.cs   |   23 +-
 Ryujinx.HLE/HOS/Font/SharedFontManager.cs     |   23 +-
 Ryujinx.HLE/HOS/Horizon.cs                    |   37 +-
 .../HOS/Kernel/Ipc/KBufferDescriptorTable.cs  |   22 +-
 Ryujinx.HLE/HOS/Kernel/Ipc/KServerSession.cs  |   90 +-
 Ryujinx.HLE/HOS/Kernel/KernelConstants.cs     |    2 +-
 Ryujinx.HLE/HOS/Kernel/KernelContext.cs       |   14 +-
 .../HOS/Kernel/Memory/DramMemoryMap.cs        |    5 +
 Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlock.cs |    4 +-
 .../HOS/Kernel/Memory/KMemoryBlockManager.cs  |  329 ++
 ...llocator.cs => KMemoryBlockSlabManager.cs} |    4 +-
 .../HOS/Kernel/Memory/KMemoryManager.cs       | 3326 +----
 .../HOS/Kernel/Memory/KMemoryRegionManager.cs |  291 +-
 Ryujinx.HLE/HOS/Kernel/Memory/KPageList.cs    |   20 +-
 Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs   |  221 ++
 .../HOS/Kernel/Memory/KPageTableBase.cs       | 2797 ++++++++++++++
 .../HOS/Kernel/Memory/KPageTableHostMapped.cs |  125 +
 .../HOS/Kernel/Memory/KScopedPageList.cs      |   27 +
 .../HOS/Kernel/Memory/KSharedMemory.cs        |   55 +-
 .../HOS/Kernel/Memory/KTransferMemory.cs      |   13 +-
 .../HOS/Kernel/Memory/MemoryOperation.cs      |   12 -
 .../HOS/Kernel/Memory/SharedMemoryStorage.cs  |  103 +
 .../Kernel/Process/IProcessContextFactory.cs  |    2 +-
 Ryujinx.HLE/HOS/Kernel/Process/KProcess.cs    |   92 +-
 .../Kernel/Process/KProcessCapabilities.cs    |   10 +-
 .../HOS/Kernel/Process/KTlsPageInfo.cs        |   16 +-
 .../HOS/Kernel/Process/KTlsPageManager.cs     |    2 +-
 .../Kernel/Process/ProcessContextFactory.cs   |    4 +-
 .../HOS/Kernel/SupervisorCall/Syscall.cs      |   10 +-
 Ryujinx.HLE/HOS/Kernel/Threading/KThread.cs   |    3 -
 Ryujinx.HLE/HOS/ProgramLoader.cs              |   18 +-
 Ryujinx.HLE/HOS/Services/Hid/Hid.cs           |   11 +-
 Ryujinx.HLE/HOS/Services/Ro/IRoInterface.cs   |    8 +-
 Ryujinx.HLE/HOS/Services/Time/TimeManager.cs  |    4 +-
 .../HOS/Services/Time/TimeSharedMemory.cs     |   34 +-
 Ryujinx.HLE/Switch.cs                         |    4 +-
 .../MockVirtualMemoryManager.cs               |   10 +-
 .../MultiRegionTrackingTests.cs               |    2 +-
 Ryujinx.Memory.Tests/TrackingTests.cs         |    2 +-
 Ryujinx.Memory/AddressSpaceManager.cs         |  259 +-
 Ryujinx.Memory/IRefCounted.cs                 |    8 +
 Ryujinx.Memory/IVirtualMemoryManager.cs       |  114 +-
 .../InvalidAccessHandler.cs                   |    2 +-
 Ryujinx.Memory/MemoryAllocationFlags.cs       |    8 +-
 Ryujinx.Memory/MemoryBlock.cs                 |  117 +-
 Ryujinx.Memory/MemoryManagement.cs            |  106 +-
 Ryujinx.Memory/MemoryManagementUnix.cs        |  210 +-
 Ryujinx.Memory/MemoryManagementWindows.cs     |  261 +-
 Ryujinx.Memory/MemoryPermission.cs            |    7 +-
 Ryujinx.Memory/NativeMemoryManager.cs         |    2 +-
 Ryujinx.Memory/PageTable.cs                   |  141 +
 Ryujinx.Memory/Range/HostMemoryRange.cs       |   71 +
 Ryujinx.Memory/Tracking/IMultiRegionHandle.cs |    7 +
 Ryujinx.Memory/Tracking/IRegionHandle.cs      |    1 +
 Ryujinx.Memory/Tracking/MemoryTracking.cs     |   20 +-
 Ryujinx.Memory/Tracking/MultiRegionHandle.cs  |   16 +-
 Ryujinx.Memory/Tracking/RegionHandle.cs       |   77 +-
 .../Tracking/SmartMultiRegionHandle.cs        |   11 +
 Ryujinx.Memory/Tracking/VirtualRegion.cs      |   22 +-
 .../EmulatedSharedMemoryWindows.cs            |  698 ++++
 .../WindowsShared/PlaceholderList.cs          |  291 ++
 Ryujinx.Memory/WindowsShared/WindowsFlags.cs  |   52 +
 Ryujinx.Tests/Cpu/CpuTest.cs                  |    7 +-
 Ryujinx.Tests/Cpu/CpuTest32.cs                |    7 +-
 Ryujinx/Config.json                           |    1 +
 .../Configuration/ConfigurationFileFormat.cs  |    7 +-
 Ryujinx/Configuration/ConfigurationState.cs   |   19 +
 Ryujinx/Ui/MainWindow.cs                      |    1 +
 Ryujinx/Ui/Windows/SettingsWindow.cs          |   29 +
 Ryujinx/Ui/Windows/SettingsWindow.glade       |   87 +
 Ryujinx/_schema.json                          |   14 +-
 107 files changed, 8309 insertions(+), 4183 deletions(-)
 create mode 100644 ARMeilleure/Memory/MemoryManagerType.cs
 create mode 100644 ARMeilleure/Signal/NativeSignalHandler.cs
 create mode 100644 ARMeilleure/Signal/UnixSignalHandlerRegistration.cs
 create mode 100644 ARMeilleure/Signal/WindowsSignalHandlerRegistration.cs
 create mode 100644 Ryujinx.Common/Configuration/MemoryManagerMode.cs
 create mode 100644 Ryujinx.Cpu/IVirtualMemoryManagerTracked.cs
 create mode 100644 Ryujinx.Cpu/MemoryEhMeilleure.cs
 create mode 100644 Ryujinx.Cpu/MemoryManagerBase.cs
 create mode 100644 Ryujinx.Cpu/MemoryManagerHostMapped.cs
 create mode 100644 Ryujinx.Graphics.Gpu/Memory/BufferCacheEntry.cs
 create mode 100644 Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockManager.cs
 rename Ryujinx.HLE/HOS/Kernel/Memory/{KMemoryBlockAllocator.cs => KMemoryBlockSlabManager.cs} (77%)
 create mode 100644 Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
 create mode 100644 Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs
 create mode 100644 Ryujinx.HLE/HOS/Kernel/Memory/KPageTableHostMapped.cs
 create mode 100644 Ryujinx.HLE/HOS/Kernel/Memory/KScopedPageList.cs
 delete mode 100644 Ryujinx.HLE/HOS/Kernel/Memory/MemoryOperation.cs
 create
mode 100644 Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs create mode 100644 Ryujinx.Memory/IRefCounted.cs rename {Ryujinx.Cpu => Ryujinx.Memory}/InvalidAccessHandler.cs (92%) create mode 100644 Ryujinx.Memory/PageTable.cs create mode 100644 Ryujinx.Memory/Range/HostMemoryRange.cs create mode 100644 Ryujinx.Memory/WindowsShared/EmulatedSharedMemoryWindows.cs create mode 100644 Ryujinx.Memory/WindowsShared/PlaceholderList.cs create mode 100644 Ryujinx.Memory/WindowsShared/WindowsFlags.cs diff --git a/ARMeilleure/Instructions/InstEmitMemoryHelper.cs b/ARMeilleure/Instructions/InstEmitMemoryHelper.cs index 2d7b6799ea..2de1230462 100644 --- a/ARMeilleure/Instructions/InstEmitMemoryHelper.cs +++ b/ARMeilleure/Instructions/InstEmitMemoryHelper.cs @@ -1,5 +1,6 @@ using ARMeilleure.Decoders; using ARMeilleure.IntermediateRepresentation; +using ARMeilleure.Memory; using ARMeilleure.Translation; using ARMeilleure.Translation.PTC; using System; @@ -141,13 +142,16 @@ namespace ARMeilleure.Instructions SetInt(context, rt, value); - context.Branch(lblEnd); + if (!context.Memory.Type.IsHostMapped()) + { + context.Branch(lblEnd); - context.MarkLabel(lblSlowPath, BasicBlockFrequency.Cold); + context.MarkLabel(lblSlowPath, BasicBlockFrequency.Cold); - EmitReadIntFallback(context, address, rt, size); + EmitReadIntFallback(context, address, rt, size); - context.MarkLabel(lblEnd); + context.MarkLabel(lblEnd); + } } public static Operand EmitReadIntAligned(ArmEmitterContext context, Operand address, int size) @@ -195,13 +199,16 @@ namespace ARMeilleure.Instructions context.Copy(GetVec(rt), value); - context.Branch(lblEnd); + if (!context.Memory.Type.IsHostMapped()) + { + context.Branch(lblEnd); - context.MarkLabel(lblSlowPath, BasicBlockFrequency.Cold); + context.MarkLabel(lblSlowPath, BasicBlockFrequency.Cold); - EmitReadVectorFallback(context, address, vector, rt, elem, size); + EmitReadVectorFallback(context, address, vector, rt, elem, size); - context.MarkLabel(lblEnd); + context.MarkLabel(lblEnd); + } } private static Operand VectorCreate(ArmEmitterContext context, Operand value) @@ -231,13 +238,16 @@ namespace ARMeilleure.Instructions case 3: context.Store (physAddr, value); break; } - context.Branch(lblEnd); + if (!context.Memory.Type.IsHostMapped()) + { + context.Branch(lblEnd); - context.MarkLabel(lblSlowPath, BasicBlockFrequency.Cold); + context.MarkLabel(lblSlowPath, BasicBlockFrequency.Cold); - EmitWriteIntFallback(context, address, rt, size); + EmitWriteIntFallback(context, address, rt, size); - context.MarkLabel(lblEnd); + context.MarkLabel(lblEnd); + } } public static void EmitWriteIntAligned(ArmEmitterContext context, Operand address, Operand value, int size) @@ -291,17 +301,25 @@ namespace ARMeilleure.Instructions case 4: context.Store (physAddr, value); break; } - context.Branch(lblEnd); + if (!context.Memory.Type.IsHostMapped()) + { + context.Branch(lblEnd); - context.MarkLabel(lblSlowPath, BasicBlockFrequency.Cold); + context.MarkLabel(lblSlowPath, BasicBlockFrequency.Cold); - EmitWriteVectorFallback(context, address, rt, elem, size); + EmitWriteVectorFallback(context, address, rt, elem, size); - context.MarkLabel(lblEnd); + context.MarkLabel(lblEnd); + } } public static Operand EmitPtPointerLoad(ArmEmitterContext context, Operand address, Operand lblSlowPath, bool write, int size) { + if (context.Memory.Type.IsHostMapped()) + { + return EmitHostMappedPointer(context, address); + } + int ptLevelBits = context.Memory.AddressSpaceBits - PageBits; int ptLevelSize = 1 << ptLevelBits; int 
ptLevelMask = ptLevelSize - 1; @@ -380,6 +398,26 @@ namespace ARMeilleure.Instructions return context.Add(pte, pageOffset); } + public static Operand EmitHostMappedPointer(ArmEmitterContext context, Operand address) + { + if (address.Type == OperandType.I32) + { + address = context.ZeroExtend32(OperandType.I64, address); + } + + if (context.Memory.Type == MemoryManagerType.HostMapped) + { + Operand mask = Const(ulong.MaxValue >> (64 - context.Memory.AddressSpaceBits)); + address = context.BitwiseAnd(address, mask); + } + + Operand baseAddr = Ptc.State == PtcState.Disabled + ? Const(context.Memory.PageTablePointer.ToInt64()) + : Const(context.Memory.PageTablePointer.ToInt64(), true, Ptc.PageTablePointerIndex); + + return context.Add(baseAddr, address); + } + private static void EmitReadIntFallback(ArmEmitterContext context, Operand address, int rt, int size) { MethodInfo info = null; diff --git a/ARMeilleure/Memory/IMemoryManager.cs b/ARMeilleure/Memory/IMemoryManager.cs index cacfc4ac3c..0a25eb9727 100644 --- a/ARMeilleure/Memory/IMemoryManager.cs +++ b/ARMeilleure/Memory/IMemoryManager.cs @@ -8,16 +8,68 @@ namespace ARMeilleure.Memory IntPtr PageTablePointer { get; } + MemoryManagerType Type { get; } + + event Action UnmapEvent; + + /// + /// Reads data from CPU mapped memory. + /// + /// Type of the data being read + /// Virtual address of the data in memory + /// The data T Read(ulong va) where T : unmanaged; + + /// + /// Reads data from CPU mapped memory, with read tracking + /// + /// Type of the data being read + /// Virtual address of the data in memory + /// The data T ReadTracked(ulong va) where T : unmanaged; + + /// + /// Writes data to CPU mapped memory. + /// + /// Type of the data being written + /// Virtual address to write the data into + /// Data to be written void Write(ulong va, T value) where T : unmanaged; + /// + /// Gets a read-only span of data from CPU mapped memory. + /// + /// Virtual address of the data + /// Size of the data + /// True if read tracking is triggered on the span + /// A read-only span of the data ReadOnlySpan GetSpan(ulong va, int size, bool tracked = false); + /// + /// Gets a reference for the given type at the specified virtual memory address. + /// + /// + /// The data must be located at a contiguous memory region. + /// + /// Type of the data to get the reference + /// Virtual address of the data + /// A reference to the data in memory ref T GetRef(ulong va) where T : unmanaged; + /// + /// Checks if the page at a given CPU virtual address is mapped. + /// + /// Virtual address to check + /// True if the address is mapped, false otherwise bool IsMapped(ulong va); + /// + /// Alerts the memory tracking that a given region has been read from or written to. + /// This should be called before read/write is performed. + /// + /// Virtual address of the region + /// Size of the region + /// True if the region was written, false if read void SignalMemoryTracking(ulong va, ulong size, bool write); } } \ No newline at end of file diff --git a/ARMeilleure/Memory/MemoryManagerType.cs b/ARMeilleure/Memory/MemoryManagerType.cs new file mode 100644 index 0000000000..ce84ccaf36 --- /dev/null +++ b/ARMeilleure/Memory/MemoryManagerType.cs @@ -0,0 +1,41 @@ +namespace ARMeilleure.Memory +{ + /// + /// Indicates the type of a memory manager and the method it uses for memory mapping + /// and address translation. This controls the code generated for memory accesses on the JIT. 
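(To make the modes below concrete, here is a minimal sketch, not code from this patch, of how a guest load resolves under each strategy; all names are placeholders for state the real memory managers hold, and the software case is simplified to a flat table.)

    // Illustrative sketch only: how a guest virtual address becomes a host
    // pointer under each MemoryManagerType.
    static unsafe class TranslationSketch
    {
        // SoftwarePageTable: one PTE holding a host address per 4 KiB page.
        public static ulong ReadSoftwarePageTable(ulong* pageTable, ulong va)
        {
            ulong host = pageTable[va >> 12] + (va & 0xFFF);
            return *(ulong*)host;
        }

        // HostMapped: the guest address space is one big host reservation, so
        // translation is a mask (to stay inside the reservation) plus an add.
        public static ulong ReadHostMapped(ulong hostBase, int addressSpaceBits, ulong va)
        {
            ulong mask = ulong.MaxValue >> (64 - addressSpaceBits);
            return *(ulong*)(hostBase + (va & mask));
        }

        // HostMappedUnsafe: no mask; a wild guest address can touch the rest
        // of the process, which is why this mode is labelled unsafe.
        public static ulong ReadHostMappedUnsafe(ulong hostBase, ulong va)
        {
            return *(ulong*)(hostBase + va);
        }
    }

For SoftwareMmu there is no fast path at all: every access calls back into the managed read/write methods.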
+ /// + public enum MemoryManagerType + { + /// + /// Complete software MMU implementation, the read/write methods are always called, + /// without any attempt to perform faster memory access. + /// + SoftwareMmu, + + /// + /// High level implementation using a software flat page table for address translation, + /// used to speed up address translation if possible without calling the read/write methods. + /// + SoftwarePageTable, + + /// + /// High level implementation with mappings managed by the host OS, effectively using hardware + /// page tables. No address translation is performed in software and the memory is just accessed directly. + /// + HostMapped, + + /// + /// Same as the host mapped memory manager type, but without masking the address within the address space. + /// Allows invalid access from JIT code to the rest of the program, but is faster. + /// + HostMappedUnsafe + } + + static class MemoryManagerTypeExtensions + { + public static bool IsHostMapped(this MemoryManagerType type) + { + return type == MemoryManagerType.HostMapped || type == MemoryManagerType.HostMappedUnsafe; + } + } +} diff --git a/ARMeilleure/Signal/NativeSignalHandler.cs b/ARMeilleure/Signal/NativeSignalHandler.cs new file mode 100644 index 0000000000..e5387ca622 --- /dev/null +++ b/ARMeilleure/Signal/NativeSignalHandler.cs @@ -0,0 +1,327 @@ +using ARMeilleure.IntermediateRepresentation; +using ARMeilleure.Translation; +using System; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; + +using static ARMeilleure.IntermediateRepresentation.OperandHelper; + +namespace ARMeilleure.Signal +{ + [StructLayout(LayoutKind.Sequential, Pack = 1)] + struct SignalHandlerRange + { + public int IsActive; + public nuint RangeAddress; + public nuint RangeEndAddress; + public IntPtr ActionPointer; + } + + [StructLayout(LayoutKind.Sequential, Pack = 1)] + struct SignalHandlerConfig + { + /// + /// The byte offset of the faulting address in the SigInfo or ExceptionRecord struct. + /// + public int StructAddressOffset; + + /// + /// The byte offset of the write flag in the SigInfo or ExceptionRecord struct. + /// + public int StructWriteOffset; + + /// + /// The sigaction handler that was registered before this one. (unix only) + /// + public nuint UnixOldSigaction; + + /// + /// The type of the previous sigaction. True for the 3 argument variant. 
(unix only) + /// + public int UnixOldSigaction3Arg; + + public SignalHandlerRange Range0; + public SignalHandlerRange Range1; + public SignalHandlerRange Range2; + public SignalHandlerRange Range3; + public SignalHandlerRange Range4; + public SignalHandlerRange Range5; + public SignalHandlerRange Range6; + public SignalHandlerRange Range7; + } + + public static class NativeSignalHandler + { + private delegate void UnixExceptionHandler(int sig, IntPtr info, IntPtr ucontext); + [UnmanagedFunctionPointer(CallingConvention.Winapi)] + private delegate int VectoredExceptionHandler(IntPtr exceptionInfo); + + private const int MaxTrackedRanges = 8; + + private const int StructAddressOffset = 0; + private const int StructWriteOffset = 4; + private const int UnixOldSigaction = 8; + private const int UnixOldSigaction3Arg = 16; + private const int RangeOffset = 20; + + private const int EXCEPTION_CONTINUE_SEARCH = 0; + private const int EXCEPTION_CONTINUE_EXECUTION = -1; + + private const uint EXCEPTION_ACCESS_VIOLATION = 0xc0000005; + + private const ulong PageSize = 0x1000; + private const ulong PageMask = PageSize - 1; + + private static IntPtr _handlerConfig; + private static IntPtr _signalHandlerPtr; + private static IntPtr _signalHandlerHandle; + + private static readonly object _lock = new object(); + private static bool _initialized; + + static NativeSignalHandler() + { + _handlerConfig = Marshal.AllocHGlobal(Unsafe.SizeOf()); + ref SignalHandlerConfig config = ref GetConfigRef(); + + config = new SignalHandlerConfig(); + } + + public static void InitializeSignalHandler() + { + if (_initialized) return; + + lock (_lock) + { + if (_initialized) return; + + Translator.PreparePool(); + + bool unix = RuntimeInformation.IsOSPlatform(OSPlatform.Linux) || RuntimeInformation.IsOSPlatform(OSPlatform.OSX); + ref SignalHandlerConfig config = ref GetConfigRef(); + + if (unix) + { + // Unix siginfo struct locations. + // NOTE: These are incredibly likely to be different between kernel version and architectures. 
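(For orientation: the 16 and 8 assigned just below come from the x86_64 Linux siginfo_t layout. The struct here is an annotation of that assumed glibc layout, not a type the patch defines.)

    using System;
    using System.Runtime.InteropServices;

    // Assumed x86_64 Linux siginfo_t prefix; the explicit offsets are the
    // ones stored in the handler config (si_code = 8, si_addr = 16).
    [StructLayout(LayoutKind.Explicit)]
    struct SigInfoPrefix
    {
        [FieldOffset(0)]  public int    si_signo; // signal number
        [FieldOffset(4)]  public int    si_errno; // errno value, usually 0
        [FieldOffset(8)]  public int    si_code;  // read as the write-flag source
        [FieldOffset(16)] public IntPtr si_addr;  // faulting address (start of the union)
    }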
+ + config.StructAddressOffset = 16; // si_addr + config.StructWriteOffset = 8; // si_code + + _signalHandlerPtr = Marshal.GetFunctionPointerForDelegate(GenerateUnixSignalHandler(_handlerConfig)); + + SigAction old = UnixSignalHandlerRegistration.RegisterExceptionHandler(_signalHandlerPtr); + config.UnixOldSigaction = (nuint)(ulong)old.sa_handler; + config.UnixOldSigaction3Arg = old.sa_flags & 4; + } + else + { + config.StructAddressOffset = 40; // ExceptionInformation1 + config.StructWriteOffset = 32; // ExceptionInformation0 + + _signalHandlerPtr = Marshal.GetFunctionPointerForDelegate(GenerateWindowsSignalHandler(_handlerConfig)); + + _signalHandlerHandle = WindowsSignalHandlerRegistration.RegisterExceptionHandler(_signalHandlerPtr); + } + + Translator.ResetPool(); + + Translator.DisposePools(); + + _initialized = true; + } + } + + private static unsafe ref SignalHandlerConfig GetConfigRef() + { + return ref Unsafe.AsRef((void*)_handlerConfig); + } + + public static unsafe bool AddTrackedRegion(nuint address, nuint endAddress, IntPtr action) + { + var ranges = &((SignalHandlerConfig*)_handlerConfig)->Range0; + + for (int i = 0; i < MaxTrackedRanges; i++) + { + if (ranges[i].IsActive == 0) + { + ranges[i].RangeAddress = address; + ranges[i].RangeEndAddress = endAddress; + ranges[i].ActionPointer = action; + ranges[i].IsActive = 1; + + return true; + } + } + + return false; + } + + public static unsafe bool RemoveTrackedRegion(nuint address) + { + var ranges = &((SignalHandlerConfig*)_handlerConfig)->Range0; + + for (int i = 0; i < MaxTrackedRanges; i++) + { + if (ranges[i].IsActive == 1 && ranges[i].RangeAddress == address) + { + ranges[i].IsActive = 0; + + return true; + } + } + + return false; + } + + private static Operand EmitGenericRegionCheck(EmitterContext context, IntPtr signalStructPtr, Operand faultAddress, Operand isWrite) + { + Operand inRegionLocal = context.AllocateLocal(OperandType.I32); + context.Copy(inRegionLocal, Const(0)); + + Operand endLabel = Label(); + + for (int i = 0; i < MaxTrackedRanges; i++) + { + ulong rangeBaseOffset = (ulong)(RangeOffset + i * Unsafe.SizeOf()); + + Operand nextLabel = Label(); + + Operand isActive = context.Load(OperandType.I32, Const((ulong)signalStructPtr + rangeBaseOffset)); + + context.BranchIfFalse(nextLabel, isActive); + + Operand rangeAddress = context.Load(OperandType.I64, Const((ulong)signalStructPtr + rangeBaseOffset + 4)); + Operand rangeEndAddress = context.Load(OperandType.I64, Const((ulong)signalStructPtr + rangeBaseOffset + 12)); + + // Is the fault address within this tracked region? + Operand inRange = context.BitwiseAnd( + context.ICompare(faultAddress, rangeAddress, Comparison.GreaterOrEqualUI), + context.ICompare(faultAddress, rangeEndAddress, Comparison.Less) + ); + + // Only call tracking if in range. + context.BranchIfFalse(nextLabel, inRange, BasicBlockFrequency.Cold); + + context.Copy(inRegionLocal, Const(1)); + Operand offset = context.BitwiseAnd(context.Subtract(faultAddress, rangeAddress), Const(~PageMask)); + + // Call the tracking action, with the pointer's relative offset to the base address. 
+ Operand trackingActionPtr = context.Load(OperandType.I64, Const((ulong)signalStructPtr + rangeBaseOffset + 20)); + context.Call(trackingActionPtr, OperandType.I32, offset, Const(PageSize), isWrite); + + context.Branch(endLabel); + + context.MarkLabel(nextLabel); + } + + context.MarkLabel(endLabel); + + return context.Copy(inRegionLocal); + } + + private static UnixExceptionHandler GenerateUnixSignalHandler(IntPtr signalStructPtr) + { + EmitterContext context = new EmitterContext(); + + // (int sig, SigInfo* sigInfo, void* ucontext) + Operand sigInfoPtr = context.LoadArgument(OperandType.I64, 1); + + Operand structAddressOffset = context.Load(OperandType.I64, Const((ulong)signalStructPtr + StructAddressOffset)); + Operand structWriteOffset = context.Load(OperandType.I64, Const((ulong)signalStructPtr + StructWriteOffset)); + + Operand faultAddress = context.Load(OperandType.I64, context.Add(sigInfoPtr, context.ZeroExtend32(OperandType.I64, structAddressOffset))); + Operand writeFlag = context.Load(OperandType.I64, context.Add(sigInfoPtr, context.ZeroExtend32(OperandType.I64, structWriteOffset))); + + Operand isWrite = context.ICompareNotEqual(writeFlag, Const(0L)); // Normalize to 0/1. + + Operand isInRegion = EmitGenericRegionCheck(context, signalStructPtr, faultAddress, isWrite); + + Operand endLabel = Label(); + + context.BranchIfTrue(endLabel, isInRegion); + + Operand unixOldSigaction = context.Load(OperandType.I64, Const((ulong)signalStructPtr + UnixOldSigaction)); + Operand unixOldSigaction3Arg = context.Load(OperandType.I64, Const((ulong)signalStructPtr + UnixOldSigaction3Arg)); + Operand threeArgLabel = Label(); + + context.BranchIfTrue(threeArgLabel, unixOldSigaction3Arg); + + context.Call(unixOldSigaction, OperandType.None, context.LoadArgument(OperandType.I32, 0)); + context.Branch(endLabel); + + context.MarkLabel(threeArgLabel); + + context.Call(unixOldSigaction, + OperandType.None, + context.LoadArgument(OperandType.I32, 0), + sigInfoPtr, + context.LoadArgument(OperandType.I64, 2) + ); + + context.MarkLabel(endLabel); + + context.Return(); + + ControlFlowGraph cfg = context.GetControlFlowGraph(); + + OperandType[] argTypes = new OperandType[] { OperandType.I32, OperandType.I64, OperandType.I64 }; + + return Compiler.Compile(cfg, argTypes, OperandType.None, CompilerOptions.HighCq); + } + + private static VectoredExceptionHandler GenerateWindowsSignalHandler(IntPtr signalStructPtr) + { + EmitterContext context = new EmitterContext(); + + // (ExceptionPointers* exceptionInfo) + Operand exceptionInfoPtr = context.LoadArgument(OperandType.I64, 0); + Operand exceptionRecordPtr = context.Load(OperandType.I64, exceptionInfoPtr); + + // First thing's first - this catches a number of exceptions, but we only want access violations. + Operand validExceptionLabel = Label(); + + Operand exceptionCode = context.Load(OperandType.I32, exceptionRecordPtr); + + context.BranchIf(validExceptionLabel, exceptionCode, Const(EXCEPTION_ACCESS_VIOLATION), Comparison.Equal); + + context.Return(Const(EXCEPTION_CONTINUE_SEARCH)); // Don't handle this one. + + context.MarkLabel(validExceptionLabel); + + // Next, read the address of the invalid access, and whether it is a write or not. 
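(Aside, before the loads that follow: EmitGenericRegionCheck, completed a few hunks above and shared by both handlers, is easier to follow as plain C#. Illustration only, not code from the patch; InvokeTrackingAction stands in for the indirect call the generated code makes.)

    // Plain C# equivalent of the emitted region check.
    static bool HandleFault(SignalHandlerRange[] ranges, ulong faultAddress, bool isWrite)
    {
        for (int i = 0; i < ranges.Length; i++) // the 8 slots in the config
        {
            ref SignalHandlerRange range = ref ranges[i];

            if (range.IsActive == 0 ||
                faultAddress < range.RangeAddress ||
                faultAddress >= range.RangeEndAddress)
            {
                continue;
            }

            // Page-align the offset relative to the region base, then invoke
            // the tracking action for one page.
            ulong offset = (faultAddress - (ulong)range.RangeAddress) & ~0xFFFUL;
            InvokeTrackingAction(range.ActionPointer, offset, 0x1000, isWrite);

            return true;  // handled: execution resumes at the faulting instruction
        }

        return false;     // not a tracked region: fall through to the old handler
    }

    static void InvokeTrackingAction(System.IntPtr action, ulong offset, ulong size, bool write)
    {
        // Stand-in: managed code would use Marshal.GetDelegateForFunctionPointer
        // here; the real handler calls the function pointer from generated code.
    }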
+ + Operand structAddressOffset = context.Load(OperandType.I32, Const((ulong)signalStructPtr + StructAddressOffset)); + Operand structWriteOffset = context.Load(OperandType.I32, Const((ulong)signalStructPtr + StructWriteOffset)); + + Operand faultAddress = context.Load(OperandType.I64, context.Add(exceptionRecordPtr, context.ZeroExtend32(OperandType.I64, structAddressOffset))); + Operand writeFlag = context.Load(OperandType.I64, context.Add(exceptionRecordPtr, context.ZeroExtend32(OperandType.I64, structWriteOffset))); + + Operand isWrite = context.ICompareNotEqual(writeFlag, Const(0L)); // Normalize to 0/1. + + Operand isInRegion = EmitGenericRegionCheck(context, signalStructPtr, faultAddress, isWrite); + + Operand endLabel = Label(); + + // If the region check result is false, then run the next vectored exception handler. + + context.BranchIfTrue(endLabel, isInRegion); + + context.Return(Const(EXCEPTION_CONTINUE_SEARCH)); + + context.MarkLabel(endLabel); + + // Otherwise, return to execution. + + context.Return(Const(EXCEPTION_CONTINUE_EXECUTION)); + + // Compile and return the function. + + ControlFlowGraph cfg = context.GetControlFlowGraph(); + + OperandType[] argTypes = new OperandType[] { OperandType.I64 }; + + return Compiler.Compile(cfg, argTypes, OperandType.I32, CompilerOptions.HighCq); + } + } +} diff --git a/ARMeilleure/Signal/UnixSignalHandlerRegistration.cs b/ARMeilleure/Signal/UnixSignalHandlerRegistration.cs new file mode 100644 index 0000000000..9e87749e66 --- /dev/null +++ b/ARMeilleure/Signal/UnixSignalHandlerRegistration.cs @@ -0,0 +1,57 @@ +using Mono.Unix.Native; +using System; +using System.Runtime.InteropServices; + +namespace ARMeilleure.Signal +{ + [StructLayout(LayoutKind.Sequential, Pack = 1)] + unsafe struct SigSet + { + fixed long sa_mask[16]; + } + + [StructLayout(LayoutKind.Sequential, Pack = 1)] + struct SigAction + { + public IntPtr sa_handler; + public SigSet sa_mask; + public int sa_flags; + public IntPtr sa_restorer; + } + + static class UnixSignalHandlerRegistration + { + private const int SA_SIGINFO = 0x00000004; + + [DllImport("libc", SetLastError = true)] + private static extern int sigaction(int signum, ref SigAction sigAction, out SigAction oldAction); + + [DllImport("libc", SetLastError = true)] + private static extern int sigemptyset(ref SigSet set); + + public static SigAction RegisterExceptionHandler(IntPtr action) + { + SigAction sig = new SigAction + { + sa_handler = action, + sa_flags = SA_SIGINFO + }; + + sigemptyset(ref sig.sa_mask); + + int result = sigaction((int)Signum.SIGSEGV, ref sig, out SigAction old); + + if (result != 0) + { + throw new InvalidOperationException($"Could not register sigaction. 
Error: {result}"); + } + + return old; + } + + public static bool RestoreExceptionHandler(SigAction oldAction) + { + return sigaction((int)Signum.SIGSEGV, ref oldAction, out SigAction _) == 0; + } + } +} diff --git a/ARMeilleure/Signal/WindowsSignalHandlerRegistration.cs b/ARMeilleure/Signal/WindowsSignalHandlerRegistration.cs new file mode 100644 index 0000000000..959d1c477d --- /dev/null +++ b/ARMeilleure/Signal/WindowsSignalHandlerRegistration.cs @@ -0,0 +1,24 @@ +using System; +using System.Runtime.InteropServices; + +namespace ARMeilleure.Signal +{ + class WindowsSignalHandlerRegistration + { + [DllImport("kernel32.dll")] + private static extern IntPtr AddVectoredExceptionHandler(uint first, IntPtr handler); + + [DllImport("kernel32.dll")] + private static extern ulong RemoveVectoredExceptionHandler(IntPtr handle); + + public static IntPtr RegisterExceptionHandler(IntPtr action) + { + return AddVectoredExceptionHandler(1, action); + } + + public static bool RemoveExceptionHandler(IntPtr handle) + { + return RemoveVectoredExceptionHandler(handle) != 0; + } + } +} diff --git a/ARMeilleure/Translation/PTC/Ptc.cs b/ARMeilleure/Translation/PTC/Ptc.cs index f6494c231b..ed4a003db1 100644 --- a/ARMeilleure/Translation/PTC/Ptc.cs +++ b/ARMeilleure/Translation/PTC/Ptc.cs @@ -28,7 +28,7 @@ namespace ARMeilleure.Translation.PTC private const string OuterHeaderMagicString = "PTCohd\0\0"; private const string InnerHeaderMagicString = "PTCihd\0\0"; - private const uint InternalVersion = 2305; //! To be incremented manually for each change to the ARMeilleure project. + private const uint InternalVersion = 2289; //! To be incremented manually for each change to the ARMeilleure project. private const string ActualDir = "0"; private const string BackupDir = "1"; @@ -64,6 +64,8 @@ namespace ARMeilleure.Translation.PTC internal static string TitleIdText { get; private set; } internal static string DisplayVersion { get; private set; } + private static MemoryManagerMode _memoryMode; + internal static string CachePathActual { get; private set; } internal static string CachePathBackup { get; private set; } @@ -98,7 +100,7 @@ namespace ARMeilleure.Translation.PTC Disable(); } - public static void Initialize(string titleIdText, string displayVersion, bool enabled) + public static void Initialize(string titleIdText, string displayVersion, bool enabled, MemoryManagerMode memoryMode) { Wait(); @@ -122,6 +124,7 @@ namespace ARMeilleure.Translation.PTC TitleIdText = titleIdText; DisplayVersion = !string.IsNullOrEmpty(displayVersion) ? 
displayVersion : DisplayVersionDefault; + _memoryMode = memoryMode; string workPathActual = Path.Combine(AppDataManager.GamesDirPath, TitleIdText, "cache", "cpu", ActualDir); string workPathBackup = Path.Combine(AppDataManager.GamesDirPath, TitleIdText, "cache", "cpu", BackupDir); @@ -244,6 +247,13 @@ namespace ARMeilleure.Translation.PTC return false; } + if (outerHeader.MemoryManagerMode != GetMemoryManagerMode()) + { + InvalidateCompressedStream(compressedStream); + + return false; + } + if (outerHeader.OSPlatform != GetOSPlatform()) { InvalidateCompressedStream(compressedStream); @@ -441,6 +451,7 @@ namespace ARMeilleure.Translation.PTC outerHeader.CacheFileVersion = InternalVersion; outerHeader.Endianness = GetEndianness(); outerHeader.FeatureInfo = GetFeatureInfo(); + outerHeader.MemoryManagerMode = GetMemoryManagerMode(); outerHeader.OSPlatform = GetOSPlatform(); outerHeader.UncompressedStreamSize = @@ -954,6 +965,11 @@ namespace ARMeilleure.Translation.PTC return (ulong)HardwareCapabilities.FeatureInfoEdx << 32 | (uint)HardwareCapabilities.FeatureInfoEcx; } + private static byte GetMemoryManagerMode() + { + return (byte)_memoryMode; + } + private static uint GetOSPlatform() { uint osPlatform = 0u; @@ -966,7 +982,7 @@ namespace ARMeilleure.Translation.PTC return osPlatform; } - [StructLayout(LayoutKind.Sequential, Pack = 1/*, Size = 49*/)] + [StructLayout(LayoutKind.Sequential, Pack = 1/*, Size = 50*/)] private struct OuterHeader { public ulong Magic; @@ -975,6 +991,7 @@ namespace ARMeilleure.Translation.PTC public bool Endianness; public ulong FeatureInfo; + public byte MemoryManagerMode; public uint OSPlatform; public long UncompressedStreamSize; diff --git a/ARMeilleure/Translation/Translator.cs b/ARMeilleure/Translation/Translator.cs index f8b074c973..eeeb517f64 100644 --- a/ARMeilleure/Translation/Translator.cs +++ b/ARMeilleure/Translation/Translator.cs @@ -4,6 +4,7 @@ using ARMeilleure.Diagnostics; using ARMeilleure.Instructions; using ARMeilleure.IntermediateRepresentation; using ARMeilleure.Memory; +using ARMeilleure.Signal; using ARMeilleure.State; using ARMeilleure.Translation.Cache; using ARMeilleure.Translation.PTC; @@ -63,6 +64,11 @@ namespace ARMeilleure.Translation JitCache.Initialize(allocator); DirectCallStubs.InitializeStubs(); + + if (memory.Type.IsHostMapped()) + { + NativeSignalHandler.InitializeSignalHandler(); + } } private void TranslateStackedSubs() diff --git a/Ryujinx.Audio/Renderer/Server/AudioRenderSystem.cs b/Ryujinx.Audio/Renderer/Server/AudioRenderSystem.cs index 787b8f9f04..112b0e442a 100644 --- a/Ryujinx.Audio/Renderer/Server/AudioRenderSystem.cs +++ b/Ryujinx.Audio/Renderer/Server/AudioRenderSystem.cs @@ -142,6 +142,11 @@ namespace Ryujinx.Audio.Renderer.Server _sessionId = sessionId; MemoryManager = memoryManager; + if (memoryManager is IRefCounted rc) + { + rc.IncrementReferenceCount(); + } + WorkBufferAllocator workBufferAllocator; _workBufferRegion = MemoryManager.GetWritableRegion(workBuffer, (int)workBufferSize); @@ -832,6 +837,13 @@ namespace Ryujinx.Audio.Renderer.Server _terminationEvent.Dispose(); _workBufferMemoryPin.Dispose(); _workBufferRegion.Dispose(); + + if (MemoryManager is IRefCounted rc) + { + rc.DecrementReferenceCount(); + + MemoryManager = null; + } } } } diff --git a/Ryujinx.Audio/Renderer/Server/AudioRendererManager.cs b/Ryujinx.Audio/Renderer/Server/AudioRendererManager.cs index ec8479481b..004ac656b3 100644 --- a/Ryujinx.Audio/Renderer/Server/AudioRendererManager.cs +++ 
b/Ryujinx.Audio/Renderer/Server/AudioRendererManager.cs @@ -22,6 +22,7 @@ using Ryujinx.Common.Logging; using Ryujinx.Memory; using System; using System.Diagnostics; +using System.Linq; using System.Threading; namespace Ryujinx.Audio.Renderer.Server @@ -319,6 +320,19 @@ namespace Ryujinx.Audio.Renderer.Server { if (disposing) { + // Clone the sessions array to dispose them outside the lock. + AudioRenderSystem[] sessions; + + lock (_sessionLock) + { + sessions = _sessions.ToArray(); + } + + foreach (AudioRenderSystem renderer in sessions) + { + renderer?.Dispose(); + } + lock (_audioProcessorLock) { if (_isRunning) diff --git a/Ryujinx.Common/Configuration/MemoryManagerMode.cs b/Ryujinx.Common/Configuration/MemoryManagerMode.cs new file mode 100644 index 0000000000..ad6c2a346e --- /dev/null +++ b/Ryujinx.Common/Configuration/MemoryManagerMode.cs @@ -0,0 +1,9 @@ +namespace Ryujinx.Common.Configuration +{ + public enum MemoryManagerMode : byte + { + SoftwarePageTable, + HostMapped, + HostMappedUnsafe + } +} diff --git a/Ryujinx.Cpu/CpuContext.cs b/Ryujinx.Cpu/CpuContext.cs index 450405861b..407353fdf2 100644 --- a/Ryujinx.Cpu/CpuContext.cs +++ b/Ryujinx.Cpu/CpuContext.cs @@ -1,4 +1,5 @@ -using ARMeilleure.State; +using ARMeilleure.Memory; +using ARMeilleure.State; using ARMeilleure.Translation; namespace Ryujinx.Cpu @@ -7,7 +8,7 @@ namespace Ryujinx.Cpu { private readonly Translator _translator; - public CpuContext(MemoryManager memory) + public CpuContext(IMemoryManager memory) { _translator = new Translator(new JitMemoryAllocator(), memory); memory.UnmapEvent += UnmapHandler; diff --git a/Ryujinx.Cpu/IVirtualMemoryManagerTracked.cs b/Ryujinx.Cpu/IVirtualMemoryManagerTracked.cs new file mode 100644 index 0000000000..8187433970 --- /dev/null +++ b/Ryujinx.Cpu/IVirtualMemoryManagerTracked.cs @@ -0,0 +1,42 @@ +using Ryujinx.Cpu.Tracking; +using Ryujinx.Memory; +using System; + +namespace Ryujinx.Cpu +{ + public interface IVirtualMemoryManagerTracked : IVirtualMemoryManager + { + /// + /// Writes data to CPU mapped memory, without write tracking. + /// + /// Virtual address to write the data into + /// Data to be written + void WriteUntracked(ulong va, ReadOnlySpan data); + + /// + /// Obtains a memory tracking handle for the given virtual region. This should be disposed when finished with. + /// + /// CPU virtual address of the region + /// Size of the region + /// The memory tracking handle + CpuRegionHandle BeginTracking(ulong address, ulong size); + + /// + /// Obtains a memory tracking handle for the given virtual region, with a specified granularity. This should be disposed when finished with. + /// + /// CPU virtual address of the region + /// Size of the region + /// Desired granularity of write tracking + /// The memory tracking handle + CpuMultiRegionHandle BeginGranularTracking(ulong address, ulong size, ulong granularity); + + /// + /// Obtains a smart memory tracking handle for the given virtual region, with a specified granularity. This should be disposed when finished with. 
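(A usage sketch for these tracking handles: hypothetical consumer code, not from this patch, assuming the QueryModified(address, size, action) overload that the CPU handle wrappers expose elsewhere in the tree.)

    using System;
    using Ryujinx.Cpu;
    using Ryujinx.Cpu.Tracking;

    static void TrackExample(IVirtualMemoryManagerTracked mm)
    {
        // Track writes to a 64 KiB guest region, one dirty flag per 4 KiB page.
        CpuMultiRegionHandle handle = mm.BeginGranularTracking(0x10000000, 0x10000, 0x1000);

        // ... guest code runs and possibly writes the region ...

        // Runs the action once per range written since the last query.
        handle.QueryModified(0x10000000, 0x10000, (address, size) =>
        {
            Console.WriteLine($"dirty: 0x{address:X} + 0x{size:X}");
        });

        handle.Dispose(); // as the docs above say: dispose when finished
    }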
+ /// + /// CPU virtual address of the region + /// Size of the region + /// Desired granularity of write tracking + /// The memory tracking handle + CpuSmartMultiRegionHandle BeginSmartGranularTracking(ulong address, ulong size, ulong granularity); + } +} diff --git a/Ryujinx.Cpu/MemoryEhMeilleure.cs b/Ryujinx.Cpu/MemoryEhMeilleure.cs new file mode 100644 index 0000000000..ac7791b451 --- /dev/null +++ b/Ryujinx.Cpu/MemoryEhMeilleure.cs @@ -0,0 +1,41 @@ +using ARMeilleure.Signal; +using Ryujinx.Memory; +using Ryujinx.Memory.Tracking; +using System; +using System.Runtime.InteropServices; + +namespace Ryujinx.Cpu +{ + class MemoryEhMeilleure : IDisposable + { + private delegate bool TrackingEventDelegate(ulong address, ulong size, bool write); + + private readonly MemoryBlock _addressSpace; + private readonly MemoryTracking _tracking; + private readonly TrackingEventDelegate _trackingEvent; + + private readonly ulong _baseAddress; + + public MemoryEhMeilleure(MemoryBlock addressSpace, MemoryTracking tracking) + { + _addressSpace = addressSpace; + _tracking = tracking; + + _baseAddress = (ulong)_addressSpace.Pointer; + ulong endAddress = _baseAddress + addressSpace.Size; + + _trackingEvent = new TrackingEventDelegate(tracking.VirtualMemoryEvent); + bool added = NativeSignalHandler.AddTrackedRegion((nuint)_baseAddress, (nuint)endAddress, Marshal.GetFunctionPointerForDelegate(_trackingEvent)); + + if (!added) + { + throw new InvalidOperationException("Number of allowed tracked regions exceeded."); + } + } + + public void Dispose() + { + NativeSignalHandler.RemoveTrackedRegion((nuint)_baseAddress); + } + } +} diff --git a/Ryujinx.Cpu/MemoryManager.cs b/Ryujinx.Cpu/MemoryManager.cs index 591299ca71..dbc2f736ac 100644 --- a/Ryujinx.Cpu/MemoryManager.cs +++ b/Ryujinx.Cpu/MemoryManager.cs @@ -1,9 +1,11 @@ using ARMeilleure.Memory; using Ryujinx.Cpu.Tracking; using Ryujinx.Memory; +using Ryujinx.Memory.Range; using Ryujinx.Memory.Tracking; using System; using System.Collections.Generic; +using System.Linq; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Threading; @@ -13,7 +15,7 @@ namespace Ryujinx.Cpu /// /// Represents a CPU memory manager. /// - public sealed class MemoryManager : IMemoryManager, IVirtualMemoryManager, IWritableBlock, IDisposable + public sealed class MemoryManager : MemoryManagerBase, IMemoryManager, IVirtualMemoryManagerTracked, IWritableBlock { public const int PageBits = 12; public const int PageSize = 1 << PageBits; @@ -32,7 +34,6 @@ namespace Ryujinx.Cpu private readonly ulong _addressSpaceSize; - private readonly MemoryBlock _backingMemory; private readonly MemoryBlock _pageTable; /// @@ -40,17 +41,18 @@ namespace Ryujinx.Cpu /// public IntPtr PageTablePointer => _pageTable.Pointer; + public MemoryManagerType Type => MemoryManagerType.SoftwarePageTable; + public MemoryTracking Tracking { get; } - internal event Action UnmapEvent; + public event Action UnmapEvent; /// /// Creates a new instance of the memory manager. 
/// - /// Physical backing memory where virtual memory will be mapped to /// Size of the address space /// Optional function to handle invalid memory accesses - public MemoryManager(MemoryBlock backingMemory, ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler = null) + public MemoryManager(ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler = null) { _invalidAccessHandler = invalidAccessHandler; @@ -65,45 +67,30 @@ namespace Ryujinx.Cpu AddressSpaceBits = asBits; _addressSpaceSize = asSize; - _backingMemory = backingMemory; _pageTable = new MemoryBlock((asSize / PageSize) * PteSize); - Tracking = new MemoryTracking(this, backingMemory, PageSize); - Tracking.EnablePhysicalProtection = false; // Disabled for now, as protection is done in software. + Tracking = new MemoryTracking(this, PageSize); } - /// - /// Maps a virtual memory range into a physical memory range. - /// - /// - /// Addresses and size must be page aligned. - /// - /// Virtual memory address - /// Physical memory address - /// Size to be mapped - public void Map(ulong va, ulong pa, ulong size) + /// + public void Map(ulong va, nuint hostAddress, ulong size) { AssertValidAddressAndSize(va, size); ulong remainingSize = size; ulong oVa = va; - ulong oPa = pa; while (remainingSize != 0) { - _pageTable.Write((va / PageSize) * PteSize, PaToPte(pa)); + _pageTable.Write((va / PageSize) * PteSize, hostAddress); va += PageSize; - pa += PageSize; + hostAddress += PageSize; remainingSize -= PageSize; } - Tracking.Map(oVa, oPa, size); + Tracking.Map(oVa, size); } - /// - /// Unmaps a previously mapped range of virtual memory. - /// - /// Virtual address of the range to be unmapped - /// Size of the range to be unmapped + /// public void Unmap(ulong va, ulong size) { // If size is 0, there's nothing to unmap, just exit early. @@ -120,66 +107,39 @@ namespace Ryujinx.Cpu ulong remainingSize = size; while (remainingSize != 0) { - _pageTable.Write((va / PageSize) * PteSize, 0UL); + _pageTable.Write((va / PageSize) * PteSize, (nuint)0); va += PageSize; remainingSize -= PageSize; } } - /// - /// Reads data from CPU mapped memory. - /// - /// Type of the data being read - /// Virtual address of the data in memory - /// The data - /// Throw for unhandled invalid or unmapped memory accesses + /// public T Read(ulong va) where T : unmanaged { return MemoryMarshal.Cast(GetSpan(va, Unsafe.SizeOf(), true))[0]; } - /// - /// Reads data from CPU mapped memory, with read tracking - /// - /// Type of the data being read - /// Virtual address of the data in memory - /// The data + /// public T ReadTracked(ulong va) where T : unmanaged { SignalMemoryTracking(va, (ulong)Unsafe.SizeOf(), false); return MemoryMarshal.Cast(GetSpan(va, Unsafe.SizeOf()))[0]; } - /// - /// Reads data from CPU mapped memory. - /// - /// Virtual address of the data in memory - /// Span to store the data being read into - /// Throw for unhandled invalid or unmapped memory accesses + /// public void Read(ulong va, Span data) { ReadImpl(va, data); } - /// - /// Writes data to CPU mapped memory. - /// - /// Type of the data being written - /// Virtual address to write the data into - /// Data to be written - /// Throw for unhandled invalid or unmapped memory accesses + /// public void Write(ulong va, T value) where T : unmanaged { Write(va, MemoryMarshal.Cast(MemoryMarshal.CreateSpan(ref value, 1))); } - /// - /// Writes data to CPU mapped memory, with write tracking. 
- /// - /// Virtual address to write the data into - /// Data to be written - /// Throw for unhandled invalid or unmapped memory accesses + /// public void Write(ulong va, ReadOnlySpan data) { if (data.Length == 0) @@ -192,11 +152,7 @@ namespace Ryujinx.Cpu WriteImpl(va, data); } - /// - /// Writes data to CPU mapped memory, without write tracking. - /// - /// Virtual address to write the data into - /// Data to be written + /// public void WriteUntracked(ulong va, ReadOnlySpan data) { if (data.Length == 0) @@ -221,7 +177,7 @@ namespace Ryujinx.Cpu if (IsContiguousAndMapped(va, data.Length)) { - data.CopyTo(_backingMemory.GetSpan(GetPhysicalAddressInternal(va), data.Length)); + data.CopyTo(GetHostSpanContiguous(va, data.Length)); } else { @@ -229,22 +185,18 @@ namespace Ryujinx.Cpu if ((va & PageMask) != 0) { - ulong pa = GetPhysicalAddressInternal(va); - size = Math.Min(data.Length, PageSize - (int)(va & PageMask)); - data.Slice(0, size).CopyTo(_backingMemory.GetSpan(pa, size)); + data.Slice(0, size).CopyTo(GetHostSpanContiguous(va, size)); offset += size; } for (; offset < data.Length; offset += size) { - ulong pa = GetPhysicalAddressInternal(va + (ulong)offset); - size = Math.Min(data.Length - offset, PageSize); - data.Slice(offset, size).CopyTo(_backingMemory.GetSpan(pa, size)); + data.Slice(offset, size).CopyTo(GetHostSpanContiguous(va + (ulong)offset, size)); } } } @@ -257,18 +209,7 @@ namespace Ryujinx.Cpu } } - /// - /// Gets a read-only span of data from CPU mapped memory. - /// - /// - /// This may perform a allocation if the data is not contiguous in memory. - /// For this reason, the span is read-only, you can't modify the data. - /// - /// Virtual address of the data - /// Size of the data - /// True if read tracking is triggered on the span - /// A read-only span of the data - /// Throw for unhandled invalid or unmapped memory accesses + /// public ReadOnlySpan GetSpan(ulong va, int size, bool tracked = false) { if (size == 0) @@ -283,7 +224,7 @@ namespace Ryujinx.Cpu if (IsContiguousAndMapped(va, size)) { - return _backingMemory.GetSpan(GetPhysicalAddressInternal(va), size); + return GetHostSpanContiguous(va, size); } else { @@ -295,19 +236,8 @@ namespace Ryujinx.Cpu } } - /// - /// Gets a region of memory that can be written to. - /// - /// - /// If the requested region is not contiguous in physical memory, - /// this will perform an allocation, and flush the data (writing it - /// back to guest memory) on disposal. - /// - /// Virtual address of the data - /// Size of the data - /// A writable region of memory containing the data - /// Throw for unhandled invalid or unmapped memory accesses - public WritableRegion GetWritableRegion(ulong va, int size) + /// + public unsafe WritableRegion GetWritableRegion(ulong va, int size) { if (size == 0) { @@ -316,7 +246,7 @@ namespace Ryujinx.Cpu if (IsContiguousAndMapped(va, size)) { - return new WritableRegion(null, va, _backingMemory.GetMemory(GetPhysicalAddressInternal(va), size)); + return new WritableRegion(null, va, new NativeMemoryManager((byte*)GetHostAddress(va), size).Memory); } else { @@ -328,17 +258,8 @@ namespace Ryujinx.Cpu } } - /// - /// Gets a reference for the given type at the specified virtual memory address. - /// - /// - /// The data must be located at a contiguous memory region. 
- /// - /// Type of the data to get the reference - /// Virtual address of the data - /// A reference to the data in memory - /// Throw if the specified memory region is not contiguous in physical memory - public ref T GetRef(ulong va) where T : unmanaged + /// + public unsafe ref T GetRef(ulong va) where T : unmanaged { if (!IsContiguous(va, Unsafe.SizeOf())) { @@ -347,7 +268,7 @@ namespace Ryujinx.Cpu SignalMemoryTracking(va, (ulong)Unsafe.SizeOf(), true); - return ref _backingMemory.GetRef(GetPhysicalAddressInternal(va)); + return ref *(T*)GetHostAddress(va); } /// @@ -389,7 +310,7 @@ namespace Ryujinx.Cpu return false; } - if (GetPhysicalAddressInternal(va) + PageSize != GetPhysicalAddressInternal(va + PageSize)) + if (GetHostAddress(va) + PageSize != GetHostAddress(va + PageSize)) { return false; } @@ -400,15 +321,14 @@ namespace Ryujinx.Cpu return true; } - /// - /// Gets the physical regions that make up the given virtual address region. - /// If any part of the virtual region is unmapped, null is returned. - /// - /// Virtual address of the range - /// Size of the range - /// Array of physical regions - public (ulong address, ulong size)[] GetPhysicalRegions(ulong va, ulong size) + /// + public IEnumerable GetPhysicalRegions(ulong va, ulong size) { + if (size == 0) + { + return Enumerable.Empty(); + } + if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size)) { return null; @@ -416,9 +336,9 @@ namespace Ryujinx.Cpu int pages = GetPagesCount(va, (uint)size, out va); - List<(ulong, ulong)> regions = new List<(ulong, ulong)>(); + var regions = new List(); - ulong regionStart = GetPhysicalAddressInternal(va); + nuint regionStart = GetHostAddress(va); ulong regionSize = PageSize; for (int page = 0; page < pages - 1; page++) @@ -428,12 +348,12 @@ namespace Ryujinx.Cpu return null; } - ulong newPa = GetPhysicalAddressInternal(va + PageSize); + nuint newHostAddress = GetHostAddress(va + PageSize); - if (GetPhysicalAddressInternal(va) + PageSize != newPa) + if (GetHostAddress(va) + PageSize != newHostAddress) { - regions.Add((regionStart, regionSize)); - regionStart = newPa; + regions.Add(new HostMemoryRange(regionStart, regionSize)); + regionStart = newHostAddress; regionSize = 0; } @@ -441,9 +361,9 @@ namespace Ryujinx.Cpu regionSize += PageSize; } - regions.Add((regionStart, regionSize)); + regions.Add(new HostMemoryRange(regionStart, regionSize)); - return regions.ToArray(); + return regions; } private void ReadImpl(ulong va, Span data) @@ -461,22 +381,18 @@ namespace Ryujinx.Cpu if ((va & PageMask) != 0) { - ulong pa = GetPhysicalAddressInternal(va); - size = Math.Min(data.Length, PageSize - (int)(va & PageMask)); - _backingMemory.GetSpan(pa, size).CopyTo(data.Slice(0, size)); + GetHostSpanContiguous(va, size).CopyTo(data.Slice(0, size)); offset += size; } for (; offset < data.Length; offset += size) { - ulong pa = GetPhysicalAddressInternal(va + (ulong)offset); - size = Math.Min(data.Length - offset, PageSize); - _backingMemory.GetSpan(pa, size).CopyTo(data.Slice(offset, size)); + GetHostSpanContiguous(va + (ulong)offset, size).CopyTo(data.Slice(offset, size)); } } catch (InvalidMemoryRegionException) @@ -488,12 +404,7 @@ namespace Ryujinx.Cpu } } - /// - /// Checks if a memory range is mapped. 
- /// - /// Virtual address of the range - /// Size of the range in bytes - /// True if the entire range is mapped, false otherwise + /// public bool IsRangeMapped(ulong va, ulong size) { if (size == 0UL) @@ -521,11 +432,7 @@ namespace Ryujinx.Cpu return true; } - /// - /// Checks if the page at a given CPU virtual address is mapped. - /// - /// Virtual address to check - /// True if the address is mapped, false otherwise + /// [MethodImpl(MethodImplOptions.AggressiveInlining)] public bool IsMapped(ulong va) { @@ -534,7 +441,7 @@ namespace Ryujinx.Cpu return false; } - return _pageTable.Read((va / PageSize) * PteSize) != 0; + return _pageTable.Read((va / PageSize) * PteSize) != 0; } private bool ValidateAddress(ulong va) @@ -569,35 +476,39 @@ namespace Ryujinx.Cpu } /// - /// Performs address translation of the address inside a CPU mapped memory range. + /// Get a span representing the given virtual address and size range in host memory. + /// This function assumes that the requested virtual memory region is contiguous. /// - /// - /// If the address is invalid or unmapped, -1 will be returned. - /// - /// Virtual address to be translated - /// The physical address - public ulong GetPhysicalAddress(ulong va) + /// Virtual address of the range + /// Size of the range in bytes + /// A span representing the given virtual range in host memory + /// Throw when the base virtual address is not mapped + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private unsafe Span GetHostSpanContiguous(ulong va, int size) { - // We return -1L if the virtual address is invalid or unmapped. - if (!ValidateAddress(va) || !IsMapped(va)) - { - return ulong.MaxValue; - } - - return GetPhysicalAddressInternal(va); - } - - private ulong GetPhysicalAddressInternal(ulong va) - { - return PteToPa(_pageTable.Read((va / PageSize) * PteSize) & ~(0xffffUL << 48)) + (va & PageMask); + return new Span((void*)GetHostAddress(va), size); } /// - /// Reprotect a region of virtual memory for tracking. Sets software protection bits. + /// Get the host address for a given virtual address, using the page table. /// - /// Virtual address base - /// Size of the region to protect - /// Memory protection to set + /// Virtual address + /// The corresponding host address for the given virtual address + /// Throw when the virtual address is not mapped + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private nuint GetHostAddress(ulong va) + { + nuint pageBase = _pageTable.Read((va / PageSize) * PteSize) & unchecked((nuint)0xffff_ffff_ffffUL); + + if (pageBase == 0) + { + ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}"); + } + + return pageBase + (nuint)(va & PageMask); + } + + /// public void TrackingReprotect(ulong va, ulong size, MemoryPermission protection) { AssertValidAddressAndSize(va, size); @@ -632,47 +543,25 @@ namespace Ryujinx.Cpu } } - /// - /// Obtains a memory tracking handle for the given virtual region. This should be disposed when finished with. - /// - /// CPU virtual address of the region - /// Size of the region - /// The memory tracking handle + /// public CpuRegionHandle BeginTracking(ulong address, ulong size) { return new CpuRegionHandle(Tracking.BeginTracking(address, size)); } - /// - /// Obtains a memory tracking handle for the given virtual region, with a specified granularity. This should be disposed when finished with. 
- /// - /// CPU virtual address of the region - /// Size of the region - /// Desired granularity of write tracking - /// The memory tracking handle + /// public CpuMultiRegionHandle BeginGranularTracking(ulong address, ulong size, ulong granularity) { return new CpuMultiRegionHandle(Tracking.BeginGranularTracking(address, size, granularity)); } - /// - /// Obtains a smart memory tracking handle for the given virtual region, with a specified granularity. This should be disposed when finished with. - /// - /// CPU virtual address of the region - /// Size of the region - /// Desired granularity of write tracking - /// The memory tracking handle + /// public CpuSmartMultiRegionHandle BeginSmartGranularTracking(ulong address, ulong size, ulong granularity) { return new CpuSmartMultiRegionHandle(Tracking.BeginSmartGranularTracking(address, size, granularity)); } - /// - /// Alerts the memory tracking that a given region has been read from or written to. - /// This should be called before read/write is performed. - /// - /// Virtual address of the region - /// Size of the region + /// public void SignalMemoryTracking(ulong va, ulong size, bool write) { AssertValidAddressAndSize(va, size); @@ -704,19 +593,11 @@ namespace Ryujinx.Cpu } } - private ulong PaToPte(ulong pa) - { - return (ulong)_backingMemory.GetPointer(pa, PageSize).ToInt64(); - } - - private ulong PteToPa(ulong pte) - { - return (ulong)((long)pte - _backingMemory.Pointer.ToInt64()); - } - /// /// Disposes of resources used by the memory manager. /// - public void Dispose() => _pageTable.Dispose(); + protected override void Destroy() => _pageTable.Dispose(); + + private void ThrowInvalidMemoryRegionException(string message) => throw new InvalidMemoryRegionException(message); } } diff --git a/Ryujinx.Cpu/MemoryManagerBase.cs b/Ryujinx.Cpu/MemoryManagerBase.cs new file mode 100644 index 0000000000..d2fc7a1969 --- /dev/null +++ b/Ryujinx.Cpu/MemoryManagerBase.cs @@ -0,0 +1,32 @@ +using Ryujinx.Memory; +using System.Diagnostics; +using System.Threading; + +namespace Ryujinx.Cpu +{ + public abstract class MemoryManagerBase : IRefCounted + { + private int _referenceCount; + + public void IncrementReferenceCount() + { + int newRefCount = Interlocked.Increment(ref _referenceCount); + + Debug.Assert(newRefCount >= 1); + } + + public void DecrementReferenceCount() + { + int newRefCount = Interlocked.Decrement(ref _referenceCount); + + Debug.Assert(newRefCount >= 0); + + if (newRefCount == 0) + { + Destroy(); + } + } + + protected abstract void Destroy(); + } +} diff --git a/Ryujinx.Cpu/MemoryManagerHostMapped.cs b/Ryujinx.Cpu/MemoryManagerHostMapped.cs new file mode 100644 index 0000000000..da81d04f11 --- /dev/null +++ b/Ryujinx.Cpu/MemoryManagerHostMapped.cs @@ -0,0 +1,692 @@ +using ARMeilleure.Memory; +using Ryujinx.Cpu.Tracking; +using Ryujinx.Memory; +using Ryujinx.Memory.Range; +using Ryujinx.Memory.Tracking; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Threading; + +namespace Ryujinx.Cpu +{ + /// + /// Represents a CPU memory manager which maps guest virtual memory directly onto a host virtual region. + /// + public class MemoryManagerHostMapped : MemoryManagerBase, IMemoryManager, IVirtualMemoryManagerTracked + { + public const int PageBits = 12; + public const int PageSize = 1 << PageBits; + public const int PageMask = PageSize - 1; + + public const int PageToPteShift = 5; // 32 pages (2 bits each) in one ulong page table entry. 
+ public const ulong BlockMappedMask = 0x5555555555555555; // First bit of each table entry set. + + private enum HostMappedPtBits : ulong + { + Unmapped = 0, + Mapped, + WriteTracked, + ReadWriteTracked, + + MappedReplicated = 0x5555555555555555, + WriteTrackedReplicated = 0xaaaaaaaaaaaaaaaa, + ReadWriteTrackedReplicated = ulong.MaxValue + } + + private readonly InvalidAccessHandler _invalidAccessHandler; + private readonly bool _unsafeMode; + + private readonly MemoryBlock _addressSpace; + private readonly MemoryBlock _addressSpaceMirror; + private readonly ulong _addressSpaceSize; + + private readonly MemoryEhMeilleure _memoryEh; + + private ulong[] _pageTable; + + public int AddressSpaceBits { get; } + + public IntPtr PageTablePointer => _addressSpace.Pointer; + + public MemoryManagerType Type => _unsafeMode ? MemoryManagerType.HostMappedUnsafe : MemoryManagerType.HostMapped; + + public MemoryTracking Tracking { get; } + + public event Action UnmapEvent; + + /// + /// Creates a new instance of the host mapped memory manager. + /// + /// Size of the address space + /// True if unmanaged access should not be masked (unsafe), false otherwise. + /// Optional function to handle invalid memory accesses + public MemoryManagerHostMapped(ulong addressSpaceSize, bool unsafeMode, InvalidAccessHandler invalidAccessHandler = null) + { + _invalidAccessHandler = invalidAccessHandler; + _unsafeMode = unsafeMode; + _addressSpaceSize = addressSpaceSize; + + ulong asSize = PageSize; + int asBits = PageBits; + + while (asSize < addressSpaceSize) + { + asSize <<= 1; + asBits++; + } + + AddressSpaceBits = asBits; + + _pageTable = new ulong[1 << (AddressSpaceBits - (PageBits + PageToPteShift))]; + _addressSpace = new MemoryBlock(asSize, MemoryAllocationFlags.Reserve | MemoryAllocationFlags.Mirrorable); + _addressSpaceMirror = _addressSpace.CreateMirror(); + Tracking = new MemoryTracking(this, PageSize, invalidAccessHandler); + _memoryEh = new MemoryEhMeilleure(_addressSpace, Tracking); + } + + /// + /// Checks if the virtual address is part of the addressable space. + /// + /// Virtual address + /// True if the virtual address is part of the addressable space + private bool ValidateAddress(ulong va) + { + return va < _addressSpaceSize; + } + + /// + /// Checks if the combination of virtual address and size is part of the addressable space. + /// + /// Virtual address of the range + /// Size of the range in bytes + /// True if the combination of virtual address and size is part of the addressable space + private bool ValidateAddressAndSize(ulong va, ulong size) + { + ulong endVa = va + size; + return endVa >= va && endVa >= size && endVa <= _addressSpaceSize; + } + + /// + /// Ensures the combination of virtual address and size is part of the addressable space. + /// + /// Virtual address of the range + /// Size of the range in bytes + /// Throw when the memory region specified outside the addressable space + private void AssertValidAddressAndSize(ulong va, ulong size) + { + if (!ValidateAddressAndSize(va, size)) + { + throw new InvalidMemoryRegionException($"va=0x{va:X16}, size=0x{size:X16}"); + } + } + + /// + /// Ensures the combination of virtual address and size is part of the addressable space and fully mapped. 
+ /// + /// Virtual address of the range + /// Size of the range in bytes + private void AssertMapped(ulong va, ulong size) + { + if (!ValidateAddressAndSize(va, size) || !IsRangeMappedImpl(va, size)) + { + throw new InvalidMemoryRegionException($"Not mapped: va=0x{va:X16}, size=0x{size:X16}"); + } + } + + /// + public void Map(ulong va, nuint hostAddress, ulong size) + { + AssertValidAddressAndSize(va, size); + + _addressSpace.Commit(va, size); + AddMapping(va, size); + + Tracking.Map(va, size); + } + + /// + public void Unmap(ulong va, ulong size) + { + AssertValidAddressAndSize(va, size); + + UnmapEvent?.Invoke(va, size); + Tracking.Unmap(va, size); + + RemoveMapping(va, size); + _addressSpace.Decommit(va, size); + } + + /// + public T Read(ulong va) where T : unmanaged + { + try + { + AssertMapped(va, (ulong)Unsafe.SizeOf()); + + return _addressSpaceMirror.Read(va); + } + catch (InvalidMemoryRegionException) + { + if (_invalidAccessHandler == null || !_invalidAccessHandler(va)) + { + throw; + } + + return default; + } + } + + /// + public T ReadTracked(ulong va) where T : unmanaged + { + try + { + SignalMemoryTracking(va, (ulong)Unsafe.SizeOf(), false); + + return Read(va); + } + catch (InvalidMemoryRegionException) + { + if (_invalidAccessHandler == null || !_invalidAccessHandler(va)) + { + throw; + } + + return default; + } + } + + /// + public void Read(ulong va, Span data) + { + try + { + AssertMapped(va, (ulong)data.Length); + + _addressSpaceMirror.Read(va, data); + } + catch (InvalidMemoryRegionException) + { + if (_invalidAccessHandler == null || !_invalidAccessHandler(va)) + { + throw; + } + } + } + + /// + public void Write(ulong va, T value) where T : unmanaged + { + try + { + SignalMemoryTracking(va, (ulong)Unsafe.SizeOf(), write: true); + + _addressSpaceMirror.Write(va, value); + } + catch (InvalidMemoryRegionException) + { + if (_invalidAccessHandler == null || !_invalidAccessHandler(va)) + { + throw; + } + } + } + + /// + public void Write(ulong va, ReadOnlySpan data) + { + try { + SignalMemoryTracking(va, (ulong)data.Length, write: true); + + _addressSpaceMirror.Write(va, data); + } + catch (InvalidMemoryRegionException) + { + if (_invalidAccessHandler == null || !_invalidAccessHandler(va)) + { + throw; + } + } + } + + /// + public void WriteUntracked(ulong va, ReadOnlySpan data) + { + try + { + AssertMapped(va, (ulong)data.Length); + + _addressSpaceMirror.Write(va, data); + } + catch (InvalidMemoryRegionException) + { + if (_invalidAccessHandler == null || !_invalidAccessHandler(va)) + { + throw; + } + } +} + + /// + public ReadOnlySpan GetSpan(ulong va, int size, bool tracked = false) + { + if (tracked) + { + SignalMemoryTracking(va, (ulong)size, write: false); + } + else + { + AssertMapped(va, (ulong)size); + } + + return _addressSpaceMirror.GetSpan(va, size); + } + + /// + public WritableRegion GetWritableRegion(ulong va, int size) + { + AssertMapped(va, (ulong)size); + + return _addressSpaceMirror.GetWritableRegion(va, size); + } + + /// + public ref T GetRef(ulong va) where T : unmanaged + { + SignalMemoryTracking(va, (ulong)Unsafe.SizeOf(), true); + + return ref _addressSpaceMirror.GetRef(va); + } + + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public bool IsMapped(ulong va) + { + return ValidateAddress(va) && IsMappedImpl(va); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private bool IsMappedImpl(ulong va) + { + ulong page = va >> PageBits; + + int bit = (int)((page & 31) << 1); + + int pageIndex = (int)(page >> PageToPteShift); + 
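// Worked example (values illustrative): va = 0x2345000 gives page = 0x2345,
// so bit = (0x2345 & 31) << 1 = 10 and pageIndex = 0x2345 >> 5 = 0x11A; this
// page's two state bits are bits 10-11 of _pageTable[0x11A].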
ref ulong pageRef = ref _pageTable[pageIndex]; + + ulong pte = Volatile.Read(ref pageRef); + + return ((pte >> bit) & 3) != 0; + } + + /// + public bool IsRangeMapped(ulong va, ulong size) + { + AssertValidAddressAndSize(va, size); + + return IsRangeMappedImpl(va, size); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private void GetPageBlockRange(ulong pageStart, ulong pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex) + { + startMask = ulong.MaxValue << ((int)(pageStart & 31) << 1); + endMask = ulong.MaxValue >> (64 - ((int)(pageEnd & 31) << 1)); + + pageIndex = (int)(pageStart >> PageToPteShift); + pageEndIndex = (int)((pageEnd - 1) >> PageToPteShift); + } + + private bool IsRangeMappedImpl(ulong va, ulong size) + { + int pages = GetPagesCount(va, size, out _); + + if (pages == 1) + { + return IsMappedImpl(va); + } + + ulong pageStart = va >> PageBits; + ulong pageEnd = pageStart + (ulong)pages; + + GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex); + + // Check if either bit in each 2 bit page entry is set. + // OR the block with itself shifted down by 1, and check the first bit of each entry. + + ulong mask = BlockMappedMask & startMask; + + while (pageIndex <= pageEndIndex) + { + if (pageIndex == pageEndIndex) + { + mask &= endMask; + } + + ref ulong pageRef = ref _pageTable[pageIndex++]; + ulong pte = Volatile.Read(ref pageRef); + + pte |= pte >> 1; + if ((pte & mask) != mask) + { + return false; + } + + mask = BlockMappedMask; + } + + return true; + } + + /// + public IEnumerable GetPhysicalRegions(ulong va, ulong size) + { + if (size == 0) + { + return Enumerable.Empty(); + } + + AssertMapped(va, size); + + return new HostMemoryRange[] { new HostMemoryRange(_addressSpaceMirror.GetPointer(va, size), size) }; + } + + /// + /// + /// This function also validates that the given range is both valid and mapped, and will throw if it is not. + /// + public void SignalMemoryTracking(ulong va, ulong size, bool write) + { + AssertValidAddressAndSize(va, size); + + // Software table, used for managed memory tracking. + + int pages = GetPagesCount(va, size, out _); + ulong pageStart = va >> PageBits; + + if (pages == 1) + { + ulong tag = (ulong)(write ? 
HostMappedPtBits.WriteTracked : HostMappedPtBits.ReadWriteTracked); + + int bit = (int)((pageStart & 31) << 1); + + int pageIndex = (int)(pageStart >> PageToPteShift); + ref ulong pageRef = ref _pageTable[pageIndex]; + + ulong pte = Volatile.Read(ref pageRef); + ulong state = ((pte >> bit) & 3); + + if (state >= tag) + { + Tracking.VirtualMemoryEvent(va, size, write); + return; + } + else if (state == 0) + { + ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}, size=0x{size:X16}"); + } + } + else + { + ulong pageEnd = pageStart + (ulong)pages; + + GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex); + + ulong mask = startMask; + + ulong anyTrackingTag = (ulong)HostMappedPtBits.WriteTrackedReplicated; + + while (pageIndex <= pageEndIndex) + { + if (pageIndex == pageEndIndex) + { + mask &= endMask; + } + + ref ulong pageRef = ref _pageTable[pageIndex++]; + + ulong pte = Volatile.Read(ref pageRef); + ulong mappedMask = mask & BlockMappedMask; + + ulong mappedPte = pte | (pte >> 1); + if ((mappedPte & mappedMask) != mappedMask) + { + ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}, size=0x{size:X16}"); + } + + pte &= mask; + if ((pte & anyTrackingTag) != 0) // Search for any tracking. + { + // Writes trigger any tracking. + // Only trigger tracking from reads if both bits are set on any page. + if (write || (pte & (pte >> 1) & BlockMappedMask) != 0) + { + Tracking.VirtualMemoryEvent(va, size, write); + break; + } + } + + mask = ulong.MaxValue; + } + } + } + + /// + /// Computes the number of pages in a virtual address range. + /// + /// Virtual address of the range + /// Size of the range + /// The virtual address of the beginning of the first page + /// This function does not differentiate between allocated and unallocated pages. + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private int GetPagesCount(ulong va, ulong size, out ulong startVa) + { + // WARNING: Always check if ulong does not overflow during the operations. + startVa = va & ~(ulong)PageMask; + ulong vaSpan = (va - startVa + size + PageMask) & ~(ulong)PageMask; + + return (int)(vaSpan / PageSize); + } + + /// + public void TrackingReprotect(ulong va, ulong size, MemoryPermission protection) + { + // Protection is inverted on software pages, since the default value is 0. 
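// Concretely: ReadAndWrite (nothing to track) inverts to None and becomes the
// plain Mapped tag; Read (write-protected) inverts to Write and becomes
// WriteTracked; None (fully protected) inverts to ReadAndWrite and becomes
// ReadWriteTracked. A zeroed page table entry therefore always means Unmapped.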
+ protection = (~protection) & MemoryPermission.ReadAndWrite; + + int pages = GetPagesCount(va, size, out va); + ulong pageStart = va >> PageBits; + + if (pages == 1) + { + ulong protTag = protection switch + { + MemoryPermission.None => (ulong)HostMappedPtBits.Mapped, + MemoryPermission.Write => (ulong)HostMappedPtBits.WriteTracked, + _ => (ulong)HostMappedPtBits.ReadWriteTracked, + }; + + int bit = (int)((pageStart & 31) << 1); + + ulong tagMask = 3UL << bit; + ulong invTagMask = ~tagMask; + + ulong tag = protTag << bit; + + int pageIndex = (int)(pageStart >> PageToPteShift); + ref ulong pageRef = ref _pageTable[pageIndex]; + + ulong pte; + + do + { + pte = Volatile.Read(ref pageRef); + } + while ((pte & tagMask) != 0 && Interlocked.CompareExchange(ref pageRef, (pte & invTagMask) | tag, pte) != pte); + } + else + { + ulong pageEnd = pageStart + (ulong)pages; + + GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex); + + ulong mask = startMask; + + ulong protTag = protection switch + { + MemoryPermission.None => (ulong)HostMappedPtBits.MappedReplicated, + MemoryPermission.Write => (ulong)HostMappedPtBits.WriteTrackedReplicated, + _ => (ulong)HostMappedPtBits.ReadWriteTrackedReplicated, + }; + + while (pageIndex <= pageEndIndex) + { + if (pageIndex == pageEndIndex) + { + mask &= endMask; + } + + ref ulong pageRef = ref _pageTable[pageIndex++]; + + ulong pte; + ulong mappedMask; + + // Change the protection of all 2 bit entries that are mapped. + do + { + pte = Volatile.Read(ref pageRef); + + mappedMask = pte | (pte >> 1); + mappedMask |= (mappedMask & BlockMappedMask) << 1; + mappedMask &= mask; // Only update mapped pages within the given range. + } + while (Interlocked.CompareExchange(ref pageRef, (pte & (~mappedMask)) | (protTag & mappedMask), pte) != pte); + + mask = ulong.MaxValue; + } + } + + protection = protection switch + { + MemoryPermission.None => MemoryPermission.ReadAndWrite, + MemoryPermission.Write => MemoryPermission.Read, + _ => MemoryPermission.None + }; + + _addressSpace.Reprotect(va, size, protection, false); + } + + /// + public CpuRegionHandle BeginTracking(ulong address, ulong size) + { + return new CpuRegionHandle(Tracking.BeginTracking(address, size)); + } + + /// + public CpuMultiRegionHandle BeginGranularTracking(ulong address, ulong size, ulong granularity) + { + return new CpuMultiRegionHandle(Tracking.BeginGranularTracking(address, size, granularity)); + } + + /// + public CpuSmartMultiRegionHandle BeginSmartGranularTracking(ulong address, ulong size, ulong granularity) + { + return new CpuSmartMultiRegionHandle(Tracking.BeginSmartGranularTracking(address, size, granularity)); + } + + /// + /// Adds the given address mapping to the page table. + /// + /// Virtual memory address + /// Size to be mapped + private void AddMapping(ulong va, ulong size) + { + int pages = GetPagesCount(va, size, out _); + ulong pageStart = va >> PageBits; + ulong pageEnd = pageStart + (ulong)pages; + + GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex); + + ulong mask = startMask; + + while (pageIndex <= pageEndIndex) + { + if (pageIndex == pageEndIndex) + { + mask &= endMask; + } + + ref ulong pageRef = ref _pageTable[pageIndex++]; + + ulong pte; + ulong mappedMask; + + // Map all 2-bit entries that are unmapped. 
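// The CAS below preserves every entry covered by mappedMask (already mapped,
// or outside the requested range) and writes the Mapped pattern (01 per entry)
// into the rest. Example: pte = 0b0001 (entry0 Mapped, entry1 Unmapped) with
// mask = 0b1111 gives mappedMask = 0b0011 | ~mask, so the new value is
// (pte & mappedMask) | (BlockMappedMask & ~mappedMask) = 0b0001 | 0b0100
// = 0b0101: entry1 becomes Mapped while entry0 is left untouched.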
+ do + { + pte = Volatile.Read(ref pageRef); + + mappedMask = pte | (pte >> 1); + mappedMask |= (mappedMask & BlockMappedMask) << 1; + mappedMask |= ~mask; // Treat everything outside the range as mapped, thus unchanged. + } + while (Interlocked.CompareExchange(ref pageRef, (pte & mappedMask) | (BlockMappedMask & (~mappedMask)), pte) != pte); + + mask = ulong.MaxValue; + } + } + + /// + /// Removes the given address mapping from the page table. + /// + /// Virtual memory address + /// Size to be unmapped + private void RemoveMapping(ulong va, ulong size) + { + int pages = GetPagesCount(va, size, out _); + ulong pageStart = va >> PageBits; + ulong pageEnd = pageStart + (ulong)pages; + + GetPageBlockRange(pageStart, pageEnd, out ulong startMask, out ulong endMask, out int pageIndex, out int pageEndIndex); + + startMask = ~startMask; + endMask = ~endMask; + + ulong mask = startMask; + + while (pageIndex <= pageEndIndex) + { + if (pageIndex == pageEndIndex) + { + mask |= endMask; + } + + ref ulong pageRef = ref _pageTable[pageIndex++]; + ulong pte; + + do + { + pte = Volatile.Read(ref pageRef); + } + while (Interlocked.CompareExchange(ref pageRef, pte & mask, pte) != pte); + + mask = 0; + } + } + + /// + /// Disposes of resources used by the memory manager. + /// + protected override void Destroy() + { + _addressSpaceMirror.Dispose(); + _addressSpace.Dispose(); + _memoryEh.Dispose(); + } + + private void ThrowInvalidMemoryRegionException(string message) => throw new InvalidMemoryRegionException(message); + } +} diff --git a/Ryujinx.Cpu/Tracking/CpuMultiRegionHandle.cs b/Ryujinx.Cpu/Tracking/CpuMultiRegionHandle.cs index 8204a13eb0..344b1a7897 100644 --- a/Ryujinx.Cpu/Tracking/CpuMultiRegionHandle.cs +++ b/Ryujinx.Cpu/Tracking/CpuMultiRegionHandle.cs @@ -15,6 +15,7 @@ namespace Ryujinx.Cpu.Tracking } public void Dispose() => _impl.Dispose(); + public void ForceDirty(ulong address, ulong size) => _impl.ForceDirty(address, size); public void QueryModified(Action modifiedAction) => _impl.QueryModified(modifiedAction); public void QueryModified(ulong address, ulong size, Action modifiedAction) => _impl.QueryModified(address, size, modifiedAction); public void QueryModified(ulong address, ulong size, Action modifiedAction, int sequenceNumber) => _impl.QueryModified(address, size, modifiedAction, sequenceNumber); diff --git a/Ryujinx.Cpu/Tracking/CpuRegionHandle.cs b/Ryujinx.Cpu/Tracking/CpuRegionHandle.cs index 6a530b0e79..acb27b4017 100644 --- a/Ryujinx.Cpu/Tracking/CpuRegionHandle.cs +++ b/Ryujinx.Cpu/Tracking/CpuRegionHandle.cs @@ -19,6 +19,8 @@ namespace Ryujinx.Cpu.Tracking } public void Dispose() => _impl.Dispose(); + public bool DirtyOrVolatile() => _impl.DirtyOrVolatile(); + public void ForceDirty() => _impl.ForceDirty(); public void RegisterAction(RegionSignal action) => _impl.RegisterAction(action); public void RegisterDirtyEvent(Action action) => _impl.RegisterDirtyEvent(action); public void Reprotect(bool asDirty = false) => _impl.Reprotect(asDirty); diff --git a/Ryujinx.Cpu/Tracking/CpuSmartMultiRegionHandle.cs b/Ryujinx.Cpu/Tracking/CpuSmartMultiRegionHandle.cs index e38babfc57..944e4c02af 100644 --- a/Ryujinx.Cpu/Tracking/CpuSmartMultiRegionHandle.cs +++ b/Ryujinx.Cpu/Tracking/CpuSmartMultiRegionHandle.cs @@ -15,6 +15,7 @@ namespace Ryujinx.Cpu.Tracking } public void Dispose() => _impl.Dispose(); + public void ForceDirty(ulong address, ulong size) => _impl.ForceDirty(address, size); public void RegisterAction(RegionSignal action) => _impl.RegisterAction(action); public void 
QueryModified(Action modifiedAction) => _impl.QueryModified(modifiedAction); public void QueryModified(ulong address, ulong size, Action modifiedAction) => _impl.QueryModified(address, size, modifiedAction); diff --git a/Ryujinx.Graphics.Gpu/Engine/Compute.cs b/Ryujinx.Graphics.Gpu/Engine/Compute.cs index bcff595343..be317a7f4d 100644 --- a/Ryujinx.Graphics.Gpu/Engine/Compute.cs +++ b/Ryujinx.Graphics.Gpu/Engine/Compute.cs @@ -16,6 +16,8 @@ namespace Ryujinx.Graphics.Gpu.Engine /// Method call argument public void Dispatch(GpuState state, int argument) { + FlushUboDirty(); + uint qmdAddress = (uint)state.Get(MethodOffset.DispatchParamsAddress); var qmd = _context.MemoryManager.Read((ulong)qmdAddress << 8); diff --git a/Ryujinx.Graphics.Gpu/Engine/MethodCopyBuffer.cs b/Ryujinx.Graphics.Gpu/Engine/MethodCopyBuffer.cs index dd16cb2de9..a1cf86ec60 100644 --- a/Ryujinx.Graphics.Gpu/Engine/MethodCopyBuffer.cs +++ b/Ryujinx.Graphics.Gpu/Engine/MethodCopyBuffer.cs @@ -69,6 +69,8 @@ namespace Ryujinx.Graphics.Gpu.Engine return; } + FlushUboDirty(); + if (copy2D) { // Buffer to texture copy. diff --git a/Ryujinx.Graphics.Gpu/Engine/MethodUniformBufferBind.cs b/Ryujinx.Graphics.Gpu/Engine/MethodUniformBufferBind.cs index 3fee1fcf8b..16fb31d693 100644 --- a/Ryujinx.Graphics.Gpu/Engine/MethodUniformBufferBind.cs +++ b/Ryujinx.Graphics.Gpu/Engine/MethodUniformBufferBind.cs @@ -66,6 +66,8 @@ namespace Ryujinx.Graphics.Gpu.Engine int index = (argument >> 4) & 0x1f; + FlushUboDirty(); + if (enable) { var uniformBuffer = state.Get(MethodOffset.UniformBufferState); diff --git a/Ryujinx.Graphics.Gpu/Engine/MethodUniformBufferUpdate.cs b/Ryujinx.Graphics.Gpu/Engine/MethodUniformBufferUpdate.cs index 61772327f2..3e1dd15180 100644 --- a/Ryujinx.Graphics.Gpu/Engine/MethodUniformBufferUpdate.cs +++ b/Ryujinx.Graphics.Gpu/Engine/MethodUniformBufferUpdate.cs @@ -1,3 +1,4 @@ +using Ryujinx.Graphics.Gpu.Memory; using Ryujinx.Graphics.Gpu.State; using System; using System.Runtime.InteropServices; @@ -6,6 +7,25 @@ namespace Ryujinx.Graphics.Gpu.Engine { partial class Methods { + // State associated with direct uniform buffer updates. + // This state is used to attempt to batch together consecutive updates. + private ulong _ubBeginCpuAddress = 0; + private ulong _ubFollowUpAddress = 0; + private ulong _ubByteCount = 0; + + /// + /// Flushes any queued ubo updates. + /// + private void FlushUboDirty() + { + if (_ubFollowUpAddress != 0) + { + BufferManager.ForceDirty(_ubFollowUpAddress - _ubByteCount, _ubByteCount); + + _ubFollowUpAddress = 0; + } + } + /// /// Updates the uniform buffer data with inline data. 
/// @@ -15,11 +35,22 @@ namespace Ryujinx.Graphics.Gpu.Engine { var uniformBuffer = state.Get(MethodOffset.UniformBufferState); - _context.MemoryManager.Write(uniformBuffer.Address.Pack() + (uint)uniformBuffer.Offset, argument); + ulong address = uniformBuffer.Address.Pack() + (uint)uniformBuffer.Offset; + + if (_ubFollowUpAddress != address) + { + FlushUboDirty(); + + _ubByteCount = 0; + _ubBeginCpuAddress = _context.MemoryManager.Translate(address); + } + + _context.PhysicalMemory.WriteUntracked(_ubBeginCpuAddress + _ubByteCount, MemoryMarshal.Cast(MemoryMarshal.CreateSpan(ref argument, 1))); + + _ubFollowUpAddress = address + 4; + _ubByteCount += 4; state.SetUniformBufferOffset(uniformBuffer.Offset + 4); - - _context.AdvanceSequence(); } /// @@ -31,11 +62,24 @@ namespace Ryujinx.Graphics.Gpu.Engine { var uniformBuffer = state.Get(MethodOffset.UniformBufferState); - _context.MemoryManager.Write(uniformBuffer.Address.Pack() + (uint)uniformBuffer.Offset, MemoryMarshal.Cast(data)); + ulong address = uniformBuffer.Address.Pack() + (uint)uniformBuffer.Offset; + + ulong size = (ulong)data.Length * 4; + + if (_ubFollowUpAddress != address) + { + FlushUboDirty(); + + _ubByteCount = 0; + _ubBeginCpuAddress = _context.MemoryManager.Translate(address); + } + + _context.PhysicalMemory.WriteUntracked(_ubBeginCpuAddress + _ubByteCount, MemoryMarshal.Cast(data)); + + _ubFollowUpAddress = address + size; + _ubByteCount += size; state.SetUniformBufferOffset(uniformBuffer.Offset + data.Length * 4); - - _context.AdvanceSequence(); } } } \ No newline at end of file diff --git a/Ryujinx.Graphics.Gpu/Engine/Methods.cs b/Ryujinx.Graphics.Gpu/Engine/Methods.cs index ae9bdb0d9d..431ea44967 100644 --- a/Ryujinx.Graphics.Gpu/Engine/Methods.cs +++ b/Ryujinx.Graphics.Gpu/Engine/Methods.cs @@ -130,6 +130,8 @@ namespace Ryujinx.Graphics.Gpu.Engine _prevTfEnable = false; } + FlushUboDirty(); + // Shaders must be the first one to be updated if modified, because // some of the other state depends on information from the currently // bound shaders. diff --git a/Ryujinx.Graphics.Gpu/GpuContext.cs b/Ryujinx.Graphics.Gpu/GpuContext.cs index f131ecc331..a9386ce5cd 100644 --- a/Ryujinx.Graphics.Gpu/GpuContext.cs +++ b/Ryujinx.Graphics.Gpu/GpuContext.cs @@ -137,7 +137,7 @@ namespace Ryujinx.Graphics.Gpu /// This is required for any GPU memory access. 
/// /// CPU memory manager - public void SetVmm(Cpu.MemoryManager cpuMemory) + public void SetVmm(Cpu.IVirtualMemoryManagerTracked cpuMemory) { PhysicalMemory = new PhysicalMemory(cpuMemory); } @@ -187,6 +187,8 @@ namespace Ryujinx.Graphics.Gpu Renderer.Dispose(); GPFifo.Dispose(); HostInitalized.Dispose(); + + PhysicalMemory.Dispose(); } } } \ No newline at end of file diff --git a/Ryujinx.Graphics.Gpu/Image/Pool.cs b/Ryujinx.Graphics.Gpu/Image/Pool.cs index 855f63443c..0b4c2993a2 100644 --- a/Ryujinx.Graphics.Gpu/Image/Pool.cs +++ b/Ryujinx.Graphics.Gpu/Image/Pool.cs @@ -1,6 +1,4 @@ -using Ryujinx.Common; using Ryujinx.Cpu.Tracking; -using Ryujinx.Graphics.Gpu.Memory; using System; namespace Ryujinx.Graphics.Gpu.Image diff --git a/Ryujinx.Graphics.Gpu/Image/TextureGroup.cs b/Ryujinx.Graphics.Gpu/Image/TextureGroup.cs index 52129d64b7..30ca59d4ad 100644 --- a/Ryujinx.Graphics.Gpu/Image/TextureGroup.cs +++ b/Ryujinx.Graphics.Gpu/Image/TextureGroup.cs @@ -1,5 +1,4 @@ -using Ryujinx.Common; -using Ryujinx.Cpu.Tracking; +using Ryujinx.Cpu.Tracking; using Ryujinx.Graphics.GAL; using Ryujinx.Graphics.Texture; using Ryujinx.Memory.Range; diff --git a/Ryujinx.Graphics.Gpu/Memory/Buffer.cs b/Ryujinx.Graphics.Gpu/Memory/Buffer.cs index cdd61b6d95..c567e30c1e 100644 --- a/Ryujinx.Graphics.Gpu/Memory/Buffer.cs +++ b/Ryujinx.Graphics.Gpu/Memory/Buffer.cs @@ -35,6 +35,11 @@ namespace Ryujinx.Graphics.Gpu.Memory /// public ulong EndAddress => Address + Size; + /// + /// Increments when the buffer is (partially) unmapped or disposed. + /// + public int UnmappedSequence { get; private set; } + /// /// Ranges of the buffer that have been modified on the GPU. /// Ranges defined here cannot be updated from CPU until a CPU waiting sync point is reached. @@ -45,9 +50,8 @@ namespace Ryujinx.Graphics.Gpu.Memory /// private BufferModifiedRangeList _modifiedRanges = null; - private CpuMultiRegionHandle _memoryTrackingGranular; - - private CpuRegionHandle _memoryTracking; + private readonly CpuMultiRegionHandle _memoryTrackingGranular; + private readonly CpuRegionHandle _memoryTracking; private readonly RegionSignal _externalFlushDelegate; private readonly Action _loadDelegate; @@ -130,6 +134,17 @@ namespace Ryujinx.Graphics.Gpu.Memory return Address < address + size && address < EndAddress; } + /// + /// Checks if a given range is fully contained in the buffer. + /// + /// Start address of the range + /// Size in bytes of the range + /// True if the range is contained, false otherwise + public bool FullyContains(ulong address, ulong size) + { + return address >= Address && address + size <= EndAddress; + } + /// /// Performs guest to host memory synchronization of the buffer data. /// @@ -147,7 +162,7 @@ namespace Ryujinx.Graphics.Gpu.Memory } else { - if (_memoryTracking.Dirty && _context.SequenceNumber != _sequenceNumber) + if (_context.SequenceNumber != _sequenceNumber && _memoryTracking.DirtyOrVolatile()) { _memoryTracking.Reprotect(); @@ -165,6 +180,39 @@ namespace Ryujinx.Graphics.Gpu.Memory } } + /// + /// Performs guest to host memory synchronization of the buffer data, regardless of sequence number. + /// + /// + /// This causes the buffer data to be overwritten if a write was detected from the CPU, + /// since the last call to this method. 
+ /// + /// Start address of the range to synchronize + /// Size in bytes of the range to synchronize + public void ForceSynchronizeMemory(ulong address, ulong size) + { + if (_useGranular) + { + _memoryTrackingGranular.QueryModified(address, size, _modifiedDelegate); + } + else + { + if (_memoryTracking.DirtyOrVolatile()) + { + _memoryTracking.Reprotect(); + + if (_modifiedRanges != null) + { + _modifiedRanges.ExcludeModifiedRegions(Address, Size, _loadDelegate); + } + else + { + _context.Renderer.SetBufferData(Handle, 0, _context.PhysicalMemory.GetSpan(Address, (int)Size)); + } + } + } + } + /// /// Ensure that the modified range list exists. /// @@ -316,6 +364,29 @@ namespace Ryujinx.Graphics.Gpu.Memory _context.Renderer.SetBufferData(Handle, offset, _context.PhysicalMemory.GetSpan(mAddress, (int)mSize)); } + /// + /// Force a region of the buffer to be dirty. Avoids reprotection and nullifies sequence number check. + /// + /// Start address of the modified region + /// Size of the region to force dirty + public void ForceDirty(ulong mAddress, ulong mSize) + { + if (_modifiedRanges != null) + { + _modifiedRanges.Clear(mAddress, mSize); + } + + if (_useGranular) + { + _memoryTrackingGranular.ForceDirty(mAddress, mSize); + } + else + { + _memoryTracking.ForceDirty(); + _sequenceNumber--; + } + } + /// /// Performs copy of all the buffer data from one buffer to another. /// @@ -385,6 +456,8 @@ namespace Ryujinx.Graphics.Gpu.Memory public void Unmapped(ulong address, ulong size) { _modifiedRanges?.Clear(address, size); + + UnmappedSequence++; } /// @@ -398,6 +471,8 @@ namespace Ryujinx.Graphics.Gpu.Memory _memoryTracking?.Dispose(); _context.Renderer.DeleteBuffer(Handle); + + UnmappedSequence++; } } } \ No newline at end of file diff --git a/Ryujinx.Graphics.Gpu/Memory/BufferCacheEntry.cs b/Ryujinx.Graphics.Gpu/Memory/BufferCacheEntry.cs new file mode 100644 index 0000000000..fa38b54e7b --- /dev/null +++ b/Ryujinx.Graphics.Gpu/Memory/BufferCacheEntry.cs @@ -0,0 +1,43 @@ +namespace Ryujinx.Graphics.Gpu.Memory +{ + /// + /// A cached entry for easily locating a buffer that is used often internally. + /// + class BufferCacheEntry + { + /// + /// The CPU VA of the buffer destination. + /// + public ulong Address; + + /// + /// The end GPU VA of the associated buffer, used to check if new data can fit. + /// + public ulong EndGpuAddress; + + /// + /// The buffer associated with this cache entry. + /// + public Buffer Buffer; + + /// + /// The UnmappedSequence of the buffer at the time of creation. + /// If this differs from the value currently in the buffer, then this cache entry is outdated. + /// + public int UnmappedSequence; + + /// + /// Create a new cache entry. + /// + /// The CPU VA of the buffer destination + /// The GPU VA of the buffer destination + /// The buffer object containing the target buffer + public BufferCacheEntry(ulong address, ulong gpuVa, Buffer buffer) + { + Address = address; + EndGpuAddress = gpuVa + (buffer.EndAddress - address); + Buffer = buffer; + UnmappedSequence = buffer.UnmappedSequence; + } + } +} diff --git a/Ryujinx.Graphics.Gpu/Memory/BufferManager.cs b/Ryujinx.Graphics.Gpu/Memory/BufferManager.cs index b2cd1ced70..4a794b19d3 100644 --- a/Ryujinx.Graphics.Gpu/Memory/BufferManager.cs +++ b/Ryujinx.Graphics.Gpu/Memory/BufferManager.cs @@ -114,6 +114,8 @@ namespace Ryujinx.Graphics.Gpu.Memory private bool _rebind; + private Dictionary _dirtyCache; + /// /// Creates a new instance of the buffer manager. 
/// @@ -143,6 +145,8 @@ namespace Ryujinx.Graphics.Gpu.Memory } _bufferTextures = new List(); + + _dirtyCache = new Dictionary(); } /// @@ -466,6 +470,29 @@ namespace Ryujinx.Graphics.Gpu.Memory CreateBufferAligned(alignedAddress, alignedEndAddress - alignedAddress); } + /// + /// Performs address translation of the GPU virtual address, and attempts to force + /// the buffer in the region as dirty. + /// The buffer lookup for this function is cached in a dictionary for quick access, which + /// accelerates common UBO updates. + /// + /// Start GPU virtual address of the buffer + /// Size in bytes of the buffer + public void ForceDirty(ulong gpuVa, ulong size) + { + BufferCacheEntry result; + + if (!_dirtyCache.TryGetValue(gpuVa, out result) || result.EndGpuAddress < gpuVa + size || result.UnmappedSequence != result.Buffer.UnmappedSequence) + { + ulong address = TranslateAndCreateBuffer(gpuVa, size); + result = new BufferCacheEntry(address, gpuVa, GetBuffer(address, size)); + + _dirtyCache[gpuVa] = result; + } + + result.Buffer.ForceDirty(result.Address, size); + } + /// /// Creates a new buffer for the specified range, if needed. /// If a buffer where this range can be fully contained already exists, @@ -520,7 +547,7 @@ namespace Ryujinx.Graphics.Gpu.Memory int dstOffset = (int)(buffer.Address - newBuffer.Address); - buffer.SynchronizeMemory(buffer.Address, buffer.Size); + buffer.ForceSynchronizeMemory(buffer.Address, buffer.Size); buffer.CopyTo(newBuffer, dstOffset); newBuffer.InheritModifiedRanges(buffer); diff --git a/Ryujinx.Graphics.Gpu/Memory/GpuRegionHandle.cs b/Ryujinx.Graphics.Gpu/Memory/GpuRegionHandle.cs index 92099b6a59..8a9c676769 100644 --- a/Ryujinx.Graphics.Gpu/Memory/GpuRegionHandle.cs +++ b/Ryujinx.Graphics.Gpu/Memory/GpuRegionHandle.cs @@ -56,5 +56,13 @@ namespace Ryujinx.Graphics.Gpu.Memory regionHandle.Reprotect(asDirty); } } + + public void ForceDirty() + { + foreach (var regionHandle in _cpuRegionHandles) + { + regionHandle.ForceDirty(); + } + } } } diff --git a/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs b/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs index 8b2401c743..3d2af5329a 100644 --- a/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs +++ b/Ryujinx.Graphics.Gpu/Memory/PhysicalMemory.cs @@ -12,19 +12,24 @@ namespace Ryujinx.Graphics.Gpu.Memory /// Represents physical memory, accessible from the GPU. /// This is actually working CPU virtual addresses, of memory mapped on the application process. /// - class PhysicalMemory + class PhysicalMemory : IDisposable { public const int PageSize = 0x1000; - private readonly Cpu.MemoryManager _cpuMemory; + private IVirtualMemoryManagerTracked _cpuMemory; /// /// Creates a new instance of the physical memory. /// /// CPU memory manager of the application process - public PhysicalMemory(Cpu.MemoryManager cpuMemory) + public PhysicalMemory(IVirtualMemoryManagerTracked cpuMemory) { _cpuMemory = cpuMemory; + + if (_cpuMemory is IRefCounted rc) + { + rc.IncrementReferenceCount(); + } } /// @@ -213,5 +218,18 @@ namespace Ryujinx.Graphics.Gpu.Memory { return _cpuMemory.BeginSmartGranularTracking(address, size, granularity); } + + /// + /// Release our reference to the CPU memory manager. 
+ /// + public void Dispose() + { + if (_cpuMemory is IRefCounted rc) + { + rc.DecrementReferenceCount(); + + _cpuMemory = null; + } + } } } \ No newline at end of file diff --git a/Ryujinx.HLE/HLEConfiguration.cs b/Ryujinx.HLE/HLEConfiguration.cs index 00c79169c6..72205827db 100644 --- a/Ryujinx.HLE/HLEConfiguration.cs +++ b/Ryujinx.HLE/HLEConfiguration.cs @@ -118,6 +118,11 @@ namespace Ryujinx.HLE /// This cannot be changed after instantiation. internal readonly string TimeZone; + + /// + /// + public MemoryManagerMode MemoryManagerMode { internal get; set; } + /// /// Control the inital state of the ignore missing services setting. /// If this is set to true, when a missing service is encountered, it will try to automatically handle it instead of throwing an exception. @@ -152,6 +157,7 @@ namespace Ryujinx.HLE int fsGlobalAccessLogMode, long systemTimeOffset, string timeZone, + MemoryManagerMode memoryManagerMode, bool ignoreMissingServices, AspectRatio aspectRatio) { @@ -172,6 +178,7 @@ namespace Ryujinx.HLE FsGlobalAccessLogMode = fsGlobalAccessLogMode; SystemTimeOffset = systemTimeOffset; TimeZone = timeZone; + MemoryManagerMode = memoryManagerMode; IgnoreMissingServices = ignoreMissingServices; AspectRatio = aspectRatio; } diff --git a/Ryujinx.HLE/HOS/ApplicationLoader.cs b/Ryujinx.HLE/HOS/ApplicationLoader.cs index 3832dd3eb2..0d48cc8153 100644 --- a/Ryujinx.HLE/HOS/ApplicationLoader.cs +++ b/Ryujinx.HLE/HOS/ApplicationLoader.cs @@ -517,7 +517,7 @@ namespace Ryujinx.HLE.HOS Graphics.Gpu.GraphicsConfig.TitleId = TitleIdText; _device.Gpu.HostInitalized.Set(); - Ptc.Initialize(TitleIdText, DisplayVersion, usePtc); + Ptc.Initialize(TitleIdText, DisplayVersion, usePtc, _device.Configuration.MemoryManagerMode); ProgramLoader.LoadNsos(_device.System.KernelContext, out ProcessTamperInfo tamperInfo, metaData, executables: programs); diff --git a/Ryujinx.HLE/HOS/ArmProcessContext.cs b/Ryujinx.HLE/HOS/ArmProcessContext.cs index fb7703b791..ae5fe601f2 100644 --- a/Ryujinx.HLE/HOS/ArmProcessContext.cs +++ b/Ryujinx.HLE/HOS/ArmProcessContext.cs @@ -1,24 +1,43 @@ -using ARMeilleure.State; +using ARMeilleure.Memory; +using ARMeilleure.State; using Ryujinx.Cpu; using Ryujinx.HLE.HOS.Kernel.Process; using Ryujinx.Memory; +using System; namespace Ryujinx.HLE.HOS { - class ArmProcessContext : IProcessContext + class ArmProcessContext : IProcessContext where T : class, IVirtualMemoryManager, IMemoryManager { - private readonly MemoryManager _memoryManager; private readonly CpuContext _cpuContext; + private T _memoryManager; public IVirtualMemoryManager AddressSpace => _memoryManager; - public ArmProcessContext(MemoryManager memoryManager) + public ArmProcessContext(T memoryManager) { + if (memoryManager is IRefCounted rc) + { + rc.IncrementReferenceCount(); + } + _memoryManager = memoryManager; _cpuContext = new CpuContext(memoryManager); } - public void Execute(ExecutionContext context, ulong codeAddress) => _cpuContext.Execute(context, codeAddress); - public void Dispose() => _memoryManager.Dispose(); + public void Execute(ExecutionContext context, ulong codeAddress) + { + _cpuContext.Execute(context, codeAddress); + } + + public void Dispose() + { + if (_memoryManager is IRefCounted rc) + { + rc.DecrementReferenceCount(); + + _memoryManager = null; + } + } } } diff --git a/Ryujinx.HLE/HOS/ArmProcessContextFactory.cs b/Ryujinx.HLE/HOS/ArmProcessContextFactory.cs index 050d3690ef..14617cf222 100644 --- a/Ryujinx.HLE/HOS/ArmProcessContextFactory.cs +++ b/Ryujinx.HLE/HOS/ArmProcessContextFactory.cs @@ 
-1,14 +1,31 @@ -using Ryujinx.Cpu; +using Ryujinx.Common.Configuration; +using Ryujinx.Cpu; +using Ryujinx.HLE.HOS.Kernel; using Ryujinx.HLE.HOS.Kernel.Process; using Ryujinx.Memory; +using System; namespace Ryujinx.HLE.HOS { class ArmProcessContextFactory : IProcessContextFactory { - public IProcessContext Create(MemoryBlock backingMemory, ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler) + public IProcessContext Create(KernelContext context, ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler) { - return new ArmProcessContext(new MemoryManager(backingMemory, addressSpaceSize, invalidAccessHandler)); + MemoryManagerMode mode = context.Device.Configuration.MemoryManagerMode; + + switch (mode) + { + case MemoryManagerMode.SoftwarePageTable: + return new ArmProcessContext(new MemoryManager(addressSpaceSize, invalidAccessHandler)); + + case MemoryManagerMode.HostMapped: + case MemoryManagerMode.HostMappedUnsafe: + bool unsafeMode = mode == MemoryManagerMode.HostMappedUnsafe; + return new ArmProcessContext(new MemoryManagerHostMapped(addressSpaceSize, unsafeMode, invalidAccessHandler)); + + default: + throw new ArgumentOutOfRangeException(); + } } } } diff --git a/Ryujinx.HLE/HOS/Font/SharedFontManager.cs b/Ryujinx.HLE/HOS/Font/SharedFontManager.cs index d5763ff88a..fda1633fc9 100644 --- a/Ryujinx.HLE/HOS/Font/SharedFontManager.cs +++ b/Ryujinx.HLE/HOS/Font/SharedFontManager.cs @@ -6,6 +6,7 @@ using LibHac.FsSystem.NcaUtils; using Ryujinx.HLE.Exceptions; using Ryujinx.HLE.FileSystem; using Ryujinx.HLE.FileSystem.Content; +using Ryujinx.HLE.HOS.Kernel.Memory; using System; using System.Buffers.Binary; using System.Collections.Generic; @@ -17,9 +18,9 @@ namespace Ryujinx.HLE.HOS.Font { class SharedFontManager { - private Switch _device; + private readonly Switch _device; - private ulong _physicalAddress; + private readonly SharedMemoryStorage _storage; private struct FontInfo { @@ -35,10 +36,10 @@ namespace Ryujinx.HLE.HOS.Font private Dictionary _fontData; - public SharedFontManager(Switch device, ulong physicalAddress) + public SharedFontManager(Switch device, SharedMemoryStorage storage) { - _physicalAddress = physicalAddress; - _device = device; + _device = device; + _storage = storage; } public void Initialize(ContentManager contentManager) @@ -52,7 +53,7 @@ namespace Ryujinx.HLE.HOS.Font { if (_fontData == null) { - _device.Memory.ZeroFill(_physicalAddress, Horizon.FontSize); + _storage.ZeroFill(); uint fontOffset = 0; @@ -80,7 +81,7 @@ namespace Ryujinx.HLE.HOS.Font FontInfo info = new FontInfo((int)fontOffset, data.Length); - WriteMagicAndSize(_physicalAddress + fontOffset, data.Length); + WriteMagicAndSize(fontOffset, data.Length); fontOffset += 8; @@ -88,7 +89,7 @@ namespace Ryujinx.HLE.HOS.Font for (; fontOffset - start < data.Length; fontOffset++) { - _device.Memory.Write(_physicalAddress + fontOffset, data[fontOffset - start]); + _storage.GetRef(fontOffset) = data[fontOffset - start]; } return info; @@ -129,15 +130,15 @@ namespace Ryujinx.HLE.HOS.Font } } - private void WriteMagicAndSize(ulong address, int size) + private void WriteMagicAndSize(ulong offset, int size) { const int decMagic = 0x18029a7f; const int key = 0x49621806; int encryptedSize = BinaryPrimitives.ReverseEndianness(size ^ key); - _device.Memory.Write(address + 0, decMagic); - _device.Memory.Write(address + 4, encryptedSize); + _storage.GetRef(offset + 0) = decMagic; + _storage.GetRef(offset + 4) = encryptedSize; } public int GetFontSize(SharedFontType fontType) diff --git 
a/Ryujinx.HLE/HOS/Horizon.cs b/Ryujinx.HLE/HOS/Horizon.cs index 13d7a2afc4..6a3a9e21c7 100644 --- a/Ryujinx.HLE/HOS/Horizon.cs +++ b/Ryujinx.HLE/HOS/Horizon.cs @@ -102,7 +102,7 @@ namespace Ryujinx.HLE.HOS public int GlobalAccessLogMode { get; set; } - internal ulong HidBaseAddress { get; private set; } + internal SharedMemoryStorage HidStorage { get; private set; } internal NvHostSyncpt HostSyncpoint { get; private set; } @@ -127,38 +127,43 @@ namespace Ryujinx.HLE.HOS // Note: This is not really correct, but with HLE of services, the only memory // region used that is used is Application, so we can use the other ones for anything. - KMemoryRegionManager region = KernelContext.MemoryRegions[(int)MemoryRegion.NvServices]; + KMemoryRegionManager region = KernelContext.MemoryManager.MemoryRegions[(int)MemoryRegion.NvServices]; ulong hidPa = region.Address; ulong fontPa = region.Address + HidSize; ulong iirsPa = region.Address + HidSize + FontSize; ulong timePa = region.Address + HidSize + FontSize + IirsSize; - HidBaseAddress = hidPa - DramMemoryMap.DramBase; - KPageList hidPageList = new KPageList(); KPageList fontPageList = new KPageList(); KPageList iirsPageList = new KPageList(); KPageList timePageList = new KPageList(); - hidPageList .AddRange(hidPa, HidSize / KMemoryManager.PageSize); - fontPageList.AddRange(fontPa, FontSize / KMemoryManager.PageSize); - iirsPageList.AddRange(iirsPa, IirsSize / KMemoryManager.PageSize); - timePageList.AddRange(timePa, TimeSize / KMemoryManager.PageSize); + hidPageList.AddRange(hidPa, HidSize / KPageTableBase.PageSize); + fontPageList.AddRange(fontPa, FontSize / KPageTableBase.PageSize); + iirsPageList.AddRange(iirsPa, IirsSize / KPageTableBase.PageSize); + timePageList.AddRange(timePa, TimeSize / KPageTableBase.PageSize); - HidSharedMem = new KSharedMemory(KernelContext, hidPageList, 0, 0, KMemoryPermission.Read); - FontSharedMem = new KSharedMemory(KernelContext, fontPageList, 0, 0, KMemoryPermission.Read); - IirsSharedMem = new KSharedMemory(KernelContext, iirsPageList, 0, 0, KMemoryPermission.Read); + var hidStorage = new SharedMemoryStorage(KernelContext, hidPageList); + var fontStorage = new SharedMemoryStorage(KernelContext, fontPageList); + var iirsStorage = new SharedMemoryStorage(KernelContext, iirsPageList); + var timeStorage = new SharedMemoryStorage(KernelContext, timePageList); - KSharedMemory timeSharedMemory = new KSharedMemory(KernelContext, timePageList, 0, 0, KMemoryPermission.Read); + HidStorage = hidStorage; - TimeServiceManager.Instance.Initialize(device, this, timeSharedMemory, timePa - DramMemoryMap.DramBase, TimeSize); + HidSharedMem = new KSharedMemory(KernelContext, hidStorage, 0, 0, KMemoryPermission.Read); + FontSharedMem = new KSharedMemory(KernelContext, fontStorage, 0, 0, KMemoryPermission.Read); + IirsSharedMem = new KSharedMemory(KernelContext, iirsStorage, 0, 0, KMemoryPermission.Read); + + KSharedMemory timeSharedMemory = new KSharedMemory(KernelContext, timeStorage, 0, 0, KMemoryPermission.Read); + + TimeServiceManager.Instance.Initialize(device, this, timeSharedMemory, timeStorage, TimeSize); AppletState = new AppletStateMgr(this); AppletState.SetFocus(true); - Font = new SharedFontManager(device, fontPa - DramMemoryMap.DramBase); + Font = new SharedFontManager(device, fontStorage); VsyncEvent = new KEvent(KernelContext); @@ -397,6 +402,7 @@ namespace Ryujinx.HLE.HOS foreach (KProcess process in KernelContext.Processes.Values.Where(x => x.Flags.HasFlag(ProcessCreationFlags.IsApplication))) { process.Terminate(); + 
process.DecrementReferenceCount(); } // The application existed, now surface flinger can exit too. @@ -407,7 +413,10 @@ namespace Ryujinx.HLE.HOS foreach (KProcess process in KernelContext.Processes.Values.Where(x => !x.Flags.HasFlag(ProcessCreationFlags.IsApplication))) { process.Terminate(); + process.DecrementReferenceCount(); } + + KernelContext.Processes.Clear(); } // Exit ourself now! diff --git a/Ryujinx.HLE/HOS/Kernel/Ipc/KBufferDescriptorTable.cs b/Ryujinx.HLE/HOS/Kernel/Ipc/KBufferDescriptorTable.cs index fbd3284591..a4e7f5895d 100644 --- a/Ryujinx.HLE/HOS/Kernel/Ipc/KBufferDescriptorTable.cs +++ b/Ryujinx.HLE/HOS/Kernel/Ipc/KBufferDescriptorTable.cs @@ -47,7 +47,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc return KernelResult.OutOfMemory; } - public KernelResult CopyBuffersToClient(KMemoryManager memoryManager) + public KernelResult CopyBuffersToClient(KPageTableBase memoryManager) { KernelResult result = CopyToClient(memoryManager, _receiveBufferDescriptors); @@ -59,7 +59,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc return CopyToClient(memoryManager, _exchangeBufferDescriptors); } - private KernelResult CopyToClient(KMemoryManager memoryManager, List list) + private KernelResult CopyToClient(KPageTableBase memoryManager, List list) { foreach (KBufferDescriptor desc in list) { @@ -81,8 +81,8 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc attributeMask |= MemoryAttribute.DeviceMapped; } - ulong clientAddrTruncated = BitUtils.AlignDown(desc.ClientAddress, KMemoryManager.PageSize); - ulong clientAddrRounded = BitUtils.AlignUp (desc.ClientAddress, KMemoryManager.PageSize); + ulong clientAddrTruncated = BitUtils.AlignDown(desc.ClientAddress, KPageTableBase.PageSize); + ulong clientAddrRounded = BitUtils.AlignUp (desc.ClientAddress, KPageTableBase.PageSize); // Check if address is not aligned, in this case we need to perform 2 copies. 
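// (Only the unaligned head and tail portions need an explicit copy back to the
// client; the page-aligned middle of the buffer was mapped into the server
// process directly, so its contents are already visible to the client.)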
if (clientAddrTruncated != clientAddrRounded) @@ -113,9 +113,9 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc ulong clientEndAddr = desc.ClientAddress + desc.Size; ulong serverEndAddr = desc.ServerAddress + desc.Size; - ulong clientEndAddrTruncated = BitUtils.AlignDown(clientEndAddr, KMemoryManager.PageSize); - ulong clientEndAddrRounded = BitUtils.AlignUp (clientEndAddr, KMemoryManager.PageSize); - ulong serverEndAddrTruncated = BitUtils.AlignDown(serverEndAddr, KMemoryManager.PageSize); + ulong clientEndAddrTruncated = BitUtils.AlignDown(clientEndAddr, KPageTableBase.PageSize); + ulong clientEndAddrRounded = BitUtils.AlignUp (clientEndAddr, KPageTableBase.PageSize); + ulong serverEndAddrTruncated = BitUtils.AlignDown(serverEndAddr, KPageTableBase.PageSize); if (clientEndAddrTruncated < clientEndAddrRounded && (clientAddrTruncated == clientAddrRounded || clientAddrTruncated < clientEndAddrTruncated)) @@ -140,7 +140,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc return KernelResult.Success; } - public KernelResult UnmapServerBuffers(KMemoryManager memoryManager) + public KernelResult UnmapServerBuffers(KPageTableBase memoryManager) { KernelResult result = UnmapServer(memoryManager, _sendBufferDescriptors); @@ -159,7 +159,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc return UnmapServer(memoryManager, _exchangeBufferDescriptors); } - private KernelResult UnmapServer(KMemoryManager memoryManager, List list) + private KernelResult UnmapServer(KPageTableBase memoryManager, List list) { foreach (KBufferDescriptor descriptor in list) { @@ -177,7 +177,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc return KernelResult.Success; } - public KernelResult RestoreClientBuffers(KMemoryManager memoryManager) + public KernelResult RestoreClientBuffers(KPageTableBase memoryManager) { KernelResult result = RestoreClient(memoryManager, _sendBufferDescriptors); @@ -196,7 +196,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc return RestoreClient(memoryManager, _exchangeBufferDescriptors); } - private KernelResult RestoreClient(KMemoryManager memoryManager, List list) + private KernelResult RestoreClient(KPageTableBase memoryManager, List list) { foreach (KBufferDescriptor descriptor in list) { diff --git a/Ryujinx.HLE/HOS/Kernel/Ipc/KServerSession.cs b/Ryujinx.HLE/HOS/Kernel/Ipc/KServerSession.cs index e97309d9e4..e28677ffe1 100644 --- a/Ryujinx.HLE/HOS/Kernel/Ipc/KServerSession.cs +++ b/Ryujinx.HLE/HOS/Kernel/Ipc/KServerSession.cs @@ -19,10 +19,9 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc private struct Message { - public ulong Address { get; } - public ulong DramAddress { get; } - public ulong Size { get; } - public bool IsCustom { get; } + public ulong Address { get; } + public ulong Size { get; } + public bool IsCustom { get; } public Message(KThread thread, ulong customCmdBuffAddress, ulong customCmdBuffSize) { @@ -32,16 +31,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc { Address = customCmdBuffAddress; Size = customCmdBuffSize; - - KProcess process = thread.Owner; - - DramAddress = process.MemoryManager.GetDramAddressFromVa(Address); } else { - Address = thread.TlsAddress; - DramAddress = thread.TlsDramAddress; - Size = 0x100; + Address = thread.TlsAddress; + Size = 0x100; } } @@ -252,7 +246,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc Message clientMsg = new Message(request); Message serverMsg = new Message(serverThread, customCmdBuffAddr, customCmdBuffSize); - MessageHeader clientHeader = GetClientMessageHeader(clientMsg); + MessageHeader clientHeader = GetClientMessageHeader(clientProcess, clientMsg); MessageHeader serverHeader = 
GetServerMessageHeader(serverMsg); KernelResult serverResult = KernelResult.NotFound; @@ -318,6 +312,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc } ulong[] receiveList = GetReceiveList( + serverProcess, serverMsg, serverHeader.ReceiveListType, serverHeader.ReceiveListOffset); @@ -351,7 +346,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc for (int index = 0; index < clientHeader.CopyHandlesCount; index++) { int newHandle = 0; - int handle = KernelContext.Memory.Read(clientMsg.DramAddress + offset * 4); + int handle = clientProcess.CpuMemory.Read(clientMsg.Address + offset * 4); if (clientResult == KernelResult.Success && handle != 0) { @@ -366,7 +361,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc for (int index = 0; index < clientHeader.MoveHandlesCount; index++) { int newHandle = 0; - int handle = KernelContext.Memory.Read(clientMsg.DramAddress + offset * 4); + int handle = clientProcess.CpuMemory.Read(clientMsg.Address + offset * 4); if (handle != 0) { @@ -402,7 +397,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc for (int index = 0; index < clientHeader.PointerBuffersCount; index++) { - ulong pointerDesc = KernelContext.Memory.Read(clientMsg.DramAddress + offset * 4); + ulong pointerDesc = clientProcess.CpuMemory.Read(clientMsg.Address + offset * 4); PointerBufferDesc descriptor = new PointerBufferDesc(pointerDesc); @@ -461,11 +456,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc for (int index = 0; index < totalBuffersCount; index++) { - ulong clientDescAddress = clientMsg.DramAddress + offset * 4; + ulong clientDescAddress = clientMsg.Address + offset * 4; - uint descWord0 = KernelContext.Memory.Read(clientDescAddress + 0); - uint descWord1 = KernelContext.Memory.Read(clientDescAddress + 4); - uint descWord2 = KernelContext.Memory.Read(clientDescAddress + 8); + uint descWord0 = clientProcess.CpuMemory.Read(clientDescAddress + 0); + uint descWord1 = clientProcess.CpuMemory.Read(clientDescAddress + 4); + uint descWord2 = clientProcess.CpuMemory.Read(clientDescAddress + 8); bool isSendDesc = index < clientHeader.SendBuffersCount; bool isExchangeDesc = index >= clientHeader.SendBuffersCount + clientHeader.ReceiveBuffersCount; @@ -575,10 +570,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc } else { - copySrc = clientProcess.MemoryManager.GetDramAddressFromVa(copySrc); - copyDst = serverProcess.MemoryManager.GetDramAddressFromVa(copyDst); - - KernelContext.Memory.Copy(copyDst, copySrc, copySize); + serverProcess.CpuMemory.Write(copyDst, clientProcess.CpuMemory.GetSpan(copySrc, (int)copySize)); } if (clientResult != KernelResult.Success) @@ -623,7 +615,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc Message clientMsg = new Message(request); Message serverMsg = new Message(serverThread, customCmdBuffAddr, customCmdBuffSize); - MessageHeader clientHeader = GetClientMessageHeader(clientMsg); + MessageHeader clientHeader = GetClientMessageHeader(clientProcess, clientMsg); MessageHeader serverHeader = GetServerMessageHeader(serverMsg); KernelResult clientResult = KernelResult.Success; @@ -683,6 +675,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc // Read receive list. ulong[] receiveList = GetReceiveList( + clientProcess, clientMsg, clientHeader.ReceiveListType, clientHeader.ReceiveListOffset); @@ -698,8 +691,8 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc } // Copy header. 
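// (The message header is three 32-bit words: word0 at +0, word1 at +4 and,
// when handle descriptors are present, word2 at +8 followed by the handles,
// mirroring the reads in GetClientMessageHeader further down.)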
- KernelContext.Memory.Write(clientMsg.DramAddress + 0, serverHeader.Word0); - KernelContext.Memory.Write(clientMsg.DramAddress + 4, serverHeader.Word1); + clientProcess.CpuMemory.Write(clientMsg.Address + 0, serverHeader.Word0); + clientProcess.CpuMemory.Write(clientMsg.Address + 4, serverHeader.Word1); // Copy handles. uint offset; @@ -708,11 +701,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc { offset = 3; - KernelContext.Memory.Write(clientMsg.DramAddress + 8, serverHeader.Word2); + clientProcess.CpuMemory.Write(clientMsg.Address + 8, serverHeader.Word2); if (serverHeader.HasPid) { - KernelContext.Memory.Write(clientMsg.DramAddress + offset * 4, serverProcess.Pid); + clientProcess.CpuMemory.Write(clientMsg.Address + offset * 4, serverProcess.Pid); offset += 2; } @@ -728,7 +721,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc GetCopyObjectHandle(serverThread, clientProcess, handle, out newHandle); } - KernelContext.Memory.Write(clientMsg.DramAddress + offset * 4, newHandle); + clientProcess.CpuMemory.Write(clientMsg.Address + offset * 4, newHandle); offset++; } @@ -751,7 +744,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc } } - KernelContext.Memory.Write(clientMsg.DramAddress + offset * 4, newHandle); + clientProcess.CpuMemory.Write(clientMsg.Address + offset * 4, newHandle); offset++; } @@ -808,7 +801,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc } } - ulong dstDescAddress = clientMsg.DramAddress + offset * 4; + ulong dstDescAddress = clientMsg.Address + offset * 4; ulong clientPointerDesc = (recvListBufferAddress << 32) | @@ -817,7 +810,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc clientPointerDesc |= pointerDesc & 0xffff000f; - KernelContext.Memory.Write(dstDescAddress + 0, clientPointerDesc); + clientProcess.CpuMemory.Write(dstDescAddress + 0, clientPointerDesc); offset += 2; } @@ -830,11 +823,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc for (int index = 0; index < totalBuffersCount; index++) { - ulong dstDescAddress = clientMsg.DramAddress + offset * 4; + ulong dstDescAddress = clientMsg.Address + offset * 4; - KernelContext.Memory.Write(dstDescAddress + 0, 0); - KernelContext.Memory.Write(dstDescAddress + 4, 0); - KernelContext.Memory.Write(dstDescAddress + 8, 0); + clientProcess.CpuMemory.Write(dstDescAddress + 0, 0); + clientProcess.CpuMemory.Write(dstDescAddress + 4, 0); + clientProcess.CpuMemory.Write(dstDescAddress + 8, 0); offset += 3; } @@ -865,10 +858,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc } else { - copyDst = clientProcess.MemoryManager.GetDramAddressFromVa(copyDst); - copySrc = serverProcess.MemoryManager.GetDramAddressFromVa(copySrc); - - KernelContext.Memory.Copy(copyDst, copySrc, copySize); + clientProcess.CpuMemory.Write(copyDst, serverProcess.CpuMemory.GetSpan(copySrc, (int)copySize)); } } @@ -878,11 +868,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc return serverResult; } - private MessageHeader GetClientMessageHeader(Message clientMsg) + private MessageHeader GetClientMessageHeader(KProcess clientProcess, Message clientMsg) { - uint word0 = KernelContext.Memory.Read(clientMsg.DramAddress + 0); - uint word1 = KernelContext.Memory.Read(clientMsg.DramAddress + 4); - uint word2 = KernelContext.Memory.Read(clientMsg.DramAddress + 8); + uint word0 = clientProcess.CpuMemory.Read(clientMsg.Address + 0); + uint word1 = clientProcess.CpuMemory.Read(clientMsg.Address + 4); + uint word2 = clientProcess.CpuMemory.Read(clientMsg.Address + 8); return new MessageHeader(word0, word1, word2); } @@ -949,7 +939,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc } } - private ulong[] GetReceiveList(Message message, 
uint recvListType, uint recvListOffset) + private ulong[] GetReceiveList(KProcess ownerProcess, Message message, uint recvListType, uint recvListOffset) { int recvListSize = 0; @@ -964,11 +954,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc ulong[] receiveList = new ulong[recvListSize]; - ulong recvListAddress = message.DramAddress + recvListOffset; + ulong recvListAddress = message.Address + recvListOffset; for (int index = 0; index < recvListSize; index++) { - receiveList[index] = KernelContext.Memory.Read(recvListAddress + (ulong)index * 8); + receiveList[index] = ownerProcess.CpuMemory.Read(recvListAddress + (ulong)index * 8); } return receiveList; @@ -1219,10 +1209,10 @@ namespace Ryujinx.HLE.HOS.Kernel.Ipc if (result != KernelResult.Success) { - ulong address = clientProcess.MemoryManager.GetDramAddressFromVa(request.CustomCmdBuffAddr); + ulong address = request.CustomCmdBuffAddr; - KernelContext.Memory.Write(address, 0); - KernelContext.Memory.Write(address + 8, (int)result); + clientProcess.CpuMemory.Write(address, 0); + clientProcess.CpuMemory.Write(address + 8, (int)result); } clientProcess.MemoryManager.UnborrowIpcBuffer(request.CustomCmdBuffAddr, request.CustomCmdBuffSize); diff --git a/Ryujinx.HLE/HOS/Kernel/KernelConstants.cs b/Ryujinx.HLE/HOS/Kernel/KernelConstants.cs index 4b5d3a3244..5a1dbef26f 100644 --- a/Ryujinx.HLE/HOS/Kernel/KernelConstants.cs +++ b/Ryujinx.HLE/HOS/Kernel/KernelConstants.cs @@ -10,7 +10,7 @@ namespace Ryujinx.HLE.HOS.Kernel public const int MemoryBlockAllocatorSize = 0x2710; public const ulong UserSlabHeapBase = DramMemoryMap.SlabHeapBase; - public const ulong UserSlabHeapItemSize = KMemoryManager.PageSize; + public const ulong UserSlabHeapItemSize = KPageTableBase.PageSize; public const ulong UserSlabHeapSize = 0x3de000; } } diff --git a/Ryujinx.HLE/HOS/Kernel/KernelContext.cs b/Ryujinx.HLE/HOS/Kernel/KernelContext.cs index b57b950473..4f18faca68 100644 --- a/Ryujinx.HLE/HOS/Kernel/KernelContext.cs +++ b/Ryujinx.HLE/HOS/Kernel/KernelContext.cs @@ -28,10 +28,10 @@ namespace Ryujinx.HLE.HOS.Kernel public KResourceLimit ResourceLimit { get; } - public KMemoryRegionManager[] MemoryRegions { get; } + public KMemoryManager MemoryManager { get; } - public KMemoryBlockAllocator LargeMemoryBlockAllocator { get; } - public KMemoryBlockAllocator SmallMemoryBlockAllocator { get; } + public KMemoryBlockSlabManager LargeMemoryBlockSlabManager { get; } + public KMemoryBlockSlabManager SmallMemoryBlockSlabManager { get; } public KSlabHeap UserSlabHeapPages { get; } @@ -70,16 +70,18 @@ namespace Ryujinx.HLE.HOS.Kernel KernelInit.InitializeResourceLimit(ResourceLimit, memorySize); - MemoryRegions = KernelInit.GetMemoryRegions(memorySize, memoryArrange); + MemoryManager = new KMemoryManager(memorySize, memoryArrange); - LargeMemoryBlockAllocator = new KMemoryBlockAllocator(KernelConstants.MemoryBlockAllocatorSize * 2); - SmallMemoryBlockAllocator = new KMemoryBlockAllocator(KernelConstants.MemoryBlockAllocatorSize); + LargeMemoryBlockSlabManager = new KMemoryBlockSlabManager(KernelConstants.MemoryBlockAllocatorSize * 2); + SmallMemoryBlockSlabManager = new KMemoryBlockSlabManager(KernelConstants.MemoryBlockAllocatorSize); UserSlabHeapPages = new KSlabHeap( KernelConstants.UserSlabHeapBase, KernelConstants.UserSlabHeapItemSize, KernelConstants.UserSlabHeapSize); + memory.Commit(KernelConstants.UserSlabHeapBase - DramMemoryMap.DramBase, KernelConstants.UserSlabHeapSize); + CriticalSection = new KCriticalSection(this); Schedulers = new KScheduler[KScheduler.CpuCoresCount]; 
             PriorityQueue = new KPriorityQueue();
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/DramMemoryMap.cs b/Ryujinx.HLE/HOS/Kernel/Memory/DramMemoryMap.cs
index dea2a4efea..4941d5b783 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/DramMemoryMap.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/DramMemoryMap.cs
@@ -9,5 +9,10 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
         public const ulong SlabHeapBase = KernelReserveBase + 0x85000;
         public const ulong SlapHeapSize = 0xa21000;
         public const ulong SlabHeapEnd = SlabHeapBase + SlapHeapSize;
+
+        public static bool IsHeapPhysicalAddress(ulong address)
+        {
+            return address >= SlabHeapEnd;
+        }
     }
 }
\ No newline at end of file
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlock.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlock.cs
index b93b68d94a..b612022cc8 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlock.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlock.cs
@@ -84,7 +84,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
         {
             ulong leftAddress = BaseAddress;
 
-            ulong leftPagesCount = (address - leftAddress) / KMemoryManager.PageSize;
+            ulong leftPagesCount = (address - leftAddress) / KPageTableBase.PageSize;
 
             BaseAddress = address;
 
@@ -107,7 +107,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
 
         public KMemoryInfo GetInfo()
         {
-            ulong size = PagesCount * KMemoryManager.PageSize;
+            ulong size = PagesCount * KPageTableBase.PageSize;
 
             return new KMemoryInfo(
                 BaseAddress,
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockManager.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockManager.cs
new file mode 100644
index 0000000000..c0d11a959a
--- /dev/null
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockManager.cs
@@ -0,0 +1,329 @@
+using Ryujinx.HLE.HOS.Kernel.Common;
+using System.Collections.Generic;
+using System.Diagnostics;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+    class KMemoryBlockManager
+    {
+        private const int PageSize = KPageTableBase.PageSize;
+
+        private readonly LinkedList<KMemoryBlock> _blocks;
+
+        public int BlocksCount => _blocks.Count;
+
+        private KMemoryBlockSlabManager _slabManager;
+
+        private ulong _addrSpaceEnd;
+
+        public KMemoryBlockManager()
+        {
+            _blocks = new LinkedList<KMemoryBlock>();
+        }
+
+        public KernelResult Initialize(ulong addrSpaceStart, ulong addrSpaceEnd, KMemoryBlockSlabManager slabManager)
+        {
+            _slabManager = slabManager;
+            _addrSpaceEnd = addrSpaceEnd;
+
+            // First insertion will always need only a single block,
+            // because there's nothing else to split.
+            if (!slabManager.CanAllocate(1))
+            {
+                return KernelResult.OutOfResource;
+            }
+
+            ulong addrSpacePagesCount = (addrSpaceEnd - addrSpaceStart) / PageSize;
+
+            _blocks.AddFirst(new KMemoryBlock(
+                addrSpaceStart,
+                addrSpacePagesCount,
+                MemoryState.Unmapped,
+                KMemoryPermission.None,
+                MemoryAttribute.None));
+
+            return KernelResult.Success;
+        }
+
+        public void InsertBlock(
+            ulong baseAddress,
+            ulong pagesCount,
+            MemoryState oldState,
+            KMemoryPermission oldPermission,
+            MemoryAttribute oldAttribute,
+            MemoryState newState,
+            KMemoryPermission newPermission,
+            MemoryAttribute newAttribute)
+        {
+            // Insert new block on the list only on areas where the state
+            // of the block matches the state specified on the old* state
+            // arguments, otherwise leave it as is.
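+            //
+            // Illustrative example (hypothetical addresses): if a single block
+            // covers 0x0000-0xFFFF as (Unmapped, None, None) and this overload
+            // is called for 0x4000-0x7FFF with matching old* values, the block
+            // is split by SplitRightAtAddress into pieces starting at 0x0000,
+            // 0x4000 and 0x8000, and only the middle piece takes the new
+            // state; neighbors that end up with identical state are merged
+            // back together afterwards by MergeEqualStateNeighbors.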
+            int oldCount = _blocks.Count;
+
+            oldAttribute |= MemoryAttribute.IpcAndDeviceMapped;
+
+            ulong endAddr = baseAddress + pagesCount * PageSize;
+
+            LinkedListNode<KMemoryBlock> node = _blocks.First;
+
+            while (node != null)
+            {
+                LinkedListNode<KMemoryBlock> newNode = node;
+
+                KMemoryBlock currBlock = node.Value;
+
+                ulong currBaseAddr = currBlock.BaseAddress;
+                ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr;
+
+                if (baseAddress < currEndAddr && currBaseAddr < endAddr)
+                {
+                    MemoryAttribute currBlockAttr = currBlock.Attribute | MemoryAttribute.IpcAndDeviceMapped;
+
+                    if (currBlock.State != oldState ||
+                        currBlock.Permission != oldPermission ||
+                        currBlockAttr != oldAttribute)
+                    {
+                        node = node.Next;
+
+                        continue;
+                    }
+
+                    if (baseAddress > currBaseAddr)
+                    {
+                        _blocks.AddBefore(node, currBlock.SplitRightAtAddress(baseAddress));
+                    }
+
+                    if (endAddr < currEndAddr)
+                    {
+                        newNode = _blocks.AddBefore(node, currBlock.SplitRightAtAddress(endAddr));
+                    }
+
+                    newNode.Value.SetState(newPermission, newState, newAttribute);
+
+                    newNode = MergeEqualStateNeighbors(newNode);
+                }
+
+                if (currEndAddr - 1 >= endAddr - 1)
+                {
+                    break;
+                }
+
+                node = newNode.Next;
+            }
+
+            _slabManager.Count += _blocks.Count - oldCount;
+
+            ValidateInternalState();
+        }
+
+        public void InsertBlock(
+            ulong baseAddress,
+            ulong pagesCount,
+            MemoryState state,
+            KMemoryPermission permission = KMemoryPermission.None,
+            MemoryAttribute attribute = MemoryAttribute.None)
+        {
+            // Inserts new block at the list, replacing and splitting
+            // existing blocks as needed.
+            int oldCount = _blocks.Count;
+
+            ulong endAddr = baseAddress + pagesCount * PageSize;
+
+            LinkedListNode<KMemoryBlock> node = _blocks.First;
+
+            while (node != null)
+            {
+                LinkedListNode<KMemoryBlock> newNode = node;
+
+                KMemoryBlock currBlock = node.Value;
+
+                ulong currBaseAddr = currBlock.BaseAddress;
+                ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr;
+
+                if (baseAddress < currEndAddr && currBaseAddr < endAddr)
+                {
+                    if (baseAddress > currBaseAddr)
+                    {
+                        _blocks.AddBefore(node, currBlock.SplitRightAtAddress(baseAddress));
+                    }
+
+                    if (endAddr < currEndAddr)
+                    {
+                        newNode = _blocks.AddBefore(node, currBlock.SplitRightAtAddress(endAddr));
+                    }
+
+                    newNode.Value.SetState(permission, state, attribute);
+
+                    newNode = MergeEqualStateNeighbors(newNode);
+                }
+
+                if (currEndAddr - 1 >= endAddr - 1)
+                {
+                    break;
+                }
+
+                node = newNode.Next;
+            }
+
+            _slabManager.Count += _blocks.Count - oldCount;
+
+            ValidateInternalState();
+        }
+
+        public delegate void BlockMutator(KMemoryBlock block, KMemoryPermission newPerm);
+
+        public void InsertBlock(
+            ulong baseAddress,
+            ulong pagesCount,
+            BlockMutator blockMutate,
+            KMemoryPermission permission = KMemoryPermission.None)
+        {
+            // Inserts new block at the list, replacing and splitting
+            // existing blocks as needed, then calling the callback
+            // function on the new block.
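+            //
+            // Illustrative note: callers in this change pass method groups
+            // such as SetIpcMappingPermissions or RestoreIpcMappingPermissions
+            // (see the IPC mapping paths) so that per-block permissions and
+            // IPC reference counts can be updated in a single pass over the
+            // affected range, instead of inserting blocks with a fixed state.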
+            int oldCount = _blocks.Count;
+
+            ulong endAddr = baseAddress + pagesCount * PageSize;
+
+            LinkedListNode<KMemoryBlock> node = _blocks.First;
+
+            while (node != null)
+            {
+                LinkedListNode<KMemoryBlock> newNode = node;
+
+                KMemoryBlock currBlock = node.Value;
+
+                ulong currBaseAddr = currBlock.BaseAddress;
+                ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr;
+
+                if (baseAddress < currEndAddr && currBaseAddr < endAddr)
+                {
+                    if (baseAddress > currBaseAddr)
+                    {
+                        _blocks.AddBefore(node, currBlock.SplitRightAtAddress(baseAddress));
+                    }
+
+                    if (endAddr < currEndAddr)
+                    {
+                        newNode = _blocks.AddBefore(node, currBlock.SplitRightAtAddress(endAddr));
+                    }
+
+                    KMemoryBlock newBlock = newNode.Value;
+
+                    blockMutate(newBlock, permission);
+
+                    newNode = MergeEqualStateNeighbors(newNode);
+                }
+
+                if (currEndAddr - 1 >= endAddr - 1)
+                {
+                    break;
+                }
+
+                node = newNode.Next;
+            }
+
+            _slabManager.Count += _blocks.Count - oldCount;
+
+            ValidateInternalState();
+        }
+
+        [Conditional("DEBUG")]
+        private void ValidateInternalState()
+        {
+            ulong expectedAddress = 0;
+
+            LinkedListNode<KMemoryBlock> node = _blocks.First;
+
+            while (node != null)
+            {
+                LinkedListNode<KMemoryBlock> newNode = node;
+
+                KMemoryBlock currBlock = node.Value;
+
+                Debug.Assert(currBlock.BaseAddress == expectedAddress);
+
+                expectedAddress = currBlock.BaseAddress + currBlock.PagesCount * PageSize;
+
+                node = newNode.Next;
+            }
+
+            Debug.Assert(expectedAddress == _addrSpaceEnd);
+        }
+
+        private LinkedListNode<KMemoryBlock> MergeEqualStateNeighbors(LinkedListNode<KMemoryBlock> node)
+        {
+            KMemoryBlock block = node.Value;
+
+            if (node.Previous != null)
+            {
+                KMemoryBlock previousBlock = node.Previous.Value;
+
+                if (BlockStateEquals(block, previousBlock))
+                {
+                    LinkedListNode<KMemoryBlock> previousNode = node.Previous;
+
+                    _blocks.Remove(node);
+
+                    previousBlock.AddPages(block.PagesCount);
+
+                    node = previousNode;
+                    block = previousBlock;
+                }
+            }
+
+            if (node.Next != null)
+            {
+                KMemoryBlock nextBlock = node.Next.Value;
+
+                if (BlockStateEquals(block, nextBlock))
+                {
+                    _blocks.Remove(node.Next);
+
+                    block.AddPages(nextBlock.PagesCount);
+                }
+            }
+
+            return node;
+        }
+
+        private static bool BlockStateEquals(KMemoryBlock lhs, KMemoryBlock rhs)
+        {
+            return lhs.State == rhs.State &&
+                   lhs.Permission == rhs.Permission &&
+                   lhs.Attribute == rhs.Attribute &&
+                   lhs.SourcePermission == rhs.SourcePermission &&
+                   lhs.DeviceRefCount == rhs.DeviceRefCount &&
+                   lhs.IpcRefCount == rhs.IpcRefCount;
+        }
+
+        public KMemoryBlock FindBlock(ulong address)
+        {
+            return FindBlockNode(address)?.Value;
+        }
+
+        public LinkedListNode<KMemoryBlock> FindBlockNode(ulong address)
+        {
+            lock (_blocks)
+            {
+                LinkedListNode<KMemoryBlock> node = _blocks.First;
+
+                while (node != null)
+                {
+                    KMemoryBlock block = node.Value;
+
+                    ulong currEndAddr = block.PagesCount * PageSize + block.BaseAddress;
+
+                    if (block.BaseAddress <= address && currEndAddr - 1 >= address)
+                    {
+                        return node;
+                    }
+
+                    node = node.Next;
+                }
+            }
+
+            return null;
+        }
+    }
+}
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockAllocator.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockSlabManager.cs
similarity index 77%
rename from Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockAllocator.cs
rename to Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockSlabManager.cs
index ae68bf3912..8732b507a0 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockAllocator.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryBlockSlabManager.cs
@@ -1,12 +1,12 @@
 namespace Ryujinx.HLE.HOS.Kernel.Memory
 {
-    class KMemoryBlockAllocator
+    class KMemoryBlockSlabManager
     {
         private ulong _capacityElements;
 
         public int Count { get; set; }
 
-        public KMemoryBlockAllocator(ulong capacityElements)
+        public KMemoryBlockSlabManager(ulong capacityElements)
         {
             _capacityElements = capacityElements;
         }
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryManager.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryManager.cs
index 5b6df53baf..6d0a165815 100644
--- a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryManager.cs
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryManager.cs
@@ -1,3335 +1,65 @@
-using Ryujinx.Common;
-using Ryujinx.HLE.HOS.Kernel.Common;
-using Ryujinx.HLE.HOS.Kernel.Process;
-using Ryujinx.Memory;
+using Ryujinx.HLE.HOS.Kernel.Common;
 using System;
-using System.Collections.Generic;
-using System.Diagnostics;
 
 namespace Ryujinx.HLE.HOS.Kernel.Memory
 {
     class KMemoryManager
     {
-        private static readonly int[] MappingUnitSizes = new int[]
+        public KMemoryRegionManager[] MemoryRegions { get; }
+
+        public KMemoryManager(MemorySize size, MemoryArrange arrange)
         {
-            0x1000,
-            0x10000,
-            0x200000,
-            0x400000,
-            0x2000000,
-            0x40000000
-        };
-
-        public const int PageSize = 0x1000;
-
-        private const int KMemoryBlockSize = 0x40;
-
-        // We need 2 blocks for the case where a big block
-        // needs to be split in 2, plus one block that will be the new one inserted.
-        private const int MaxBlocksNeededForInsertion = 2;
-
-        private readonly LinkedList<KMemoryBlock> _blocks;
-
-        private readonly IVirtualMemoryManager _cpuMemory;
-
-        private readonly KernelContext _context;
-
-        public ulong AddrSpaceStart { get; private set; }
-        public ulong AddrSpaceEnd { get; private set; }
-
-        public ulong CodeRegionStart { get; private set; }
-        public ulong CodeRegionEnd { get; private set; }
-
-        public ulong HeapRegionStart { get; private set; }
-        public ulong HeapRegionEnd { get; private set; }
-
-        private ulong _currentHeapAddr;
-
-        public ulong AliasRegionStart { get; private set; }
-        public ulong AliasRegionEnd { get; private set; }
-
-        public ulong StackRegionStart { get; private set; }
-        public ulong StackRegionEnd { get; private set; }
-
-        public ulong TlsIoRegionStart { get; private set; }
-        public ulong TlsIoRegionEnd { get; private set; }
-
-        private ulong _heapCapacity;
-
-        public ulong PhysicalMemoryUsage { get; private set; }
-
-        private MemoryRegion _memRegion;
-
-        private bool _aslrDisabled;
-
-        public int AddrSpaceWidth { get; private set; }
-
-        private bool _isKernel;
-
-        private bool _aslrEnabled;
-
-        private KMemoryBlockAllocator _blockAllocator;
-
-        private int _contextId;
-
-        private MersenneTwister _randomNumberGenerator;
-
-        public KMemoryManager(KernelContext context, IVirtualMemoryManager cpuMemory)
-        {
-            _context = context;
-            _cpuMemory = cpuMemory;
-
-            _blocks = new LinkedList<KMemoryBlock>();
-
-            _isKernel = false;
+            MemoryRegions = KernelInit.GetMemoryRegions(size, arrange);
         }
 
-        private static readonly int[] AddrSpaceSizes = new int[] { 32, 36, 32, 39 };
-
-        public KernelResult InitializeForProcess(
-            AddressSpaceType addrSpaceType,
-            bool aslrEnabled,
-            bool aslrDisabled,
-            MemoryRegion memRegion,
-            ulong address,
-            ulong size,
-            KMemoryBlockAllocator blockAllocator)
+        private KMemoryRegionManager GetMemoryRegion(ulong address)
         {
-            if ((uint)addrSpaceType > (uint)AddressSpaceType.Addr39Bits)
+            for (int i = 0; i < MemoryRegions.Length; i++)
             {
-                throw new ArgumentException(nameof(addrSpaceType));
-            }
+                var region = MemoryRegions[i];
 
-            _contextId = _context.ContextIdManager.GetId();
-
-            ulong addrSpaceBase = 0;
-            ulong addrSpaceSize = 1UL << AddrSpaceSizes[(int)addrSpaceType];
-
-            KernelResult result = CreateUserAddressSpace(
-                addrSpaceType,
-                aslrEnabled,
-                aslrDisabled,
-
addrSpaceBase, - addrSpaceSize, - memRegion, - address, - size, - blockAllocator); - - if (result != KernelResult.Success) - { - _context.ContextIdManager.PutId(_contextId); - } - - return result; - } - - private class Region - { - public ulong Start; - public ulong End; - public ulong Size; - public ulong AslrOffset; - } - - private KernelResult CreateUserAddressSpace( - AddressSpaceType addrSpaceType, - bool aslrEnabled, - bool aslrDisabled, - ulong addrSpaceStart, - ulong addrSpaceEnd, - MemoryRegion memRegion, - ulong address, - ulong size, - KMemoryBlockAllocator blockAllocator) - { - ulong endAddr = address + size; - - Region aliasRegion = new Region(); - Region heapRegion = new Region(); - Region stackRegion = new Region(); - Region tlsIoRegion = new Region(); - - ulong codeRegionSize; - ulong stackAndTlsIoStart; - ulong stackAndTlsIoEnd; - ulong baseAddress; - - switch (addrSpaceType) - { - case AddressSpaceType.Addr32Bits: - aliasRegion.Size = 0x40000000; - heapRegion.Size = 0x40000000; - stackRegion.Size = 0; - tlsIoRegion.Size = 0; - CodeRegionStart = 0x200000; - codeRegionSize = 0x3fe00000; - stackAndTlsIoStart = 0x200000; - stackAndTlsIoEnd = 0x40000000; - baseAddress = 0x200000; - AddrSpaceWidth = 32; - break; - - case AddressSpaceType.Addr36Bits: - aliasRegion.Size = 0x180000000; - heapRegion.Size = 0x180000000; - stackRegion.Size = 0; - tlsIoRegion.Size = 0; - CodeRegionStart = 0x8000000; - codeRegionSize = 0x78000000; - stackAndTlsIoStart = 0x8000000; - stackAndTlsIoEnd = 0x80000000; - baseAddress = 0x8000000; - AddrSpaceWidth = 36; - break; - - case AddressSpaceType.Addr32BitsNoMap: - aliasRegion.Size = 0; - heapRegion.Size = 0x80000000; - stackRegion.Size = 0; - tlsIoRegion.Size = 0; - CodeRegionStart = 0x200000; - codeRegionSize = 0x3fe00000; - stackAndTlsIoStart = 0x200000; - stackAndTlsIoEnd = 0x40000000; - baseAddress = 0x200000; - AddrSpaceWidth = 32; - break; - - case AddressSpaceType.Addr39Bits: - aliasRegion.Size = 0x1000000000; - heapRegion.Size = 0x180000000; - stackRegion.Size = 0x80000000; - tlsIoRegion.Size = 0x1000000000; - CodeRegionStart = BitUtils.AlignDown(address, 0x200000); - codeRegionSize = BitUtils.AlignUp (endAddr, 0x200000) - CodeRegionStart; - stackAndTlsIoStart = 0; - stackAndTlsIoEnd = 0; - baseAddress = 0x8000000; - AddrSpaceWidth = 39; - break; - - default: throw new ArgumentException(nameof(addrSpaceType)); - } - - CodeRegionEnd = CodeRegionStart + codeRegionSize; - - ulong mapBaseAddress; - ulong mapAvailableSize; - - if (CodeRegionStart - baseAddress >= addrSpaceEnd - CodeRegionEnd) - { - // Has more space before the start of the code region. - mapBaseAddress = baseAddress; - mapAvailableSize = CodeRegionStart - baseAddress; - } - else - { - // Has more space after the end of the code region. 
- mapBaseAddress = CodeRegionEnd; - mapAvailableSize = addrSpaceEnd - CodeRegionEnd; - } - - ulong mapTotalSize = aliasRegion.Size + heapRegion.Size + stackRegion.Size + tlsIoRegion.Size; - - ulong aslrMaxOffset = mapAvailableSize - mapTotalSize; - - _aslrEnabled = aslrEnabled; - - AddrSpaceStart = addrSpaceStart; - AddrSpaceEnd = addrSpaceEnd; - - _blockAllocator = blockAllocator; - - if (mapAvailableSize < mapTotalSize) - { - return KernelResult.OutOfMemory; - } - - if (aslrEnabled) - { - aliasRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21; - heapRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21; - stackRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21; - tlsIoRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21; - } - - // Regions are sorted based on ASLR offset. - // When ASLR is disabled, the order is Map, Heap, NewMap and TlsIo. - aliasRegion.Start = mapBaseAddress + aliasRegion.AslrOffset; - aliasRegion.End = aliasRegion.Start + aliasRegion.Size; - heapRegion.Start = mapBaseAddress + heapRegion.AslrOffset; - heapRegion.End = heapRegion.Start + heapRegion.Size; - stackRegion.Start = mapBaseAddress + stackRegion.AslrOffset; - stackRegion.End = stackRegion.Start + stackRegion.Size; - tlsIoRegion.Start = mapBaseAddress + tlsIoRegion.AslrOffset; - tlsIoRegion.End = tlsIoRegion.Start + tlsIoRegion.Size; - - SortRegion(heapRegion, aliasRegion); - - if (stackRegion.Size != 0) - { - SortRegion(stackRegion, aliasRegion); - SortRegion(stackRegion, heapRegion); - } - else - { - stackRegion.Start = stackAndTlsIoStart; - stackRegion.End = stackAndTlsIoEnd; - } - - if (tlsIoRegion.Size != 0) - { - SortRegion(tlsIoRegion, aliasRegion); - SortRegion(tlsIoRegion, heapRegion); - SortRegion(tlsIoRegion, stackRegion); - } - else - { - tlsIoRegion.Start = stackAndTlsIoStart; - tlsIoRegion.End = stackAndTlsIoEnd; - } - - AliasRegionStart = aliasRegion.Start; - AliasRegionEnd = aliasRegion.End; - HeapRegionStart = heapRegion.Start; - HeapRegionEnd = heapRegion.End; - StackRegionStart = stackRegion.Start; - StackRegionEnd = stackRegion.End; - TlsIoRegionStart = tlsIoRegion.Start; - TlsIoRegionEnd = tlsIoRegion.End; - - _currentHeapAddr = HeapRegionStart; - _heapCapacity = 0; - PhysicalMemoryUsage = 0; - - _memRegion = memRegion; - _aslrDisabled = aslrDisabled; - - return InitializeBlocks(addrSpaceStart, addrSpaceEnd); - } - - private ulong GetRandomValue(ulong min, ulong max) - { - return (ulong)GetRandomValue((long)min, (long)max); - } - - private long GetRandomValue(long min, long max) - { - if (_randomNumberGenerator == null) - { - _randomNumberGenerator = new MersenneTwister(0); - } - - return _randomNumberGenerator.GenRandomNumber(min, max); - } - - private static void SortRegion(Region lhs, Region rhs) - { - if (lhs.AslrOffset < rhs.AslrOffset) - { - rhs.Start += lhs.Size; - rhs.End += lhs.Size; - } - else - { - lhs.Start += rhs.Size; - lhs.End += rhs.Size; - } - } - - private KernelResult InitializeBlocks(ulong addrSpaceStart, ulong addrSpaceEnd) - { - // First insertion will always need only a single block, - // because there's nothing else to split. 
- if (!_blockAllocator.CanAllocate(1)) - { - return KernelResult.OutOfResource; - } - - ulong addrSpacePagesCount = (addrSpaceEnd - addrSpaceStart) / PageSize; - - _blocks.AddFirst(new KMemoryBlock( - addrSpaceStart, - addrSpacePagesCount, - MemoryState.Unmapped, - KMemoryPermission.None, - MemoryAttribute.None)); - - return KernelResult.Success; - } - - public KernelResult MapPages( - ulong address, - KPageList pageList, - MemoryState state, - KMemoryPermission permission) - { - ulong pagesCount = pageList.GetPagesCount(); - - ulong size = pagesCount * PageSize; - - if (!CanContain(address, size, state)) - { - return KernelResult.InvalidMemState; - } - - lock (_blocks) - { - if (!IsUnmapped(address, pagesCount * PageSize)) - { - return KernelResult.InvalidMemState; - } - - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion)) - { - return KernelResult.OutOfResource; - } - - KernelResult result = MapPages(address, pageList, permission); - - if (result == KernelResult.Success) - { - InsertBlock(address, pagesCount, state, permission); - } - - return result; - } - } - - public KernelResult UnmapPages(ulong address, KPageList pageList, MemoryState stateExpected) - { - ulong pagesCount = pageList.GetPagesCount(); - - ulong size = pagesCount * PageSize; - - ulong endAddr = address + size; - - ulong addrSpacePagesCount = (AddrSpaceEnd - AddrSpaceStart) / PageSize; - - if (AddrSpaceStart > address) - { - return KernelResult.InvalidMemState; - } - - if (addrSpacePagesCount < pagesCount) - { - return KernelResult.InvalidMemState; - } - - if (endAddr - 1 > AddrSpaceEnd - 1) - { - return KernelResult.InvalidMemState; - } - - lock (_blocks) - { - KPageList currentPageList = new KPageList(); - - AddVaRangeToPageList(currentPageList, address, pagesCount); - - if (!currentPageList.IsEqual(pageList)) - { - return KernelResult.InvalidMemRange; - } - - if (CheckRange( - address, - size, - MemoryState.Mask, - stateExpected, - KMemoryPermission.None, - KMemoryPermission.None, - MemoryAttribute.Mask, - MemoryAttribute.None, - MemoryAttribute.IpcAndDeviceMapped, - out MemoryState state, - out _, - out _)) - { - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion)) - { - return KernelResult.OutOfResource; - } - - KernelResult result = MmuUnmap(address, pagesCount); - - if (result == KernelResult.Success) - { - InsertBlock(address, pagesCount, MemoryState.Unmapped); - } - - return result; - } - else - { - return KernelResult.InvalidMemState; - } - } - } - - public KernelResult MapNormalMemory(long address, long size, KMemoryPermission permission) - { - // TODO. - return KernelResult.Success; - } - - public KernelResult MapIoMemory(long address, long size, KMemoryPermission permission) - { - // TODO. 
- return KernelResult.Success; - } - - public KernelResult AllocateOrMapPa( - ulong neededPagesCount, - int alignment, - ulong srcPa, - bool map, - ulong regionStart, - ulong regionPagesCount, - MemoryState state, - KMemoryPermission permission, - out ulong address) - { - address = 0; - - ulong regionSize = regionPagesCount * PageSize; - - ulong regionEndAddr = regionStart + regionSize; - - if (!CanContain(regionStart, regionSize, state)) - { - return KernelResult.InvalidMemState; - } - - if (regionPagesCount <= neededPagesCount) - { - return KernelResult.OutOfMemory; - } - - lock (_blocks) - { - address = AllocateVa(regionStart, regionPagesCount, neededPagesCount, alignment); - - if (address == 0) - { - return KernelResult.OutOfMemory; - } - - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion)) - { - return KernelResult.OutOfResource; - } - - MemoryOperation operation = map - ? MemoryOperation.MapPa - : MemoryOperation.Allocate; - - KernelResult result = DoMmuOperation( - address, - neededPagesCount, - srcPa, - map, - permission, - operation); - - if (result != KernelResult.Success) - { - return result; - } - - InsertBlock(address, neededPagesCount, state, permission); - } - - return KernelResult.Success; - } - - public KernelResult MapNewProcessCode( - ulong address, - ulong pagesCount, - MemoryState state, - KMemoryPermission permission) - { - ulong size = pagesCount * PageSize; - - if (!CanContain(address, size, state)) - { - return KernelResult.InvalidMemState; - } - - lock (_blocks) - { - if (!IsUnmapped(address, size)) - { - return KernelResult.InvalidMemState; - } - - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion)) - { - return KernelResult.OutOfResource; - } - - KernelResult result = DoMmuOperation( - address, - pagesCount, - 0, - false, - permission, - MemoryOperation.Allocate); - - if (result == KernelResult.Success) - { - InsertBlock(address, pagesCount, state, permission); - } - - return result; - } - } - - public KernelResult MapProcessCodeMemory(ulong dst, ulong src, ulong size) - { - ulong pagesCount = size / PageSize; - - lock (_blocks) - { - bool success = CheckRange( - src, - size, - MemoryState.Mask, - MemoryState.Heap, - KMemoryPermission.Mask, - KMemoryPermission.ReadAndWrite, - MemoryAttribute.Mask, - MemoryAttribute.None, - MemoryAttribute.IpcAndDeviceMapped, - out MemoryState state, - out KMemoryPermission permission, - out _); - - success &= IsUnmapped(dst, size); - - if (success) - { - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion * 2)) - { - return KernelResult.OutOfResource; - } - - KPageList pageList = new KPageList(); - - AddVaRangeToPageList(pageList, src, pagesCount); - - KernelResult result = MmuChangePermission(src, pagesCount, KMemoryPermission.None); - - if (result != KernelResult.Success) - { - return result; - } - - result = MapPages(dst, pageList, KMemoryPermission.None); - - if (result != KernelResult.Success) - { - MmuChangePermission(src, pagesCount, permission); - - return result; - } - - InsertBlock(src, pagesCount, state, KMemoryPermission.None, MemoryAttribute.Borrowed); - InsertBlock(dst, pagesCount, MemoryState.ModCodeStatic); - - return KernelResult.Success; - } - else - { - return KernelResult.InvalidMemState; - } - } - } - - public KernelResult UnmapProcessCodeMemory(ulong dst, ulong src, ulong size) - { - ulong pagesCount = size / PageSize; - - lock (_blocks) - { - bool success = CheckRange( - src, - size, - MemoryState.Mask, - MemoryState.Heap, - KMemoryPermission.None, - KMemoryPermission.None, 
- MemoryAttribute.Mask, - MemoryAttribute.Borrowed, - MemoryAttribute.IpcAndDeviceMapped, - out _, - out _, - out _); - - success &= CheckRange( - dst, - PageSize, - MemoryState.UnmapProcessCodeMemoryAllowed, - MemoryState.UnmapProcessCodeMemoryAllowed, - KMemoryPermission.None, - KMemoryPermission.None, - MemoryAttribute.Mask, - MemoryAttribute.None, - MemoryAttribute.IpcAndDeviceMapped, - out MemoryState state, - out _, - out _); - - success &= CheckRange( - dst, - size, - MemoryState.Mask, - state, - KMemoryPermission.None, - KMemoryPermission.None, - MemoryAttribute.Mask, - MemoryAttribute.None); - - if (success) - { - KernelResult result = MmuUnmap(dst, pagesCount); - - if (result != KernelResult.Success) - { - return result; - } - - // TODO: Missing some checks here. - - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion * 2)) - { - return KernelResult.OutOfResource; - } - - InsertBlock(dst, pagesCount, MemoryState.Unmapped); - InsertBlock(src, pagesCount, MemoryState.Heap, KMemoryPermission.ReadAndWrite); - - return KernelResult.Success; - } - else - { - return KernelResult.InvalidMemState; - } - } - } - - public KernelResult SetHeapSize(ulong size, out ulong address) - { - address = 0; - - if (size > HeapRegionEnd - HeapRegionStart) - { - return KernelResult.OutOfMemory; - } - - KProcess currentProcess = KernelStatic.GetCurrentProcess(); - - lock (_blocks) - { - ulong currentHeapSize = GetHeapSize(); - - if (currentHeapSize <= size) - { - // Expand. - ulong sizeDelta = size - currentHeapSize; - - if (currentProcess.ResourceLimit != null && sizeDelta != 0 && - !currentProcess.ResourceLimit.Reserve(LimitableResource.Memory, sizeDelta)) - { - return KernelResult.ResLimitExceeded; - } - - ulong pagesCount = sizeDelta / PageSize; - - KMemoryRegionManager region = GetMemoryRegionManager(); - - KernelResult result = region.AllocatePages(pagesCount, _aslrDisabled, out KPageList pageList); - - void CleanUpForError() - { - if (pageList != null) - { - region.FreePages(pageList); - } - - if (currentProcess.ResourceLimit != null && sizeDelta != 0) - { - currentProcess.ResourceLimit.Release(LimitableResource.Memory, sizeDelta); - } - } - - if (result != KernelResult.Success) - { - CleanUpForError(); - - return result; - } - - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion)) - { - CleanUpForError(); - - return KernelResult.OutOfResource; - } - - if (!IsUnmapped(_currentHeapAddr, sizeDelta)) - { - CleanUpForError(); - - return KernelResult.InvalidMemState; - } - - result = DoMmuOperation( - _currentHeapAddr, - pagesCount, - pageList, - KMemoryPermission.ReadAndWrite, - MemoryOperation.MapVa); - - if (result != KernelResult.Success) - { - CleanUpForError(); - - return result; - } - - InsertBlock(_currentHeapAddr, pagesCount, MemoryState.Heap, KMemoryPermission.ReadAndWrite); - } - else - { - // Shrink. 
- ulong freeAddr = HeapRegionStart + size; - ulong sizeDelta = currentHeapSize - size; - - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion)) - { - return KernelResult.OutOfResource; - } - - if (!CheckRange( - freeAddr, - sizeDelta, - MemoryState.Mask, - MemoryState.Heap, - KMemoryPermission.Mask, - KMemoryPermission.ReadAndWrite, - MemoryAttribute.Mask, - MemoryAttribute.None, - MemoryAttribute.IpcAndDeviceMapped, - out _, - out _, - out _)) - { - return KernelResult.InvalidMemState; - } - - ulong pagesCount = sizeDelta / PageSize; - - KernelResult result = MmuUnmap(freeAddr, pagesCount); - - if (result != KernelResult.Success) - { - return result; - } - - currentProcess.ResourceLimit?.Release(LimitableResource.Memory, sizeDelta); - - InsertBlock(freeAddr, pagesCount, MemoryState.Unmapped); - } - - _currentHeapAddr = HeapRegionStart + size; - } - - address = HeapRegionStart; - - return KernelResult.Success; - } - - public ulong GetTotalHeapSize() - { - lock (_blocks) - { - return GetHeapSize() + PhysicalMemoryUsage; - } - } - - private ulong GetHeapSize() - { - return _currentHeapAddr - HeapRegionStart; - } - - public KernelResult SetHeapCapacity(ulong capacity) - { - lock (_blocks) - { - _heapCapacity = capacity; - } - - return KernelResult.Success; - } - - public KernelResult SetMemoryAttribute( - ulong address, - ulong size, - MemoryAttribute attributeMask, - MemoryAttribute attributeValue) - { - lock (_blocks) - { - if (CheckRange( - address, - size, - MemoryState.AttributeChangeAllowed, - MemoryState.AttributeChangeAllowed, - KMemoryPermission.None, - KMemoryPermission.None, - MemoryAttribute.BorrowedAndIpcMapped, - MemoryAttribute.None, - MemoryAttribute.DeviceMappedAndUncached, - out MemoryState state, - out KMemoryPermission permission, - out MemoryAttribute attribute)) - { - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion)) - { - return KernelResult.OutOfResource; - } - - ulong pagesCount = size / PageSize; - - attribute &= ~attributeMask; - attribute |= attributeMask & attributeValue; - - InsertBlock(address, pagesCount, state, permission, attribute); - - return KernelResult.Success; - } - else - { - return KernelResult.InvalidMemState; - } - } - } - - public KMemoryInfo QueryMemory(ulong address) - { - if (address >= AddrSpaceStart && - address < AddrSpaceEnd) - { - lock (_blocks) - { - return FindBlock(address).GetInfo(); - } - } - else - { - return new KMemoryInfo( - AddrSpaceEnd, - ~AddrSpaceEnd + 1, - MemoryState.Reserved, - KMemoryPermission.None, - MemoryAttribute.None, - KMemoryPermission.None, - 0, - 0); - } - } - - public KernelResult Map(ulong dst, ulong src, ulong size) - { - bool success; - - lock (_blocks) - { - success = CheckRange( - src, - size, - MemoryState.MapAllowed, - MemoryState.MapAllowed, - KMemoryPermission.Mask, - KMemoryPermission.ReadAndWrite, - MemoryAttribute.Mask, - MemoryAttribute.None, - MemoryAttribute.IpcAndDeviceMapped, - out MemoryState srcState, - out _, - out _); - - success &= IsUnmapped(dst, size); - - if (success) - { - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion * 2)) - { - return KernelResult.OutOfResource; - } - - ulong pagesCount = size / PageSize; - - KPageList pageList = new KPageList(); - - AddVaRangeToPageList(pageList, src, pagesCount); - - KernelResult result = MmuChangePermission(src, pagesCount, KMemoryPermission.None); - - if (result != KernelResult.Success) - { - return result; - } - - result = MapPages(dst, pageList, KMemoryPermission.ReadAndWrite); - - if (result != 
KernelResult.Success) - { - if (MmuChangePermission(src, pagesCount, KMemoryPermission.ReadAndWrite) != KernelResult.Success) - { - throw new InvalidOperationException("Unexpected failure reverting memory permission."); - } - - return result; - } - - InsertBlock(src, pagesCount, srcState, KMemoryPermission.None, MemoryAttribute.Borrowed); - InsertBlock(dst, pagesCount, MemoryState.Stack, KMemoryPermission.ReadAndWrite); - - return KernelResult.Success; - } - else - { - return KernelResult.InvalidMemState; - } - } - } - - public KernelResult UnmapForKernel(ulong address, ulong pagesCount, MemoryState stateExpected) - { - ulong size = pagesCount * PageSize; - - lock (_blocks) - { - if (CheckRange( - address, - size, - MemoryState.Mask, - stateExpected, - KMemoryPermission.None, - KMemoryPermission.None, - MemoryAttribute.Mask, - MemoryAttribute.None, - MemoryAttribute.IpcAndDeviceMapped, - out _, - out _, - out _)) - { - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion)) - { - return KernelResult.OutOfResource; - } - - KernelResult result = MmuUnmap(address, pagesCount); - - if (result == KernelResult.Success) - { - InsertBlock(address, pagesCount, MemoryState.Unmapped); - } - - return KernelResult.Success; - } - else - { - return KernelResult.InvalidMemState; - } - } - } - - public KernelResult Unmap(ulong dst, ulong src, ulong size) - { - bool success; - - lock (_blocks) - { - success = CheckRange( - src, - size, - MemoryState.MapAllowed, - MemoryState.MapAllowed, - KMemoryPermission.Mask, - KMemoryPermission.None, - MemoryAttribute.Mask, - MemoryAttribute.Borrowed, - MemoryAttribute.IpcAndDeviceMapped, - out MemoryState srcState, - out _, - out _); - - success &= CheckRange( - dst, - size, - MemoryState.Mask, - MemoryState.Stack, - KMemoryPermission.None, - KMemoryPermission.None, - MemoryAttribute.Mask, - MemoryAttribute.None, - MemoryAttribute.IpcAndDeviceMapped, - out _, - out KMemoryPermission dstPermission, - out _); - - if (success) - { - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion * 2)) - { - return KernelResult.OutOfResource; - } - - ulong pagesCount = size / PageSize; - - KPageList srcPageList = new KPageList(); - KPageList dstPageList = new KPageList(); - - AddVaRangeToPageList(srcPageList, src, pagesCount); - AddVaRangeToPageList(dstPageList, dst, pagesCount); - - if (!dstPageList.IsEqual(srcPageList)) - { - return KernelResult.InvalidMemRange; - } - - KernelResult result = MmuUnmap(dst, pagesCount); - - if (result != KernelResult.Success) - { - return result; - } - - result = MmuChangePermission(src, pagesCount, KMemoryPermission.ReadAndWrite); - - if (result != KernelResult.Success) - { - MapPages(dst, dstPageList, dstPermission); - - return result; - } - - InsertBlock(src, pagesCount, srcState, KMemoryPermission.ReadAndWrite); - InsertBlock(dst, pagesCount, MemoryState.Unmapped); - - return KernelResult.Success; - } - else - { - return KernelResult.InvalidMemState; - } - } - } - - public KernelResult SetProcessMemoryPermission(ulong address, ulong size, KMemoryPermission permission) - { - lock (_blocks) - { - if (CheckRange( - address, - size, - MemoryState.ProcessPermissionChangeAllowed, - MemoryState.ProcessPermissionChangeAllowed, - KMemoryPermission.None, - KMemoryPermission.None, - MemoryAttribute.Mask, - MemoryAttribute.None, - MemoryAttribute.IpcAndDeviceMapped, - out MemoryState oldState, - out KMemoryPermission oldPermission, - out _)) - { - MemoryState newState = oldState; - - // If writing into the code region is allowed, then we need 
- // to change it to mutable. - if ((permission & KMemoryPermission.Write) != 0) - { - if (oldState == MemoryState.CodeStatic) - { - newState = MemoryState.CodeMutable; - } - else if (oldState == MemoryState.ModCodeStatic) - { - newState = MemoryState.ModCodeMutable; - } - else - { - throw new InvalidOperationException($"Memory state \"{oldState}\" not valid for this operation."); - } - } - - if (newState != oldState || permission != oldPermission) - { - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion)) - { - return KernelResult.OutOfResource; - } - - ulong pagesCount = size / PageSize; - - MemoryOperation operation = (permission & KMemoryPermission.Execute) != 0 - ? MemoryOperation.ChangePermsAndAttributes - : MemoryOperation.ChangePermRw; - - KernelResult result = DoMmuOperation(address, pagesCount, 0, false, permission, operation); - - if (result != KernelResult.Success) - { - return result; - } - - InsertBlock(address, pagesCount, newState, permission); - } - - return KernelResult.Success; - } - else - { - return KernelResult.InvalidMemState; - } - } - } - - public KernelResult MapPhysicalMemory(ulong address, ulong size) - { - ulong endAddr = address + size; - - lock (_blocks) - { - ulong mappedSize = 0; - - foreach (KMemoryInfo info in IterateOverRange(address, endAddr)) - { - if (info.State != MemoryState.Unmapped) - { - mappedSize += GetSizeInRange(info, address, endAddr); - } - } - - if (mappedSize == size) - { - return KernelResult.Success; - } - - ulong remainingSize = size - mappedSize; - - ulong remainingPages = remainingSize / PageSize; - - KProcess currentProcess = KernelStatic.GetCurrentProcess(); - - if (currentProcess.ResourceLimit != null && - !currentProcess.ResourceLimit.Reserve(LimitableResource.Memory, remainingSize)) - { - return KernelResult.ResLimitExceeded; - } - - KMemoryRegionManager region = GetMemoryRegionManager(); - - KernelResult result = region.AllocatePages(remainingPages, _aslrDisabled, out KPageList pageList); - - void CleanUpForError() - { - if (pageList != null) - { - region.FreePages(pageList); - } - - currentProcess.ResourceLimit?.Release(LimitableResource.Memory, remainingSize); - } - - if (result != KernelResult.Success) - { - CleanUpForError(); - - return result; - } - - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion)) - { - CleanUpForError(); - - return KernelResult.OutOfResource; - } - - MapPhysicalMemory(pageList, address, endAddr); - - PhysicalMemoryUsage += remainingSize; - - ulong pagesCount = size / PageSize; - - InsertBlock( - address, - pagesCount, - MemoryState.Unmapped, - KMemoryPermission.None, - MemoryAttribute.None, - MemoryState.Heap, - KMemoryPermission.ReadAndWrite, - MemoryAttribute.None); - } - - return KernelResult.Success; - } - - public KernelResult UnmapPhysicalMemory(ulong address, ulong size) - { - ulong endAddr = address + size; - - lock (_blocks) - { - // Scan, ensure that the region can be unmapped (all blocks are heap or - // already unmapped), fill pages list for freeing memory. 
-            ulong heapMappedSize = 0;
-
-            KPageList pageList = new KPageList();
-
-            foreach (KMemoryInfo info in IterateOverRange(address, endAddr))
-            {
-                if (info.State == MemoryState.Heap)
-                {
-                    if (info.Attribute != MemoryAttribute.None)
-                    {
-                        return KernelResult.InvalidMemState;
-                    }
-
-                    ulong blockSize = GetSizeInRange(info, address, endAddr);
-                    ulong blockAddress = GetAddrInRange(info, address);
-
-                    AddVaRangeToPageList(pageList, blockAddress, blockSize / PageSize);
-
-                    heapMappedSize += blockSize;
-                }
-                else if (info.State != MemoryState.Unmapped)
-                {
-                    return KernelResult.InvalidMemState;
-                }
-            }
-
-            if (heapMappedSize == 0)
-            {
-                return KernelResult.Success;
-            }
-
-            if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion))
-            {
-                return KernelResult.OutOfResource;
-            }
-
-            // Try to unmap all the heap mapped memory inside range.
-            KernelResult result = KernelResult.Success;
-
-            foreach (KMemoryInfo info in IterateOverRange(address, endAddr))
-            {
-                if (info.State == MemoryState.Heap)
-                {
-                    ulong blockSize = GetSizeInRange(info, address, endAddr);
-                    ulong blockAddress = GetAddrInRange(info, address);
-
-                    ulong blockPagesCount = blockSize / PageSize;
-
-                    result = MmuUnmap(blockAddress, blockPagesCount);
-
-                    if (result != KernelResult.Success)
-                    {
-                        // If we failed to unmap, we need to remap everything back again.
-                        MapPhysicalMemory(pageList, address, blockAddress + blockSize);
-
-                        break;
-                    }
-                }
-            }
-
-            if (result == KernelResult.Success)
-            {
-                GetMemoryRegionManager().FreePages(pageList);
-
-                PhysicalMemoryUsage -= heapMappedSize;
-
-                KProcess currentProcess = KernelStatic.GetCurrentProcess();
-
-                currentProcess.ResourceLimit?.Release(LimitableResource.Memory, heapMappedSize);
-
-                ulong pagesCount = size / PageSize;
-
-                InsertBlock(address, pagesCount, MemoryState.Unmapped);
-            }
-
-            return result;
-        }
-    }
-
-    private void MapPhysicalMemory(KPageList pageList, ulong address, ulong endAddr)
-    {
-        LinkedListNode<KPageNode> pageListNode = pageList.Nodes.First;
-
-        KPageNode pageNode = pageListNode.Value;
-
-        ulong srcPa = pageNode.Address;
-        ulong srcPaPages = pageNode.PagesCount;
-
-        foreach (KMemoryInfo info in IterateOverRange(address, endAddr))
-        {
-            if (info.State == MemoryState.Unmapped)
-            {
-                ulong blockSize = GetSizeInRange(info, address, endAddr);
-
-                ulong dstVaPages = blockSize / PageSize;
-
-                ulong dstVa = GetAddrInRange(info, address);
-
-                while (dstVaPages > 0)
-                {
-                    if (srcPaPages == 0)
-                    {
-                        pageListNode = pageListNode.Next;
-
-                        pageNode = pageListNode.Value;
-
-                        srcPa = pageNode.Address;
-                        srcPaPages = pageNode.PagesCount;
-                    }
-
-                    ulong pagesCount = srcPaPages;
-
-                    if (pagesCount > dstVaPages)
-                    {
-                        pagesCount = dstVaPages;
-                    }
-
-                    DoMmuOperation(
-                        dstVa,
-                        pagesCount,
-                        srcPa,
-                        true,
-                        KMemoryPermission.ReadAndWrite,
-                        MemoryOperation.MapPa);
-
-                    dstVa += pagesCount * PageSize;
-                    srcPa += pagesCount * PageSize;
-                    srcPaPages -= pagesCount;
-                    dstVaPages -= pagesCount;
-                }
-            }
-        }
-    }
-
-    public KernelResult CopyDataToCurrentProcess(
-        ulong dst,
-        ulong size,
-        ulong src,
-        MemoryState stateMask,
-        MemoryState stateExpected,
-        KMemoryPermission permission,
-        MemoryAttribute attributeMask,
-        MemoryAttribute attributeExpected)
-    {
-        // Client -> server.
- return CopyDataFromOrToCurrentProcess( - size, - src, - dst, - stateMask, - stateExpected, - permission, - attributeMask, - attributeExpected, - toServer: true); - } - - public KernelResult CopyDataFromCurrentProcess( - ulong dst, - ulong size, - MemoryState stateMask, - MemoryState stateExpected, - KMemoryPermission permission, - MemoryAttribute attributeMask, - MemoryAttribute attributeExpected, - ulong src) - { - // Server -> client. - return CopyDataFromOrToCurrentProcess( - size, - dst, - src, - stateMask, - stateExpected, - permission, - attributeMask, - attributeExpected, - toServer: false); - } - - private KernelResult CopyDataFromOrToCurrentProcess( - ulong size, - ulong clientAddress, - ulong serverAddress, - MemoryState stateMask, - MemoryState stateExpected, - KMemoryPermission permission, - MemoryAttribute attributeMask, - MemoryAttribute attributeExpected, - bool toServer) - { - if (AddrSpaceStart > clientAddress) - { - return KernelResult.InvalidMemState; - } - - ulong srcEndAddr = clientAddress + size; - - if (srcEndAddr <= clientAddress || srcEndAddr - 1 > AddrSpaceEnd - 1) - { - return KernelResult.InvalidMemState; - } - - lock (_blocks) - { - if (CheckRange( - clientAddress, - size, - stateMask, - stateExpected, - permission, - permission, - attributeMask | MemoryAttribute.Uncached, - attributeExpected)) - { - KProcess currentProcess = KernelStatic.GetCurrentProcess(); - - while (size > 0) - { - ulong copySize = Math.Min(PageSize - (serverAddress & (PageSize - 1)), PageSize - (clientAddress & (PageSize - 1))); - - if (copySize > size) - { - copySize = size; - } - - ulong serverDramAddr = currentProcess.MemoryManager.GetDramAddressFromVa(serverAddress); - ulong clientDramAddr = GetDramAddressFromVa(clientAddress); - - if (serverDramAddr != clientDramAddr) - { - if (toServer) - { - _context.Memory.Copy(serverDramAddr, clientDramAddr, copySize); - } - else - { - _context.Memory.Copy(clientDramAddr, serverDramAddr, copySize); - } - } - - serverAddress += copySize; - clientAddress += copySize; - size -= copySize; - } - - return KernelResult.Success; - } - else - { - return KernelResult.InvalidMemState; - } - } - } - - public KernelResult MapBufferFromClientProcess( - ulong size, - ulong src, - KMemoryManager sourceMemMgr, - KMemoryPermission permission, - MemoryState state, - bool copyData, - out ulong dst) - { - dst = 0; - - KernelResult result = sourceMemMgr.GetPagesForMappingIntoAnotherProcess( - src, - size, - permission, - state, - copyData, - _aslrDisabled, - _memRegion, - out KPageList pageList); - - if (result != KernelResult.Success) - { - return result; - } - - result = MapPagesFromAnotherProcess(size, src, permission, state, pageList, out ulong va); - - if (result != KernelResult.Success) - { - sourceMemMgr.UnmapIpcRestorePermission(src, size, state); - } - else - { - dst = va; - } - - return result; - } - - private KernelResult GetPagesForMappingIntoAnotherProcess( - ulong address, - ulong size, - KMemoryPermission permission, - MemoryState state, - bool copyData, - bool aslrDisabled, - MemoryRegion region, - out KPageList pageList) - { - pageList = null; - - if (AddrSpaceStart > address) - { - return KernelResult.InvalidMemState; - } - - ulong endAddr = address + size; - - if (endAddr <= address || endAddr - 1 > AddrSpaceEnd - 1) - { - return KernelResult.InvalidMemState; - } - - MemoryState stateMask; - - switch (state) - { - case MemoryState.IpcBuffer0: stateMask = MemoryState.IpcSendAllowedType0; break; - case MemoryState.IpcBuffer1: stateMask = 
MemoryState.IpcSendAllowedType1; break; - case MemoryState.IpcBuffer3: stateMask = MemoryState.IpcSendAllowedType3; break; - - default: return KernelResult.InvalidCombination; - } - - KMemoryPermission permissionMask = permission == KMemoryPermission.ReadAndWrite - ? KMemoryPermission.None - : KMemoryPermission.Read; - - MemoryAttribute attributeMask = MemoryAttribute.Borrowed | MemoryAttribute.Uncached; - - if (state == MemoryState.IpcBuffer0) - { - attributeMask |= MemoryAttribute.DeviceMapped; - } - - ulong addressRounded = BitUtils.AlignUp (address, PageSize); - ulong addressTruncated = BitUtils.AlignDown(address, PageSize); - ulong endAddrRounded = BitUtils.AlignUp (endAddr, PageSize); - ulong endAddrTruncated = BitUtils.AlignDown(endAddr, PageSize); - - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion)) - { - return KernelResult.OutOfResource; - } - - ulong visitedSize = 0; - - void CleanUpForError() - { - if (visitedSize == 0) - { - return; - } - - ulong endAddrVisited = address + visitedSize; - - foreach (KMemoryInfo info in IterateOverRange(addressRounded, endAddrVisited)) - { - if ((info.Permission & KMemoryPermission.ReadAndWrite) != permissionMask && info.IpcRefCount == 0) - { - ulong blockAddress = GetAddrInRange(info, addressRounded); - ulong blockSize = GetSizeInRange(info, addressRounded, endAddrVisited); - - ulong blockPagesCount = blockSize / PageSize; - - if (DoMmuOperation( - blockAddress, - blockPagesCount, - 0, - false, - info.Permission, - MemoryOperation.ChangePermRw) != KernelResult.Success) - { - throw new InvalidOperationException("Unexpected failure trying to restore permission."); - } - } - } - } - - // Signal a read for any resources tracking reads in the region, as the other process is likely to use their data. - _cpuMemory.SignalMemoryTracking(addressTruncated, endAddrRounded - addressTruncated, false); - - lock (_blocks) - { - KernelResult result; - - if (addressRounded < endAddrTruncated) - { - foreach (KMemoryInfo info in IterateOverRange(addressRounded, endAddrTruncated)) - { - // Check if the block state matches what we expect. 
- if ((info.State & stateMask) != stateMask || - (info.Permission & permission) != permission || - (info.Attribute & attributeMask) != MemoryAttribute.None) - { - CleanUpForError(); - - return KernelResult.InvalidMemState; - } - - ulong blockAddress = GetAddrInRange(info, addressRounded); - ulong blockSize = GetSizeInRange(info, addressRounded, endAddrTruncated); - - ulong blockPagesCount = blockSize / PageSize; - - if ((info.Permission & KMemoryPermission.ReadAndWrite) != permissionMask && info.IpcRefCount == 0) - { - result = DoMmuOperation( - blockAddress, - blockPagesCount, - 0, - false, - permissionMask, - MemoryOperation.ChangePermRw); - - if (result != KernelResult.Success) - { - CleanUpForError(); - - return result; - } - } - - visitedSize += blockSize; - } - } - - result = GetPagesForIpcTransfer(address, size, copyData, aslrDisabled, region, out pageList); - - if (result != KernelResult.Success) - { - CleanUpForError(); - - return result; - } - - if (visitedSize != 0) - { - InsertBlock(addressRounded, visitedSize / PageSize, SetIpcMappingPermissions, permissionMask); - } - } - - return KernelResult.Success; - } - - private KernelResult GetPagesForIpcTransfer( - ulong address, - ulong size, - bool copyData, - bool aslrDisabled, - MemoryRegion region, - out KPageList pageList) - { - // When the start address is unaligned, we can't safely map the - // first page as it would expose other undesirable information on the - // target process. So, instead we allocate new pages, copy the data - // inside the range, and then clear the remaining space. - // The same also holds for the last page, if the end address - // (address + size) is also not aligned. - - pageList = null; - - KPageList pages = new KPageList(); - - ulong addressTruncated = BitUtils.AlignDown(address, PageSize); - ulong addressRounded = BitUtils.AlignUp (address, PageSize); - - ulong endAddr = address + size; - - ulong dstFirstPagePa = 0; - ulong dstLastPagePa = 0; - - void CleanUpForError() - { - if (dstFirstPagePa != 0) - { - FreeSinglePage(region, dstFirstPagePa); - } - - if (dstLastPagePa != 0) - { - FreeSinglePage(region, dstLastPagePa); - } - } - - // Is the first page address aligned? - // If not, allocate a new page and copy the unaligned chunck. - if (addressTruncated < addressRounded) - { - dstFirstPagePa = AllocateSinglePage(region, aslrDisabled); - - if (dstFirstPagePa == 0) - { - return KernelResult.OutOfMemory; - } - - ulong firstPageFillAddress = dstFirstPagePa; - - if (!TryConvertVaToPa(addressTruncated, out ulong srcFirstPagePa)) - { - CleanUpForError(); - - return KernelResult.InvalidMemState; - } - - ulong unusedSizeAfter; - - if (copyData) - { - ulong unusedSizeBefore = address - addressTruncated; - - _context.Memory.ZeroFill(GetDramAddressFromPa(dstFirstPagePa), unusedSizeBefore); - - ulong copySize = addressRounded <= endAddr ? addressRounded - address : size; - - _context.Memory.Copy( - GetDramAddressFromPa(dstFirstPagePa + unusedSizeBefore), - GetDramAddressFromPa(srcFirstPagePa + unusedSizeBefore), copySize); - - firstPageFillAddress += unusedSizeBefore + copySize; - - unusedSizeAfter = addressRounded > endAddr ? 
addressRounded - endAddr : 0; - } - else - { - unusedSizeAfter = PageSize; - } - - if (unusedSizeAfter != 0) - { - _context.Memory.ZeroFill(GetDramAddressFromPa(firstPageFillAddress), unusedSizeAfter); - } - - if (pages.AddRange(dstFirstPagePa, 1) != KernelResult.Success) - { - CleanUpForError(); - - return KernelResult.OutOfResource; - } - } - - ulong endAddrTruncated = BitUtils.AlignDown(endAddr, PageSize); - ulong endAddrRounded = BitUtils.AlignUp (endAddr, PageSize); - - if (endAddrTruncated > addressRounded) - { - ulong alignedPagesCount = (endAddrTruncated - addressRounded) / PageSize; - - AddVaRangeToPageList(pages, addressRounded, alignedPagesCount); - } - - // Is the last page end address aligned? - // If not, allocate a new page and copy the unaligned chunck. - if (endAddrTruncated < endAddrRounded && (addressTruncated == addressRounded || addressTruncated < endAddrTruncated)) - { - dstLastPagePa = AllocateSinglePage(region, aslrDisabled); - - if (dstLastPagePa == 0) - { - CleanUpForError(); - - return KernelResult.OutOfMemory; - } - - ulong lastPageFillAddr = dstLastPagePa; - - if (!TryConvertVaToPa(endAddrTruncated, out ulong srcLastPagePa)) - { - CleanUpForError(); - - return KernelResult.InvalidMemState; - } - - ulong unusedSizeAfter; - - if (copyData) - { - ulong copySize = endAddr - endAddrTruncated; - - _context.Memory.Copy( - GetDramAddressFromPa(dstLastPagePa), - GetDramAddressFromPa(srcLastPagePa), copySize); - - lastPageFillAddr += copySize; - - unusedSizeAfter = PageSize - copySize; - } - else - { - unusedSizeAfter = PageSize; - } - - _context.Memory.ZeroFill(GetDramAddressFromPa(lastPageFillAddr), unusedSizeAfter); - - if (pages.AddRange(dstLastPagePa, 1) != KernelResult.Success) - { - CleanUpForError(); - - return KernelResult.OutOfResource; - } - } - - pageList = pages; - - return KernelResult.Success; - } - - private ulong AllocateSinglePage(MemoryRegion region, bool aslrDisabled) - { - KMemoryRegionManager regionMgr = _context.MemoryRegions[(int)region]; - - return regionMgr.AllocatePagesContiguous(1, aslrDisabled); - } - - private void FreeSinglePage(MemoryRegion region, ulong address) - { - KMemoryRegionManager regionMgr = _context.MemoryRegions[(int)region]; - - regionMgr.FreePage(address); - } - - private KernelResult MapPagesFromAnotherProcess( - ulong size, - ulong address, - KMemoryPermission permission, - MemoryState state, - KPageList pageList, - out ulong dst) - { - dst = 0; - - lock (_blocks) - { - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion)) - { - return KernelResult.OutOfResource; - } - - ulong endAddr = address + size; - - ulong addressTruncated = BitUtils.AlignDown(address, PageSize); - ulong endAddrRounded = BitUtils.AlignUp (endAddr, PageSize); - - ulong neededSize = endAddrRounded - addressTruncated; - - ulong neededPagesCount = neededSize / PageSize; - - ulong regionPagesCount = (AliasRegionEnd - AliasRegionStart) / PageSize; - - ulong va = 0; - - for (int unit = MappingUnitSizes.Length - 1; unit >= 0 && va == 0; unit--) - { - int alignment = MappingUnitSizes[unit]; - - va = AllocateVa(AliasRegionStart, regionPagesCount, neededPagesCount, alignment); - } - - if (va == 0) - { - return KernelResult.OutOfVaSpace; - } - - if (pageList.Nodes.Count != 0) - { - KernelResult result = MapPages(va, pageList, permission); - - if (result != KernelResult.Success) - { - return result; - } - } - - InsertBlock(va, neededPagesCount, state, permission); - - dst = va + (address - addressTruncated); - } - - return KernelResult.Success; - } - - 
public KernelResult UnmapNoAttributeIfStateEquals(ulong address, ulong size, MemoryState state) - { - if (AddrSpaceStart > address) - { - return KernelResult.InvalidMemState; - } - - ulong endAddr = address + size; - - if (endAddr <= address || endAddr - 1 > AddrSpaceEnd - 1) - { - return KernelResult.InvalidMemState; - } - - lock (_blocks) - { - if (CheckRange( - address, - size, - MemoryState.Mask, - state, - KMemoryPermission.Read, - KMemoryPermission.Read, - MemoryAttribute.Mask, - MemoryAttribute.None, - MemoryAttribute.IpcAndDeviceMapped, - out _, - out _, - out _)) - { - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion)) - { - return KernelResult.OutOfResource; - } - - ulong addressTruncated = BitUtils.AlignDown(address, PageSize); - ulong addressRounded = BitUtils.AlignUp (address, PageSize); - ulong endAddrTruncated = BitUtils.AlignDown(endAddr, PageSize); - ulong endAddrRounded = BitUtils.AlignUp (endAddr, PageSize); - - ulong pagesCount = (endAddrRounded - addressTruncated) / PageSize; - - // Free pages we had to create on-demand, if any of the buffer was not page aligned. - // Real kernel has page ref counting, so this is done as part of the unmap operation. - if (addressTruncated != addressRounded) - { - FreeSinglePage(_memRegion, ConvertVaToPa(addressTruncated)); - } - - if (endAddrTruncated < endAddrRounded && (addressTruncated == addressRounded || addressTruncated < endAddrTruncated)) - { - FreeSinglePage(_memRegion, ConvertVaToPa(endAddrTruncated)); - } - - KernelResult result = DoMmuOperation( - addressTruncated, - pagesCount, - 0, - false, - KMemoryPermission.None, - MemoryOperation.Unmap); - - if (result == KernelResult.Success) - { - InsertBlock(addressTruncated, pagesCount, MemoryState.Unmapped); - } - - return result; - } - else - { - return KernelResult.InvalidMemState; - } - } - } - - public KernelResult UnmapIpcRestorePermission(ulong address, ulong size, MemoryState state) - { - ulong endAddr = address + size; - - ulong addressRounded = BitUtils.AlignUp (address, PageSize); - ulong addressTruncated = BitUtils.AlignDown(address, PageSize); - ulong endAddrRounded = BitUtils.AlignUp (endAddr, PageSize); - ulong endAddrTruncated = BitUtils.AlignDown(endAddr, PageSize); - - ulong pagesCount = addressRounded < endAddrTruncated ? (endAddrTruncated - addressRounded) / PageSize : 0; - - if (pagesCount == 0) - { - return KernelResult.Success; - } - - MemoryState stateMask; - - switch (state) - { - case MemoryState.IpcBuffer0: stateMask = MemoryState.IpcSendAllowedType0; break; - case MemoryState.IpcBuffer1: stateMask = MemoryState.IpcSendAllowedType1; break; - case MemoryState.IpcBuffer3: stateMask = MemoryState.IpcSendAllowedType3; break; - - default: return KernelResult.InvalidCombination; - } - - MemoryAttribute attributeMask = - MemoryAttribute.Borrowed | - MemoryAttribute.IpcMapped | - MemoryAttribute.Uncached; - - if (state == MemoryState.IpcBuffer0) - { - attributeMask |= MemoryAttribute.DeviceMapped; - } - - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion)) - { - return KernelResult.OutOfResource; - } - - // Anything on the client side should see this memory as modified. - _cpuMemory.SignalMemoryTracking(addressTruncated, endAddrRounded - addressTruncated, true); - - lock (_blocks) - { - foreach (KMemoryInfo info in IterateOverRange(addressRounded, endAddrTruncated)) - { - // Check if the block state matches what we expect. 
- if ((info.State & stateMask) != stateMask || - (info.Attribute & attributeMask) != MemoryAttribute.IpcMapped) - { - return KernelResult.InvalidMemState; - } - - if (info.Permission != info.SourcePermission && info.IpcRefCount == 1) - { - ulong blockAddress = GetAddrInRange(info, addressRounded); - ulong blockSize = GetSizeInRange(info, addressRounded, endAddrTruncated); - - ulong blockPagesCount = blockSize / PageSize; - - KernelResult result = DoMmuOperation( - blockAddress, - blockPagesCount, - 0, - false, - info.SourcePermission, - MemoryOperation.ChangePermRw); - - if (result != KernelResult.Success) - { - return result; - } - } - } - - InsertBlock(addressRounded, pagesCount, RestoreIpcMappingPermissions); - - return KernelResult.Success; - } - } - - public KernelResult BorrowIpcBuffer(ulong address, ulong size) - { - return SetAttributesAndChangePermission( - address, - size, - MemoryState.IpcBufferAllowed, - MemoryState.IpcBufferAllowed, - KMemoryPermission.Mask, - KMemoryPermission.ReadAndWrite, - MemoryAttribute.Mask, - MemoryAttribute.None, - KMemoryPermission.None, - MemoryAttribute.Borrowed); - } - - public KernelResult BorrowTransferMemory(KPageList pageList, ulong address, ulong size, KMemoryPermission permission) - { - return SetAttributesAndChangePermission( - address, - size, - MemoryState.TransferMemoryAllowed, - MemoryState.TransferMemoryAllowed, - KMemoryPermission.Mask, - KMemoryPermission.ReadAndWrite, - MemoryAttribute.Mask, - MemoryAttribute.None, - permission, - MemoryAttribute.Borrowed, - pageList); - } - - private KernelResult SetAttributesAndChangePermission( - ulong address, - ulong size, - MemoryState stateMask, - MemoryState stateExpected, - KMemoryPermission permissionMask, - KMemoryPermission permissionExpected, - MemoryAttribute attributeMask, - MemoryAttribute attributeExpected, - KMemoryPermission newPermission, - MemoryAttribute attributeSetMask, - KPageList pageList = null) - { - if (address + size <= address || !InsideAddrSpace(address, size)) - { - return KernelResult.InvalidMemState; - } - - lock (_blocks) - { - if (CheckRange( - address, - size, - stateMask | MemoryState.IsPoolAllocated, - stateExpected | MemoryState.IsPoolAllocated, - permissionMask, - permissionExpected, - attributeMask, - attributeExpected, - MemoryAttribute.IpcAndDeviceMapped, - out MemoryState oldState, - out KMemoryPermission oldPermission, - out MemoryAttribute oldAttribute)) - { - ulong pagesCount = size / PageSize; - - if (pageList != null) - { - AddVaRangeToPageList(pageList, address, pagesCount); - } - - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion)) - { - return KernelResult.OutOfResource; - } - - if (newPermission == KMemoryPermission.None) - { - newPermission = oldPermission; - } - - if (newPermission != oldPermission) - { - KernelResult result = DoMmuOperation( - address, - pagesCount, - 0, - false, - newPermission, - MemoryOperation.ChangePermRw); - - if (result != KernelResult.Success) - { - return result; - } - } - - MemoryAttribute newAttribute = oldAttribute | attributeSetMask; - - InsertBlock(address, pagesCount, oldState, newPermission, newAttribute); - - return KernelResult.Success; - } - else - { - return KernelResult.InvalidMemState; - } - } - } - - public KernelResult UnborrowIpcBuffer(ulong address, ulong size) - { - return ClearAttributesAndChangePermission( - address, - size, - MemoryState.IpcBufferAllowed, - MemoryState.IpcBufferAllowed, - KMemoryPermission.None, - KMemoryPermission.None, - MemoryAttribute.Mask, - 
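// Unborrow mirrors BorrowIpcBuffer above with the masks inverted: the borrow
// required the block to be ReadAndWrite with no attributes set and added
// Borrowed (passing newPermission None, i.e. keep the current permission);
// the unborrow below requires Borrowed to be present, clears it, and sets
// the permission back to ReadAndWrite.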
MemoryAttribute.Borrowed, - KMemoryPermission.ReadAndWrite, - MemoryAttribute.Borrowed); - } - - public KernelResult UnborrowTransferMemory(ulong address, ulong size, KPageList pageList) - { - return ClearAttributesAndChangePermission( - address, - size, - MemoryState.TransferMemoryAllowed, - MemoryState.TransferMemoryAllowed, - KMemoryPermission.None, - KMemoryPermission.None, - MemoryAttribute.Mask, - MemoryAttribute.Borrowed, - KMemoryPermission.ReadAndWrite, - MemoryAttribute.Borrowed, - pageList); - } - - private KernelResult ClearAttributesAndChangePermission( - ulong address, - ulong size, - MemoryState stateMask, - MemoryState stateExpected, - KMemoryPermission permissionMask, - KMemoryPermission permissionExpected, - MemoryAttribute attributeMask, - MemoryAttribute attributeExpected, - KMemoryPermission newPermission, - MemoryAttribute attributeClearMask, - KPageList pageList = null) - { - if (address + size <= address || !InsideAddrSpace(address, size)) - { - return KernelResult.InvalidMemState; - } - - lock (_blocks) - { - if (CheckRange( - address, - size, - stateMask | MemoryState.IsPoolAllocated, - stateExpected | MemoryState.IsPoolAllocated, - permissionMask, - permissionExpected, - attributeMask, - attributeExpected, - MemoryAttribute.IpcAndDeviceMapped, - out MemoryState oldState, - out KMemoryPermission oldPermission, - out MemoryAttribute oldAttribute)) - { - ulong pagesCount = size / PageSize; - - if (pageList != null) - { - KPageList currPageList = new KPageList(); - - AddVaRangeToPageList(currPageList, address, pagesCount); - - if (!currPageList.IsEqual(pageList)) - { - return KernelResult.InvalidMemRange; - } - } - - if (!_blockAllocator.CanAllocate(MaxBlocksNeededForInsertion)) - { - return KernelResult.OutOfResource; - } - - if (newPermission == KMemoryPermission.None) - { - newPermission = oldPermission; - } - - if (newPermission != oldPermission) - { - KernelResult result = DoMmuOperation( - address, - pagesCount, - 0, - false, - newPermission, - MemoryOperation.ChangePermRw); - - if (result != KernelResult.Success) - { - return result; - } - } - - MemoryAttribute newAttribute = oldAttribute & ~attributeClearMask; - - InsertBlock(address, pagesCount, oldState, newPermission, newAttribute); - - return KernelResult.Success; - } - else - { - return KernelResult.InvalidMemState; - } - } - } - - private void AddVaRangeToPageList(KPageList pageList, ulong start, ulong pagesCount) - { - ulong address = start; - - while (address < start + pagesCount * PageSize) - { - if (!TryConvertVaToPa(address, out ulong pa)) - { - throw new InvalidOperationException("Unexpected failure translating virtual address."); - } - - pageList.AddRange(pa, 1); - - address += PageSize; - } - } - - private static ulong GetAddrInRange(KMemoryInfo info, ulong start) - { - if (info.Address < start) - { - return start; - } - - return info.Address; - } - - private static ulong GetSizeInRange(KMemoryInfo info, ulong start, ulong end) - { - ulong endAddr = info.Size + info.Address; - ulong size = info.Size; - - if (info.Address < start) - { - size -= start - info.Address; - } - - if (endAddr > end) - { - size -= endAddr - end; - } - - return size; - } - - private bool IsUnmapped(ulong address, ulong size) - { - return CheckRange( - address, - size, - MemoryState.Mask, - MemoryState.Unmapped, - KMemoryPermission.Mask, - KMemoryPermission.None, - MemoryAttribute.Mask, - MemoryAttribute.None, - MemoryAttribute.IpcAndDeviceMapped, - out _, - out _, - out _); - } - - private bool CheckRange( - ulong 
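// CheckRange scans every block overlapping [address, address + size) and
// succeeds only when all of them agree on one state/permission/attribute
// combination (ignoring the bits in attributeIgnoreMask), which it then
// returns to the caller through the out parameters.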
address, - ulong size, - MemoryState stateMask, - MemoryState stateExpected, - KMemoryPermission permissionMask, - KMemoryPermission permissionExpected, - MemoryAttribute attributeMask, - MemoryAttribute attributeExpected, - MemoryAttribute attributeIgnoreMask, - out MemoryState outState, - out KMemoryPermission outPermission, - out MemoryAttribute outAttribute) - { - ulong endAddr = address + size; - - LinkedListNode node = FindBlockNode(address); - - KMemoryInfo info = node.Value.GetInfo(); - - MemoryState firstState = info.State; - KMemoryPermission firstPermission = info.Permission; - MemoryAttribute firstAttribute = info.Attribute; - - do - { - info = node.Value.GetInfo(); - - // Check if the block state matches what we expect. - if ( firstState != info.State || - firstPermission != info.Permission || - (info.Attribute & attributeMask) != attributeExpected || - (firstAttribute | attributeIgnoreMask) != (info.Attribute | attributeIgnoreMask) || - (firstState & stateMask) != stateExpected || - (firstPermission & permissionMask) != permissionExpected) - { - outState = MemoryState.Unmapped; - outPermission = KMemoryPermission.None; - outAttribute = MemoryAttribute.None; - - return false; - } - } - while (info.Address + info.Size - 1 < endAddr - 1 && (node = node.Next) != null); - - outState = firstState; - outPermission = firstPermission; - outAttribute = firstAttribute & ~attributeIgnoreMask; - - return true; - } - - private bool CheckRange( - ulong address, - ulong size, - MemoryState stateMask, - MemoryState stateExpected, - KMemoryPermission permissionMask, - KMemoryPermission permissionExpected, - MemoryAttribute attributeMask, - MemoryAttribute attributeExpected) - { - foreach (KMemoryInfo info in IterateOverRange(address, address + size)) - { - // Check if the block state matches what we expect. - if ((info.State & stateMask) != stateExpected || - (info.Permission & permissionMask) != permissionExpected || - (info.Attribute & attributeMask) != attributeExpected) - { - return false; - } - } - - return true; - } - - private IEnumerable IterateOverRange(ulong start, ulong end) - { - LinkedListNode node = FindBlockNode(start); - - KMemoryInfo info; - - do - { - info = node.Value.GetInfo(); - - yield return info; - } - while (info.Address + info.Size - 1 < end - 1 && (node = node.Next) != null); - } - - private void InsertBlock( - ulong baseAddress, - ulong pagesCount, - MemoryState oldState, - KMemoryPermission oldPermission, - MemoryAttribute oldAttribute, - MemoryState newState, - KMemoryPermission newPermission, - MemoryAttribute newAttribute) - { - // Insert new block on the list only on areas where the state - // of the block matches the state specified on the old* state - // arguments, otherwise leave it as is. 
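// Note the loop guards above compare inclusive end addresses,
// info.Address + info.Size - 1 < endAddr - 1, rather than exclusive ones;
// this avoids ulong wrap-around for ranges touching the very top of the
// address space, the same reason the Inside*/Outside* helpers further down
// use the "address + size - 1 <= end - 1" form.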
- int oldCount = _blocks.Count; - - oldAttribute |= MemoryAttribute.IpcAndDeviceMapped; - - ulong endAddr = baseAddress + pagesCount * PageSize; - - LinkedListNode node = _blocks.First; - - while (node != null) - { - LinkedListNode newNode = node; - - KMemoryBlock currBlock = node.Value; - - ulong currBaseAddr = currBlock.BaseAddress; - ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr; - - if (baseAddress < currEndAddr && currBaseAddr < endAddr) - { - MemoryAttribute currBlockAttr = currBlock.Attribute | MemoryAttribute.IpcAndDeviceMapped; - - if (currBlock.State != oldState || - currBlock.Permission != oldPermission || - currBlockAttr != oldAttribute) - { - node = node.Next; - - continue; - } - - if (baseAddress > currBaseAddr) - { - _blocks.AddBefore(node, currBlock.SplitRightAtAddress(baseAddress)); - } - - if (endAddr < currEndAddr) - { - newNode = _blocks.AddBefore(node, currBlock.SplitRightAtAddress(endAddr)); - } - - newNode.Value.SetState(newPermission, newState, newAttribute); - - newNode = MergeEqualStateNeighbors(newNode); - } - - if (currEndAddr - 1 >= endAddr - 1) - { - break; - } - - node = newNode.Next; - } - - _blockAllocator.Count += _blocks.Count - oldCount; - - ValidateInternalState(); - } - - private void InsertBlock( - ulong baseAddress, - ulong pagesCount, - MemoryState state, - KMemoryPermission permission = KMemoryPermission.None, - MemoryAttribute attribute = MemoryAttribute.None) - { - // Inserts new block at the list, replacing and splitting - // existing blocks as needed. - int oldCount = _blocks.Count; - - ulong endAddr = baseAddress + pagesCount * PageSize; - - LinkedListNode node = _blocks.First; - - while (node != null) - { - LinkedListNode newNode = node; - - KMemoryBlock currBlock = node.Value; - - ulong currBaseAddr = currBlock.BaseAddress; - ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr; - - if (baseAddress < currEndAddr && currBaseAddr < endAddr) - { - if (baseAddress > currBaseAddr) - { - _blocks.AddBefore(node, currBlock.SplitRightAtAddress(baseAddress)); - } - - if (endAddr < currEndAddr) - { - newNode = _blocks.AddBefore(node, currBlock.SplitRightAtAddress(endAddr)); - } - - newNode.Value.SetState(permission, state, attribute); - - newNode = MergeEqualStateNeighbors(newNode); - } - - if (currEndAddr - 1 >= endAddr - 1) - { - break; - } - - node = newNode.Next; - } - - _blockAllocator.Count += _blocks.Count - oldCount; - - ValidateInternalState(); - } - - private static void SetIpcMappingPermissions(KMemoryBlock block, KMemoryPermission permission) - { - block.SetIpcMappingPermission(permission); - } - - private static void RestoreIpcMappingPermissions(KMemoryBlock block, KMemoryPermission permission) - { - block.RestoreIpcMappingPermission(); - } - - private delegate void BlockMutator(KMemoryBlock block, KMemoryPermission newPerm); - - private void InsertBlock( - ulong baseAddress, - ulong pagesCount, - BlockMutator blockMutate, - KMemoryPermission permission = KMemoryPermission.None) - { - // Inserts new block at the list, replacing and splitting - // existing blocks as needed, then calling the callback - // function on the new block. 
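// All three InsertBlock variants lean on the same split primitive. A reduced
// sketch of its behavior (the real KMemoryBlock also carries state,
// permissions, attributes and IPC/device reference counts):
//
//     // Keeps [splitAddr, end) in this block and returns [BaseAddress,
//     // splitAddr), which the caller inserts before this node so the list
//     // stays sorted by address.
//     KMemoryBlock SplitRightAtAddress(ulong splitAddr)
//     {
//         ulong leftPagesCount = (splitAddr - BaseAddress) / PageSize;
//         KMemoryBlock left = WithRange(BaseAddress, leftPagesCount);
//         BaseAddress = splitAddr;
//         PagesCount -= leftPagesCount;
//         return left;
//     }
//
// (WithRange is a stand-in for however the copy is actually constructed.)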
- int oldCount = _blocks.Count; - - ulong endAddr = baseAddress + pagesCount * PageSize; - - LinkedListNode node = _blocks.First; - - while (node != null) - { - LinkedListNode newNode = node; - - KMemoryBlock currBlock = node.Value; - - ulong currBaseAddr = currBlock.BaseAddress; - ulong currEndAddr = currBlock.PagesCount * PageSize + currBaseAddr; - - if (baseAddress < currEndAddr && currBaseAddr < endAddr) - { - if (baseAddress > currBaseAddr) - { - _blocks.AddBefore(node, currBlock.SplitRightAtAddress(baseAddress)); - } - - if (endAddr < currEndAddr) - { - newNode = _blocks.AddBefore(node, currBlock.SplitRightAtAddress(endAddr)); - } - - KMemoryBlock newBlock = newNode.Value; - - blockMutate(newBlock, permission); - - newNode = MergeEqualStateNeighbors(newNode); - } - - if (currEndAddr - 1 >= endAddr - 1) - { - break; - } - - node = newNode.Next; - } - - _blockAllocator.Count += _blocks.Count - oldCount; - - ValidateInternalState(); - } - - [Conditional("DEBUG")] - private void ValidateInternalState() - { - ulong expectedAddress = 0; - - LinkedListNode node = _blocks.First; - - while (node != null) - { - LinkedListNode newNode = node; - - KMemoryBlock currBlock = node.Value; - - Debug.Assert(currBlock.BaseAddress == expectedAddress); - - expectedAddress = currBlock.BaseAddress + currBlock.PagesCount * PageSize; - - node = newNode.Next; - } - - Debug.Assert(expectedAddress == AddrSpaceEnd); - } - - private LinkedListNode MergeEqualStateNeighbors(LinkedListNode node) - { - KMemoryBlock block = node.Value; - - if (node.Previous != null) - { - KMemoryBlock previousBlock = node.Previous.Value; - - if (BlockStateEquals(block, previousBlock)) - { - LinkedListNode previousNode = node.Previous; - - _blocks.Remove(node); - - previousBlock.AddPages(block.PagesCount); - - node = previousNode; - block = previousBlock; - } - } - - if (node.Next != null) - { - KMemoryBlock nextBlock = node.Next.Value; - - if (BlockStateEquals(block, nextBlock)) - { - _blocks.Remove(node.Next); - - block.AddPages(nextBlock.PagesCount); - } - } - - return node; - } - - private static bool BlockStateEquals(KMemoryBlock lhs, KMemoryBlock rhs) - { - return lhs.State == rhs.State && - lhs.Permission == rhs.Permission && - lhs.Attribute == rhs.Attribute && - lhs.SourcePermission == rhs.SourcePermission && - lhs.DeviceRefCount == rhs.DeviceRefCount && - lhs.IpcRefCount == rhs.IpcRefCount; - } - - private ulong AllocateVa( - ulong regionStart, - ulong regionPagesCount, - ulong neededPagesCount, - int alignment) - { - ulong address = 0; - - ulong regionEndAddr = regionStart + regionPagesCount * PageSize; - - ulong reservedPagesCount = _isKernel ? 
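// The ternary below selects the reserved (guard) pages placed before each
// allocation: one page for the kernel, four for user processes. FindFirstFit
// offsets every candidate address by this reserved size, so allocations never
// start flush against the beginning of a free block.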
1UL : 4UL; - - if (_aslrEnabled) - { - ulong totalNeededSize = (reservedPagesCount + neededPagesCount) * PageSize; - - ulong remainingPages = regionPagesCount - neededPagesCount; - - ulong aslrMaxOffset = ((remainingPages + reservedPagesCount) * PageSize) / (ulong)alignment; - - for (int attempt = 0; attempt < 8; attempt++) - { - address = BitUtils.AlignDown(regionStart + GetRandomValue(0, aslrMaxOffset) * (ulong)alignment, alignment); - - ulong endAddr = address + totalNeededSize; - - KMemoryInfo info = FindBlock(address).GetInfo(); - - if (info.State != MemoryState.Unmapped) - { - continue; - } - - ulong currBaseAddr = info.Address + reservedPagesCount * PageSize; - ulong currEndAddr = info.Address + info.Size; - - if (address >= regionStart && - address >= currBaseAddr && - endAddr - 1 <= regionEndAddr - 1 && - endAddr - 1 <= currEndAddr - 1) - { - break; - } - } - - if (address == 0) - { - ulong aslrPage = GetRandomValue(0, aslrMaxOffset); - - address = FindFirstFit( - regionStart + aslrPage * PageSize, - regionPagesCount - aslrPage, - neededPagesCount, - alignment, - 0, - reservedPagesCount); - } - } - - if (address == 0) - { - address = FindFirstFit( - regionStart, - regionPagesCount, - neededPagesCount, - alignment, - 0, - reservedPagesCount); - } - - return address; - } - - private ulong FindFirstFit( - ulong regionStart, - ulong regionPagesCount, - ulong neededPagesCount, - int alignment, - ulong reservedStart, - ulong reservedPagesCount) - { - ulong reservedSize = reservedPagesCount * PageSize; - - ulong totalNeededSize = reservedSize + neededPagesCount * PageSize; - - ulong regionEndAddr = regionStart + regionPagesCount * PageSize; - - LinkedListNode node = FindBlockNode(regionStart); - - KMemoryInfo info = node.Value.GetInfo(); - - while (regionEndAddr >= info.Address) - { - if (info.State == MemoryState.Unmapped) - { - ulong currBaseAddr = info.Address + reservedSize; - ulong currEndAddr = info.Address + info.Size - 1; - - ulong address = BitUtils.AlignDown(currBaseAddr, alignment) + reservedStart; - - if (currBaseAddr > address) - { - address += (ulong)alignment; - } - - ulong allocationEndAddr = address + totalNeededSize - 1; - - if (allocationEndAddr <= regionEndAddr && - allocationEndAddr <= currEndAddr && - address < allocationEndAddr) - { - return address; - } - } - - node = node.Next; - - if (node == null) - { - break; - } - - info = node.Value.GetInfo(); - } - - return 0; - } - - private KMemoryBlock FindBlock(ulong address) - { - return FindBlockNode(address)?.Value; - } - - private LinkedListNode FindBlockNode(ulong address) - { - lock (_blocks) - { - LinkedListNode node = _blocks.First; - - while (node != null) + if (address >= region.Address && address < region.EndAddr) { - KMemoryBlock block = node.Value; - - ulong currEndAddr = block.PagesCount * PageSize + block.BaseAddress; - - if (block.BaseAddress <= address && currEndAddr - 1 >= address) - { - return node; - } - - node = node.Next; + return region; } } return null; } - public bool CanContain(ulong address, ulong size, MemoryState state) + public void IncrementPagesReferenceCount(ulong address, ulong pagesCount) { - ulong endAddr = address + size; - - ulong regionBaseAddr = GetBaseAddress(state); - ulong regionEndAddr = regionBaseAddr + GetSize(state); - - bool InsideRegion() - { - return regionBaseAddr <= address && - endAddr > address && - endAddr - 1 <= regionEndAddr - 1; - } - - bool OutsideHeapRegion() => endAddr <= HeapRegionStart || address >= HeapRegionEnd; - bool OutsideAliasRegion() => endAddr <= 
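// ASLR allocation above proceeds in three stages: up to eight random,
// alignment-rounded candidates are probed directly; failing that,
// FindFirstFit is run from a randomly chosen starting page; and as a last
// resort a plain first-fit over the whole region is used.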
AliasRegionStart || address >= AliasRegionEnd; - - switch (state) - { - case MemoryState.Io: - case MemoryState.Normal: - case MemoryState.CodeStatic: - case MemoryState.CodeMutable: - case MemoryState.SharedMemory: - case MemoryState.ModCodeStatic: - case MemoryState.ModCodeMutable: - case MemoryState.Stack: - case MemoryState.ThreadLocal: - case MemoryState.TransferMemoryIsolated: - case MemoryState.TransferMemory: - case MemoryState.ProcessMemory: - case MemoryState.CodeReadOnly: - case MemoryState.CodeWritable: - return InsideRegion() && OutsideHeapRegion() && OutsideAliasRegion(); - - case MemoryState.Heap: - return InsideRegion() && OutsideAliasRegion(); - - case MemoryState.IpcBuffer0: - case MemoryState.IpcBuffer1: - case MemoryState.IpcBuffer3: - return InsideRegion() && OutsideHeapRegion(); - - case MemoryState.KernelStack: - return InsideRegion(); - } - - throw new ArgumentException($"Invalid state value \"{state}\"."); + IncrementOrDecrementPagesReferenceCount(address, pagesCount, true); } - private ulong GetBaseAddress(MemoryState state) + public void DecrementPagesReferenceCount(ulong address, ulong pagesCount) { - switch (state) - { - case MemoryState.Io: - case MemoryState.Normal: - case MemoryState.ThreadLocal: - return TlsIoRegionStart; - - case MemoryState.CodeStatic: - case MemoryState.CodeMutable: - case MemoryState.SharedMemory: - case MemoryState.ModCodeStatic: - case MemoryState.ModCodeMutable: - case MemoryState.TransferMemoryIsolated: - case MemoryState.TransferMemory: - case MemoryState.ProcessMemory: - case MemoryState.CodeReadOnly: - case MemoryState.CodeWritable: - return GetAddrSpaceBaseAddr(); - - case MemoryState.Heap: - return HeapRegionStart; - - case MemoryState.IpcBuffer0: - case MemoryState.IpcBuffer1: - case MemoryState.IpcBuffer3: - return AliasRegionStart; - - case MemoryState.Stack: - return StackRegionStart; - - case MemoryState.KernelStack: - return AddrSpaceStart; - } - - throw new ArgumentException($"Invalid state value \"{state}\"."); + IncrementOrDecrementPagesReferenceCount(address, pagesCount, false); } - private ulong GetSize(MemoryState state) + private void IncrementOrDecrementPagesReferenceCount(ulong address, ulong pagesCount, bool increment) { - switch (state) + while (pagesCount != 0) { - case MemoryState.Io: - case MemoryState.Normal: - case MemoryState.ThreadLocal: - return TlsIoRegionEnd - TlsIoRegionStart; + var region = GetMemoryRegion(address); - case MemoryState.CodeStatic: - case MemoryState.CodeMutable: - case MemoryState.SharedMemory: - case MemoryState.ModCodeStatic: - case MemoryState.ModCodeMutable: - case MemoryState.TransferMemoryIsolated: - case MemoryState.TransferMemory: - case MemoryState.ProcessMemory: - case MemoryState.CodeReadOnly: - case MemoryState.CodeWritable: - return GetAddrSpaceSize(); + ulong countToProcess = Math.Min(pagesCount, region.GetPageOffsetFromEnd(address)); - case MemoryState.Heap: - return HeapRegionEnd - HeapRegionStart; - - case MemoryState.IpcBuffer0: - case MemoryState.IpcBuffer1: - case MemoryState.IpcBuffer3: - return AliasRegionEnd - AliasRegionStart; - - case MemoryState.Stack: - return StackRegionEnd - StackRegionStart; - - case MemoryState.KernelStack: - return AddrSpaceEnd - AddrSpaceStart; - } - - throw new ArgumentException($"Invalid state value \"{state}\"."); - } - - public ulong GetAddrSpaceBaseAddr() - { - if (AddrSpaceWidth == 36 || AddrSpaceWidth == 39) - { - return 0x8000000; - } - else if (AddrSpaceWidth == 32) - { - return 0x200000; - } - else - { - throw new 
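// The new IncrementOrDecrementPagesReferenceCount above walks the physical
// range one memory region at a time: GetPageOffsetFromEnd caps each chunk at
// the owning region's end, the region lock is taken per chunk, and the cursor
// then advances by countToProcess pages until the whole range is covered.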
InvalidOperationException("Invalid address space width!"); - } - } - - public ulong GetAddrSpaceSize() - { - if (AddrSpaceWidth == 36) - { - return 0xff8000000; - } - else if (AddrSpaceWidth == 39) - { - return 0x7ff8000000; - } - else if (AddrSpaceWidth == 32) - { - return 0xffe00000; - } - else - { - throw new InvalidOperationException("Invalid address space width!"); - } - } - - private KernelResult MapPages(ulong address, KPageList pageList, KMemoryPermission permission) - { - ulong currAddr = address; - - KernelResult result = KernelResult.Success; - - foreach (KPageNode pageNode in pageList) - { - result = DoMmuOperation( - currAddr, - pageNode.PagesCount, - pageNode.Address, - true, - permission, - MemoryOperation.MapPa); - - if (result != KernelResult.Success) + lock (region) { - KMemoryInfo info = FindBlock(currAddr).GetInfo(); - - ulong pagesCount = (address - currAddr) / PageSize; - - result = MmuUnmap(address, pagesCount); - - break; - } - - currAddr += pageNode.PagesCount * PageSize; - } - - return result; - } - - private KernelResult MmuUnmap(ulong address, ulong pagesCount) - { - return DoMmuOperation( - address, - pagesCount, - 0, - false, - KMemoryPermission.None, - MemoryOperation.Unmap); - } - - private KernelResult MmuChangePermission(ulong address, ulong pagesCount, KMemoryPermission permission) - { - return DoMmuOperation( - address, - pagesCount, - 0, - false, - permission, - MemoryOperation.ChangePermRw); - } - - private KernelResult DoMmuOperation( - ulong dstVa, - ulong pagesCount, - ulong srcPa, - bool map, - KMemoryPermission permission, - MemoryOperation operation) - { - if (map != (operation == MemoryOperation.MapPa)) - { - throw new ArgumentException(nameof(map) + " value is invalid for this operation."); - } - - KernelResult result; - - switch (operation) - { - case MemoryOperation.MapPa: - { - ulong size = pagesCount * PageSize; - - _cpuMemory.Map(dstVa, srcPa - DramMemoryMap.DramBase, size); - - result = KernelResult.Success; - - break; - } - - case MemoryOperation.Allocate: - { - KMemoryRegionManager region = GetMemoryRegionManager(); - - result = region.AllocatePages(pagesCount, _aslrDisabled, out KPageList pageList); - - if (result == KernelResult.Success) + if (increment) { - result = MmuMapPages(dstVa, pageList); + region.IncrementPagesReferenceCount(address, countToProcess); + } + else + { + region.DecrementPagesReferenceCount(address, countToProcess); } - - break; } - case MemoryOperation.Unmap: - { - ulong size = pagesCount * PageSize; - - _cpuMemory.Unmap(dstVa, size); - - result = KernelResult.Success; - - break; - } - - case MemoryOperation.ChangePermRw: result = KernelResult.Success; break; - case MemoryOperation.ChangePermsAndAttributes: result = KernelResult.Success; break; - - default: throw new ArgumentException($"Invalid operation \"{operation}\"."); + pagesCount -= countToProcess; + address += countToProcess * KPageTableBase.PageSize; } - - return result; - } - - private KernelResult DoMmuOperation( - ulong address, - ulong pagesCount, - KPageList pageList, - KMemoryPermission permission, - MemoryOperation operation) - { - if (operation != MemoryOperation.MapVa) - { - throw new ArgumentException($"Invalid memory operation \"{operation}\" specified."); - } - - return MmuMapPages(address, pageList); - } - - private KMemoryRegionManager GetMemoryRegionManager() - { - return _context.MemoryRegions[(int)_memRegion]; - } - - private KernelResult MmuMapPages(ulong address, KPageList pageList) - { - foreach (KPageNode pageNode in pageList) - { - 
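// Mapping a physical page here means converting the guest physical address
// into an offset inside the host backing allocation, i.e.
// pa - DramMemoryMap.DramBase, which is exactly what GetDramAddressFromPa
// further down computes.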
ulong size = pageNode.PagesCount * PageSize; - - _cpuMemory.Map(address, pageNode.Address - DramMemoryMap.DramBase, size); - - address += size; - } - - return KernelResult.Success; - } - - public ulong GetDramAddressFromVa(ulong va) - { - return _cpuMemory.GetPhysicalAddress(va); - } - - public ulong ConvertVaToPa(ulong va) - { - if (!TryConvertVaToPa(va, out ulong pa)) - { - throw new ArgumentException($"Invalid virtual address 0x{va:X} specified."); - } - - return pa; - } - - public bool TryConvertVaToPa(ulong va, out ulong pa) - { - pa = DramMemoryMap.DramBase + _cpuMemory.GetPhysicalAddress(va); - - return true; - } - - public static ulong GetDramAddressFromPa(ulong pa) - { - return pa - DramMemoryMap.DramBase; - } - - public long GetMmUsedPages() - { - lock (_blocks) - { - return BitUtils.DivRoundUp(GetMmUsedSize(), PageSize); - } - } - - private long GetMmUsedSize() - { - return _blocks.Count * KMemoryBlockSize; - } - - public bool IsInvalidRegion(ulong address, ulong size) - { - return address + size - 1 > GetAddrSpaceBaseAddr() + GetAddrSpaceSize() - 1; - } - - public bool InsideAddrSpace(ulong address, ulong size) - { - return AddrSpaceStart <= address && address + size - 1 <= AddrSpaceEnd - 1; - } - - public bool InsideAliasRegion(ulong address, ulong size) - { - return address + size > AliasRegionStart && AliasRegionEnd > address; - } - - public bool InsideHeapRegion(ulong address, ulong size) - { - return address + size > HeapRegionStart && HeapRegionEnd > address; - } - - public bool InsideStackRegion(ulong address, ulong size) - { - return address + size > StackRegionStart && StackRegionEnd > address; - } - - public bool OutsideAliasRegion(ulong address, ulong size) - { - return AliasRegionStart > address || address + size - 1 > AliasRegionEnd - 1; - } - - public bool OutsideAddrSpace(ulong address, ulong size) - { - return AddrSpaceStart > address || address + size - 1 > AddrSpaceEnd - 1; - } - - public bool OutsideStackRegion(ulong address, ulong size) - { - return StackRegionStart > address || address + size - 1 > StackRegionEnd - 1; } } -} \ No newline at end of file +} diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs index bb4989fcb1..f35a3c3631 100644 --- a/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs +++ b/Ryujinx.HLE/HOS/Kernel/Memory/KMemoryRegionManager.cs @@ -1,5 +1,6 @@ using Ryujinx.Common; using Ryujinx.HLE.HOS.Kernel.Common; +using System.Diagnostics; namespace Ryujinx.HLE.HOS.Kernel.Memory { @@ -13,7 +14,9 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory private int _blockOrdersCount; - private KMemoryRegionBlock[] _blocks; + private readonly KMemoryRegionBlock[] _blocks; + + private readonly ushort[] _pageReferenceCounts; public KMemoryRegionManager(ulong address, ulong size, ulong endAddr) { @@ -80,9 +83,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory } } + _pageReferenceCounts = new ushort[size / KPageTableBase.PageSize]; + if (size != 0) { - FreePages(address, size / KMemoryManager.PageSize); + FreePages(address, size / KPageTableBase.PageSize); } } @@ -90,15 +95,33 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory { lock (_blocks) { - return AllocatePagesImpl(pagesCount, backwards, out pageList); + KernelResult result = AllocatePagesImpl(pagesCount, backwards, out pageList); + + if (result == KernelResult.Success) + { + foreach (var node in pageList) + { + IncrementPagesReferenceCount(node.Address, node.PagesCount); + } + } + + return result; } } - public ulong 
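// The new _pageReferenceCounts array above gives every page in the region a
// ushort counter, indexed by page offset from the region base:
//
//     ushort[] counts = new ushort[regionSize / PageSize];
//     ulong index = (pa - regionBase) / PageSize; // one counter per page
//
// AllocatePages now takes a reference on each allocated node, so frees are
// driven by counts reaching zero rather than by explicit caller frees.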
AllocatePagesContiguous(ulong pagesCount, bool backwards) + public ulong AllocatePagesContiguous(KernelContext context, ulong pagesCount, bool backwards) { lock (_blocks) { - return AllocatePagesContiguousImpl(pagesCount, backwards); + ulong address = AllocatePagesContiguousImpl(pagesCount, backwards); + + if (address != 0) + { + IncrementPagesReferenceCount(address, pagesCount); + context.Memory.Commit(address - DramMemoryMap.DramBase, pagesCount * KPageTableBase.PageSize); + } + + return address; } } @@ -124,7 +147,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory ulong bestFitBlockSize = 1UL << block.Order; - ulong blockPagesCount = bestFitBlockSize / KMemoryManager.PageSize; + ulong blockPagesCount = bestFitBlockSize / KPageTableBase.PageSize; // Check if this is the best fit for this page size. // If so, try allocating as much requested pages as possible. @@ -185,7 +208,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory int blockIndex = 0; - while ((1UL << _blocks[blockIndex].Order) / KMemoryManager.PageSize < pagesCount) + while ((1UL << _blocks[blockIndex].Order) / KPageTableBase.PageSize < pagesCount) { if (++blockIndex >= _blocks.Length) { @@ -197,11 +220,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory ulong address = AllocatePagesForOrder(blockIndex, backwards, tightestFitBlockSize); - ulong requiredSize = pagesCount * KMemoryManager.PageSize; + ulong requiredSize = pagesCount * KPageTableBase.PageSize; if (address != 0 && tightestFitBlockSize > requiredSize) { - FreePages(address + requiredSize, (tightestFitBlockSize - requiredSize) / KMemoryManager.PageSize); + FreePages(address + requiredSize, (tightestFitBlockSize - requiredSize) / KPageTableBase.PageSize); } return address; @@ -327,137 +350,121 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory if (firstFreeBlockSize > bestFitBlockSize) { - FreePages(address + bestFitBlockSize, (firstFreeBlockSize - bestFitBlockSize) / KMemoryManager.PageSize); + FreePages(address + bestFitBlockSize, (firstFreeBlockSize - bestFitBlockSize) / KPageTableBase.PageSize); } } return address; } - public void FreePage(ulong address) - { - lock (_blocks) - { - FreePages(address, 1); - } - } - - public void FreePages(KPageList pageList) - { - lock (_blocks) - { - foreach (KPageNode pageNode in pageList) - { - FreePages(pageNode.Address, pageNode.PagesCount); - } - } - } - private void FreePages(ulong address, ulong pagesCount) { - ulong endAddr = address + pagesCount * KMemoryManager.PageSize; - - int blockIndex = _blockOrdersCount - 1; - - ulong addressRounded = 0; - ulong endAddrTruncated = 0; - - for (; blockIndex >= 0; blockIndex--) + lock (_blocks) { - KMemoryRegionBlock allocInfo = _blocks[blockIndex]; + ulong endAddr = address + pagesCount * KPageTableBase.PageSize; - int blockSize = 1 << allocInfo.Order; + int blockIndex = _blockOrdersCount - 1; - addressRounded = BitUtils.AlignUp (address, blockSize); - endAddrTruncated = BitUtils.AlignDown(endAddr, blockSize); + ulong addressRounded = 0; + ulong endAddrTruncated = 0; - if (addressRounded < endAddrTruncated) + for (; blockIndex >= 0; blockIndex--) { - break; - } - } + KMemoryRegionBlock allocInfo = _blocks[blockIndex]; - void FreeRegion(ulong currAddress) - { - for (int currBlockIndex = blockIndex; - currBlockIndex < _blockOrdersCount && currAddress != 0; - currBlockIndex++) - { - KMemoryRegionBlock block = _blocks[currBlockIndex]; + int blockSize = 1 << allocInfo.Order; - block.FreeCount++; + addressRounded = BitUtils.AlignUp (address, blockSize); + endAddrTruncated = BitUtils.AlignDown(endAddr, blockSize); - 
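// Contiguous allocation above works on buddy orders: blockIndex is advanced
// to the smallest order whose block size covers pagesCount, and when the
// chosen block is larger than the request the unused tail
// (tightestFitBlockSize - requiredSize) is immediately returned via FreePages.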
ulong freedBlocks = (currAddress - block.StartAligned) >> block.Order; - - int index = (int)freedBlocks; - - for (int level = block.MaxLevel - 1; level >= 0; level--, index /= 64) - { - long mask = block.Masks[level][index / 64]; - - block.Masks[level][index / 64] = mask | (1L << (index & 63)); - - if (mask != 0) - { - break; - } - } - - int blockSizeDelta = 1 << (block.NextOrder - block.Order); - - int freedBlocksTruncated = BitUtils.AlignDown((int)freedBlocks, blockSizeDelta); - - if (!block.TryCoalesce(freedBlocksTruncated, blockSizeDelta)) + if (addressRounded < endAddrTruncated) { break; } - - currAddress = block.StartAligned + ((ulong)freedBlocksTruncated << block.Order); } - } - // Free inside aligned region. - ulong baseAddress = addressRounded; - - while (baseAddress < endAddrTruncated) - { - ulong blockSize = 1UL << _blocks[blockIndex].Order; - - FreeRegion(baseAddress); - - baseAddress += blockSize; - } - - int nextBlockIndex = blockIndex - 1; - - // Free region between Address and aligned region start. - baseAddress = addressRounded; - - for (blockIndex = nextBlockIndex; blockIndex >= 0; blockIndex--) - { - ulong blockSize = 1UL << _blocks[blockIndex].Order; - - while (baseAddress - blockSize >= address) + void FreeRegion(ulong currAddress) { - baseAddress -= blockSize; + for (int currBlockIndex = blockIndex; + currBlockIndex < _blockOrdersCount && currAddress != 0; + currBlockIndex++) + { + KMemoryRegionBlock block = _blocks[currBlockIndex]; - FreeRegion(baseAddress); + block.FreeCount++; + + ulong freedBlocks = (currAddress - block.StartAligned) >> block.Order; + + int index = (int)freedBlocks; + + for (int level = block.MaxLevel - 1; level >= 0; level--, index /= 64) + { + long mask = block.Masks[level][index / 64]; + + block.Masks[level][index / 64] = mask | (1L << (index & 63)); + + if (mask != 0) + { + break; + } + } + + int blockSizeDelta = 1 << (block.NextOrder - block.Order); + + int freedBlocksTruncated = BitUtils.AlignDown((int)freedBlocks, blockSizeDelta); + + if (!block.TryCoalesce(freedBlocksTruncated, blockSizeDelta)) + { + break; + } + + currAddress = block.StartAligned + ((ulong)freedBlocksTruncated << block.Order); + } } - } - // Free region between aligned region end and End Address. - baseAddress = endAddrTruncated; + // Free inside aligned region. + ulong baseAddress = addressRounded; - for (blockIndex = nextBlockIndex; blockIndex >= 0; blockIndex--) - { - ulong blockSize = 1UL << _blocks[blockIndex].Order; - - while (baseAddress + blockSize <= endAddr) + while (baseAddress < endAddrTruncated) { + ulong blockSize = 1UL << _blocks[blockIndex].Order; + FreeRegion(baseAddress); baseAddress += blockSize; } + + int nextBlockIndex = blockIndex - 1; + + // Free region between Address and aligned region start. + baseAddress = addressRounded; + + for (blockIndex = nextBlockIndex; blockIndex >= 0; blockIndex--) + { + ulong blockSize = 1UL << _blocks[blockIndex].Order; + + while (baseAddress - blockSize >= address) + { + baseAddress -= blockSize; + + FreeRegion(baseAddress); + } + } + + // Free region between aligned region end and End Address. 
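// FreeRegion above also maintains the hierarchical free bitmap: a bit is set
// for the freed block and the update propagates upward level by level, but
// stops as soon as a level's word was already nonzero, since the levels above
// it then already record free space in that subtree. TryCoalesce afterwards
// merges freed buddies into blocks of the next order.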
+ baseAddress = endAddrTruncated; + + for (blockIndex = nextBlockIndex; blockIndex >= 0; blockIndex--) + { + ulong blockSize = 1UL << _blocks[blockIndex].Order; + + while (baseAddress + blockSize <= endAddr) + { + FreeRegion(baseAddress); + + baseAddress += blockSize; + } + } } } @@ -477,12 +484,76 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory { KMemoryRegionBlock block = _blocks[blockIndex]; - ulong blockPagesCount = (1UL << block.Order) / KMemoryManager.PageSize; + ulong blockPagesCount = (1UL << block.Order) / KPageTableBase.PageSize; availablePages += blockPagesCount * block.FreeCount; } return availablePages; } + + public void IncrementPagesReferenceCount(ulong address, ulong pagesCount) + { + ulong index = GetPageOffset(address); + ulong endIndex = index + pagesCount; + + while (index < endIndex) + { + ushort referenceCount = ++_pageReferenceCounts[index]; + Debug.Assert(referenceCount >= 1); + + index++; + } + } + + public void DecrementPagesReferenceCount(ulong address, ulong pagesCount) + { + ulong index = GetPageOffset(address); + ulong endIndex = index + pagesCount; + + ulong freeBaseIndex = 0; + ulong freePagesCount = 0; + + while (index < endIndex) + { + Debug.Assert(_pageReferenceCounts[index] > 0); + ushort referenceCount = --_pageReferenceCounts[index]; + + if (referenceCount == 0) + { + if (freePagesCount != 0) + { + freePagesCount++; + } + else + { + freeBaseIndex = index; + freePagesCount = 1; + } + } + else if (freePagesCount != 0) + { + FreePages(Address + freeBaseIndex * KPageTableBase.PageSize, freePagesCount); + freePagesCount = 0; + } + + index++; + } + + if (freePagesCount != 0) + { + FreePages(Address + freeBaseIndex * KPageTableBase.PageSize, freePagesCount); + } + } + + public ulong GetPageOffset(ulong address) + { + return (address - Address) / KPageTableBase.PageSize; + } + + public ulong GetPageOffsetFromEnd(ulong address) + { + return (EndAddr - address) / KPageTableBase.PageSize; + } } } \ No newline at end of file diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KPageList.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KPageList.cs index f0935dcc1c..7f2f1ba67b 100644 --- a/Ryujinx.HLE/HOS/Kernel/Memory/KPageList.cs +++ b/Ryujinx.HLE/HOS/Kernel/Memory/KPageList.cs @@ -6,7 +6,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory { class KPageList : IEnumerable { - public LinkedList Nodes { get; private set; } + public LinkedList Nodes { get; } public KPageList() { @@ -21,7 +21,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory { KPageNode lastNode = Nodes.Last.Value; - if (lastNode.Address + lastNode.PagesCount * KMemoryManager.PageSize == address) + if (lastNode.Address + lastNode.PagesCount * KPageTableBase.PageSize == address) { address = lastNode.Address; pagesCount += lastNode.PagesCount; @@ -68,6 +68,22 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory return thisNode == null && otherNode == null; } + public void IncrementPagesReferenceCount(KMemoryManager manager) + { + foreach (var node in this) + { + manager.IncrementPagesReferenceCount(node.Address, node.PagesCount); + } + } + + public void DecrementPagesReferenceCount(KMemoryManager manager) + { + foreach (var node in this) + { + manager.DecrementPagesReferenceCount(node.Address, node.PagesCount); + } + } + public IEnumerator GetEnumerator() { return Nodes.GetEnumerator(); diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs new file mode 100644 index 0000000000..20a13f5769 --- /dev/null +++ b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs @@ -0,0 +1,221 @@ +using 
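// On DecrementPagesReferenceCount above: consecutive pages whose count
// reaches zero are batched into a single FreePages call. freeBaseIndex and
// freePagesCount track the current run, which is flushed whenever a page that
// is still referenced interrupts it, and once more after the loop for any
// trailing run.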
Ryujinx.HLE.HOS.Kernel.Common; +using Ryujinx.Memory; +using Ryujinx.Memory.Range; +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; + +namespace Ryujinx.HLE.HOS.Kernel.Memory +{ + class KPageTable : KPageTableBase + { + private readonly IVirtualMemoryManager _cpuMemory; + + public override bool SupportsMemoryAliasing => true; + + public KPageTable(KernelContext context, IVirtualMemoryManager cpuMemory) : base(context) + { + _cpuMemory = cpuMemory; + } + + /// + protected override IEnumerable GetPhysicalRegions(ulong va, ulong size) + { + return _cpuMemory.GetPhysicalRegions(va, size); + } + + /// + protected override ReadOnlySpan GetSpan(ulong va, int size) + { + return _cpuMemory.GetSpan(va, size); + } + + /// + protected override KernelResult MapMemory(ulong src, ulong dst, ulong pagesCount, KMemoryPermission oldSrcPermission, KMemoryPermission newDstPermission) + { + var srcRanges = GetPhysicalRegions(src, pagesCount * PageSize); + + KernelResult result = Reprotect(src, pagesCount, KMemoryPermission.None); + + if (result != KernelResult.Success) + { + return result; + } + + result = MapPages(dst, srcRanges, newDstPermission); + + if (result != KernelResult.Success) + { + KernelResult reprotectResult = Reprotect(src, pagesCount, oldSrcPermission); + Debug.Assert(reprotectResult == KernelResult.Success); + } + + return result; + } + + /// + protected override KernelResult UnmapMemory(ulong dst, ulong src, ulong pagesCount, KMemoryPermission oldDstPermission, KMemoryPermission newSrcPermission) + { + ulong size = pagesCount * PageSize; + + var srcRanges = GetPhysicalRegions(src, size); + var dstRanges = GetPhysicalRegions(dst, size); + + if (!dstRanges.SequenceEqual(srcRanges)) + { + return KernelResult.InvalidMemRange; + } + + KernelResult result = Unmap(dst, pagesCount); + + if (result != KernelResult.Success) + { + return result; + } + + result = Reprotect(src, pagesCount, newSrcPermission); + + if (result != KernelResult.Success) + { + KernelResult mapResult = MapPages(dst, dstRanges, oldDstPermission); + Debug.Assert(mapResult == KernelResult.Success); + } + + return result; + } + + /// + protected override KernelResult MapPages(ulong dstVa, ulong pagesCount, ulong srcPa, KMemoryPermission permission) + { + ulong size = pagesCount * PageSize; + + Context.Memory.Commit(srcPa - DramMemoryMap.DramBase, size); + + _cpuMemory.Map(dstVa, Context.Memory.GetPointer(srcPa - DramMemoryMap.DramBase, size), size); + + if (DramMemoryMap.IsHeapPhysicalAddress(srcPa)) + { + Context.MemoryManager.IncrementPagesReferenceCount(srcPa, pagesCount); + } + + return KernelResult.Success; + } + + /// + protected override KernelResult MapPages(ulong address, KPageList pageList, KMemoryPermission permission) + { + using var scopedPageList = new KScopedPageList(Context.MemoryManager, pageList); + + ulong currentVa = address; + + foreach (var pageNode in pageList) + { + ulong addr = pageNode.Address - DramMemoryMap.DramBase; + ulong size = pageNode.PagesCount * PageSize; + + Context.Memory.Commit(addr, size); + + _cpuMemory.Map(currentVa, Context.Memory.GetPointer(addr, size), size); + + currentVa += size; + } + + scopedPageList.SignalSuccess(); + + return KernelResult.Success; + } + + /// + protected override KernelResult MapPages(ulong address, IEnumerable ranges, KMemoryPermission permission) + { + ulong currentVa = address; + + foreach (var range in ranges) + { + ulong size = range.Size; + + ulong pa = GetDramAddressFromHostAddress(range.Address); + if (pa != 
ulong.MaxValue) + { + pa += DramMemoryMap.DramBase; + if (DramMemoryMap.IsHeapPhysicalAddress(pa)) + { + Context.MemoryManager.IncrementPagesReferenceCount(pa, size / PageSize); + } + } + + _cpuMemory.Map(currentVa, range.Address, size); + + currentVa += size; + } + + return KernelResult.Success; + } + + /// + protected override KernelResult Unmap(ulong address, ulong pagesCount) + { + KPageList pagesToClose = new KPageList(); + + var regions = _cpuMemory.GetPhysicalRegions(address, pagesCount * PageSize); + + foreach (var region in regions) + { + ulong pa = GetDramAddressFromHostAddress(region.Address); + if (pa == ulong.MaxValue) + { + continue; + } + + pa += DramMemoryMap.DramBase; + if (DramMemoryMap.IsHeapPhysicalAddress(pa)) + { + pagesToClose.AddRange(pa, region.Size / PageSize); + } + } + + _cpuMemory.Unmap(address, pagesCount * PageSize); + + pagesToClose.DecrementPagesReferenceCount(Context.MemoryManager); + + return KernelResult.Success; + } + + /// + protected override KernelResult Reprotect(ulong address, ulong pagesCount, KMemoryPermission permission) + { + // TODO. + return KernelResult.Success; + } + + /// + protected override KernelResult ReprotectWithAttributes(ulong address, ulong pagesCount, KMemoryPermission permission) + { + // TODO. + return KernelResult.Success; + } + + /// + protected override void SignalMemoryTracking(ulong va, ulong size, bool write) + { + _cpuMemory.SignalMemoryTracking(va, size, write); + } + + /// + protected override void Write(ulong va, ReadOnlySpan data) + { + _cpuMemory.Write(va, data); + } + + private ulong GetDramAddressFromHostAddress(nuint hostAddress) + { + if (hostAddress < (nuint)(ulong)Context.Memory.Pointer || hostAddress >= (nuint)((ulong)Context.Memory.Pointer + Context.Memory.Size)) + { + return ulong.MaxValue; + } + + return hostAddress - (ulong)Context.Memory.Pointer; + } + } +} diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs new file mode 100644 index 0000000000..a2db8dcc12 --- /dev/null +++ b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs @@ -0,0 +1,2797 @@ +using Ryujinx.Common; +using Ryujinx.HLE.HOS.Kernel.Common; +using Ryujinx.HLE.HOS.Kernel.Process; +using Ryujinx.Memory.Range; +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Linq; + +namespace Ryujinx.HLE.HOS.Kernel.Memory +{ + abstract class KPageTableBase + { + private static readonly int[] MappingUnitSizes = new int[] + { + 0x1000, + 0x10000, + 0x200000, + 0x400000, + 0x2000000, + 0x40000000 + }; + + public const int PageSize = 0x1000; + + private const int KMemoryBlockSize = 0x40; + + // We need 2 blocks for the case where a big block + // needs to be split in 2, plus one block that will be the new one inserted. 
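// GetDramAddressFromHostAddress above is the inverse direction: a host
// pointer maps back to a DRAM offset only if it falls inside the backing
// allocation [Context.Memory.Pointer, Pointer + Size); anything else (for
// example, host pages that were never guest RAM) yields ulong.MaxValue,
// which the callers in MapPages and Unmap simply skip.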
+ private const int MaxBlocksNeededForInsertion = 2; + + protected readonly KernelContext Context; + + public ulong AddrSpaceStart { get; private set; } + public ulong AddrSpaceEnd { get; private set; } + + public ulong CodeRegionStart { get; private set; } + public ulong CodeRegionEnd { get; private set; } + + public ulong HeapRegionStart { get; private set; } + public ulong HeapRegionEnd { get; private set; } + + private ulong _currentHeapAddr; + + public ulong AliasRegionStart { get; private set; } + public ulong AliasRegionEnd { get; private set; } + + public ulong StackRegionStart { get; private set; } + public ulong StackRegionEnd { get; private set; } + + public ulong TlsIoRegionStart { get; private set; } + public ulong TlsIoRegionEnd { get; private set; } + + private ulong _heapCapacity; + + public ulong PhysicalMemoryUsage { get; private set; } + + private readonly KMemoryBlockManager _blockManager; + + private MemoryRegion _memRegion; + + private bool _aslrDisabled; + + public int AddrSpaceWidth { get; private set; } + + private bool _isKernel; + + private bool _aslrEnabled; + + private KMemoryBlockSlabManager _slabManager; + + private int _contextId; + + private MersenneTwister _randomNumberGenerator; + + public abstract bool SupportsMemoryAliasing { get; } + + public KPageTableBase(KernelContext context) + { + Context = context; + + _blockManager = new KMemoryBlockManager(); + + _isKernel = false; + } + + private static readonly int[] AddrSpaceSizes = new int[] { 32, 36, 32, 39 }; + + public KernelResult InitializeForProcess( + AddressSpaceType addrSpaceType, + bool aslrEnabled, + bool aslrDisabled, + MemoryRegion memRegion, + ulong address, + ulong size, + KMemoryBlockSlabManager slabManager) + { + if ((uint)addrSpaceType > (uint)AddressSpaceType.Addr39Bits) + { + throw new ArgumentException(nameof(addrSpaceType)); + } + + _contextId = Context.ContextIdManager.GetId(); + + ulong addrSpaceBase = 0; + ulong addrSpaceSize = 1UL << AddrSpaceSizes[(int)addrSpaceType]; + + KernelResult result = CreateUserAddressSpace( + addrSpaceType, + aslrEnabled, + aslrDisabled, + addrSpaceBase, + addrSpaceSize, + memRegion, + address, + size, + slabManager); + + if (result != KernelResult.Success) + { + Context.ContextIdManager.PutId(_contextId); + } + + return result; + } + + private class Region + { + public ulong Start; + public ulong End; + public ulong Size; + public ulong AslrOffset; + } + + private KernelResult CreateUserAddressSpace( + AddressSpaceType addrSpaceType, + bool aslrEnabled, + bool aslrDisabled, + ulong addrSpaceStart, + ulong addrSpaceEnd, + MemoryRegion memRegion, + ulong address, + ulong size, + KMemoryBlockSlabManager slabManager) + { + ulong endAddr = address + size; + + Region aliasRegion = new Region(); + Region heapRegion = new Region(); + Region stackRegion = new Region(); + Region tlsIoRegion = new Region(); + + ulong codeRegionSize; + ulong stackAndTlsIoStart; + ulong stackAndTlsIoEnd; + ulong baseAddress; + + switch (addrSpaceType) + { + case AddressSpaceType.Addr32Bits: + aliasRegion.Size = 0x40000000; + heapRegion.Size = 0x40000000; + stackRegion.Size = 0; + tlsIoRegion.Size = 0; + CodeRegionStart = 0x200000; + codeRegionSize = 0x3fe00000; + stackAndTlsIoStart = 0x200000; + stackAndTlsIoEnd = 0x40000000; + baseAddress = 0x200000; + AddrSpaceWidth = 32; + break; + + case AddressSpaceType.Addr36Bits: + aliasRegion.Size = 0x180000000; + heapRegion.Size = 0x180000000; + stackRegion.Size = 0; + tlsIoRegion.Size = 0; + CodeRegionStart = 0x8000000; + codeRegionSize = 
0x78000000; + stackAndTlsIoStart = 0x8000000; + stackAndTlsIoEnd = 0x80000000; + baseAddress = 0x8000000; + AddrSpaceWidth = 36; + break; + + case AddressSpaceType.Addr32BitsNoMap: + aliasRegion.Size = 0; + heapRegion.Size = 0x80000000; + stackRegion.Size = 0; + tlsIoRegion.Size = 0; + CodeRegionStart = 0x200000; + codeRegionSize = 0x3fe00000; + stackAndTlsIoStart = 0x200000; + stackAndTlsIoEnd = 0x40000000; + baseAddress = 0x200000; + AddrSpaceWidth = 32; + break; + + case AddressSpaceType.Addr39Bits: + aliasRegion.Size = 0x1000000000; + heapRegion.Size = 0x180000000; + stackRegion.Size = 0x80000000; + tlsIoRegion.Size = 0x1000000000; + CodeRegionStart = BitUtils.AlignDown(address, 0x200000); + codeRegionSize = BitUtils.AlignUp(endAddr, 0x200000) - CodeRegionStart; + stackAndTlsIoStart = 0; + stackAndTlsIoEnd = 0; + baseAddress = 0x8000000; + AddrSpaceWidth = 39; + break; + + default: throw new ArgumentException(nameof(addrSpaceType)); + } + + CodeRegionEnd = CodeRegionStart + codeRegionSize; + + ulong mapBaseAddress; + ulong mapAvailableSize; + + if (CodeRegionStart - baseAddress >= addrSpaceEnd - CodeRegionEnd) + { + // Has more space before the start of the code region. + mapBaseAddress = baseAddress; + mapAvailableSize = CodeRegionStart - baseAddress; + } + else + { + // Has more space after the end of the code region. + mapBaseAddress = CodeRegionEnd; + mapAvailableSize = addrSpaceEnd - CodeRegionEnd; + } + + ulong mapTotalSize = aliasRegion.Size + heapRegion.Size + stackRegion.Size + tlsIoRegion.Size; + + ulong aslrMaxOffset = mapAvailableSize - mapTotalSize; + + _aslrEnabled = aslrEnabled; + + AddrSpaceStart = addrSpaceStart; + AddrSpaceEnd = addrSpaceEnd; + + _slabManager = slabManager; + + if (mapAvailableSize < mapTotalSize) + { + return KernelResult.OutOfMemory; + } + + if (aslrEnabled) + { + aliasRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21; + heapRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21; + stackRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21; + tlsIoRegion.AslrOffset = GetRandomValue(0, aslrMaxOffset >> 21) << 21; + } + + // Regions are sorted based on ASLR offset. + // When ASLR is disabled, the order is Map, Heap, NewMap and TlsIo. 
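// The ASLR offsets above are drawn as GetRandomValue(0, aslrMaxOffset >> 21) << 21,
// i.e. uniformly over the available slack in 2 MiB (1 << 21) steps, so each
// region lands 2 MiB aligned inside [mapBaseAddress, mapBaseAddress + mapAvailableSize).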
+ aliasRegion.Start = mapBaseAddress + aliasRegion.AslrOffset; + aliasRegion.End = aliasRegion.Start + aliasRegion.Size; + heapRegion.Start = mapBaseAddress + heapRegion.AslrOffset; + heapRegion.End = heapRegion.Start + heapRegion.Size; + stackRegion.Start = mapBaseAddress + stackRegion.AslrOffset; + stackRegion.End = stackRegion.Start + stackRegion.Size; + tlsIoRegion.Start = mapBaseAddress + tlsIoRegion.AslrOffset; + tlsIoRegion.End = tlsIoRegion.Start + tlsIoRegion.Size; + + SortRegion(heapRegion, aliasRegion); + + if (stackRegion.Size != 0) + { + SortRegion(stackRegion, aliasRegion); + SortRegion(stackRegion, heapRegion); + } + else + { + stackRegion.Start = stackAndTlsIoStart; + stackRegion.End = stackAndTlsIoEnd; + } + + if (tlsIoRegion.Size != 0) + { + SortRegion(tlsIoRegion, aliasRegion); + SortRegion(tlsIoRegion, heapRegion); + SortRegion(tlsIoRegion, stackRegion); + } + else + { + tlsIoRegion.Start = stackAndTlsIoStart; + tlsIoRegion.End = stackAndTlsIoEnd; + } + + AliasRegionStart = aliasRegion.Start; + AliasRegionEnd = aliasRegion.End; + HeapRegionStart = heapRegion.Start; + HeapRegionEnd = heapRegion.End; + StackRegionStart = stackRegion.Start; + StackRegionEnd = stackRegion.End; + TlsIoRegionStart = tlsIoRegion.Start; + TlsIoRegionEnd = tlsIoRegion.End; + + _currentHeapAddr = HeapRegionStart; + _heapCapacity = 0; + PhysicalMemoryUsage = 0; + + _memRegion = memRegion; + _aslrDisabled = aslrDisabled; + + return _blockManager.Initialize(addrSpaceStart, addrSpaceEnd, slabManager); + } + + private ulong GetRandomValue(ulong min, ulong max) + { + return (ulong)GetRandomValue((long)min, (long)max); + } + + private long GetRandomValue(long min, long max) + { + if (_randomNumberGenerator == null) + { + _randomNumberGenerator = new MersenneTwister(0); + } + + return _randomNumberGenerator.GenRandomNumber(min, max); + } + + private static void SortRegion(Region lhs, Region rhs) + { + if (lhs.AslrOffset < rhs.AslrOffset) + { + rhs.Start += lhs.Size; + rhs.End += lhs.Size; + } + else + { + lhs.Start += rhs.Size; + lhs.End += rhs.Size; + } + } + + public KernelResult MapPages(ulong address, KPageList pageList, MemoryState state, KMemoryPermission permission) + { + ulong pagesCount = pageList.GetPagesCount(); + + ulong size = pagesCount * PageSize; + + if (!CanContain(address, size, state)) + { + return KernelResult.InvalidMemState; + } + + lock (_blockManager) + { + if (!IsUnmapped(address, pagesCount * PageSize)) + { + return KernelResult.InvalidMemState; + } + + if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion)) + { + return KernelResult.OutOfResource; + } + + KernelResult result = MapPages(address, pageList, permission); + + if (result == KernelResult.Success) + { + _blockManager.InsertBlock(address, pagesCount, state, permission); + } + + return result; + } + } + + public KernelResult UnmapPages(ulong address, ulong pagesCount, IEnumerable ranges, MemoryState stateExpected) + { + ulong size = pagesCount * PageSize; + + ulong endAddr = address + size; + + ulong addrSpacePagesCount = (AddrSpaceEnd - AddrSpaceStart) / PageSize; + + if (AddrSpaceStart > address) + { + return KernelResult.InvalidMemState; + } + + if (addrSpacePagesCount < pagesCount) + { + return KernelResult.InvalidMemState; + } + + if (endAddr - 1 > AddrSpaceEnd - 1) + { + return KernelResult.InvalidMemState; + } + + lock (_blockManager) + { + var currentRanges = GetPhysicalRegions(address, size); + + if (!currentRanges.SequenceEqual(ranges)) + { + return KernelResult.InvalidMemRange; + } + + if (CheckRange( + 
address, + size, + MemoryState.Mask, + stateExpected, + KMemoryPermission.None, + KMemoryPermission.None, + MemoryAttribute.Mask, + MemoryAttribute.None, + MemoryAttribute.IpcAndDeviceMapped, + out MemoryState state, + out _, + out _)) + { + if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion)) + { + return KernelResult.OutOfResource; + } + + KernelResult result = Unmap(address, pagesCount); + + if (result == KernelResult.Success) + { + _blockManager.InsertBlock(address, pagesCount, MemoryState.Unmapped); + } + + return result; + } + else + { + return KernelResult.InvalidMemState; + } + } + } + + public KernelResult MapNormalMemory(long address, long size, KMemoryPermission permission) + { + // TODO. + return KernelResult.Success; + } + + public KernelResult MapIoMemory(long address, long size, KMemoryPermission permission) + { + // TODO. + return KernelResult.Success; + } + + public KernelResult MapPages( + ulong pagesCount, + int alignment, + ulong srcPa, + bool paIsValid, + ulong regionStart, + ulong regionPagesCount, + MemoryState state, + KMemoryPermission permission, + out ulong address) + { + address = 0; + + ulong regionSize = regionPagesCount * PageSize; + + if (!CanContain(regionStart, regionSize, state)) + { + return KernelResult.InvalidMemState; + } + + if (regionPagesCount <= pagesCount) + { + return KernelResult.OutOfMemory; + } + + lock (_blockManager) + { + address = AllocateVa(regionStart, regionPagesCount, pagesCount, alignment); + + if (address == 0) + { + return KernelResult.OutOfMemory; + } + + if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion)) + { + return KernelResult.OutOfResource; + } + + KernelResult result; + + if (paIsValid) + { + result = MapPages(address, pagesCount, srcPa, permission); + } + else + { + result = AllocateAndMapPages(address, pagesCount, permission); + } + + if (result != KernelResult.Success) + { + return result; + } + + _blockManager.InsertBlock(address, pagesCount, state, permission); + } + + return KernelResult.Success; + } + + public KernelResult MapPages(ulong address, ulong pagesCount, MemoryState state, KMemoryPermission permission) + { + ulong size = pagesCount * PageSize; + + if (!CanContain(address, size, state)) + { + return KernelResult.InvalidMemState; + } + + lock (_blockManager) + { + if (!IsUnmapped(address, size)) + { + return KernelResult.InvalidMemState; + } + + if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion)) + { + return KernelResult.OutOfResource; + } + + KernelResult result = AllocateAndMapPages(address, pagesCount, permission); + + if (result == KernelResult.Success) + { + _blockManager.InsertBlock(address, pagesCount, state, permission); + } + + return result; + } + } + + private KernelResult AllocateAndMapPages(ulong address, ulong pagesCount, KMemoryPermission permission) + { + KMemoryRegionManager region = GetMemoryRegionManager(); + + KernelResult result = region.AllocatePages(pagesCount, _aslrDisabled, out KPageList pageList); + + if (result != KernelResult.Success) + { + return result; + } + + using var _ = new OnScopeExit(() => pageList.DecrementPagesReferenceCount(Context.MemoryManager)); + + return MapPages(address, pageList, permission); + } + + public KernelResult MapProcessCodeMemory(ulong dst, ulong src, ulong size) + { + lock (_blockManager) + { + bool success = CheckRange( + src, + size, + MemoryState.Mask, + MemoryState.Heap, + KMemoryPermission.Mask, + KMemoryPermission.ReadAndWrite, + MemoryAttribute.Mask, + MemoryAttribute.None, + MemoryAttribute.IpcAndDeviceMapped, + out 
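// AllocateAndMapPages above leans on the new reference counts for cleanup:
// AllocatePages leaves each page with one reference, and the OnScopeExit
// drops it when the method returns. Judging by its use in KPageTable.MapPages,
// the KScopedPageList on the mapping path takes its own reference and keeps
// it once SignalSuccess is called, so successfully mapped pages survive while
// any failure drops the last reference and frees the pages automatically.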
MemoryState state,
+                    out KMemoryPermission permission,
+                    out _);
+
+                success &= IsUnmapped(dst, size);
+
+                if (success)
+                {
+                    if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion * 2))
+                    {
+                        return KernelResult.OutOfResource;
+                    }
+
+                    ulong pagesCount = size / PageSize;
+
+                    KernelResult result = MapMemory(src, dst, pagesCount, permission, KMemoryPermission.None);
+
+                    if (result != KernelResult.Success)
+                    {
+                        return result;
+                    }
+
+                    _blockManager.InsertBlock(src, pagesCount, state, KMemoryPermission.None, MemoryAttribute.Borrowed);
+                    _blockManager.InsertBlock(dst, pagesCount, MemoryState.ModCodeStatic);
+
+                    return KernelResult.Success;
+                }
+                else
+                {
+                    return KernelResult.InvalidMemState;
+                }
+            }
+        }
+
+        public KernelResult UnmapProcessCodeMemory(ulong dst, ulong src, ulong size)
+        {
+            lock (_blockManager)
+            {
+                bool success = CheckRange(
+                    src,
+                    size,
+                    MemoryState.Mask,
+                    MemoryState.Heap,
+                    KMemoryPermission.None,
+                    KMemoryPermission.None,
+                    MemoryAttribute.Mask,
+                    MemoryAttribute.Borrowed,
+                    MemoryAttribute.IpcAndDeviceMapped,
+                    out _,
+                    out _,
+                    out _);
+
+                success &= CheckRange(
+                    dst,
+                    PageSize,
+                    MemoryState.UnmapProcessCodeMemoryAllowed,
+                    MemoryState.UnmapProcessCodeMemoryAllowed,
+                    KMemoryPermission.None,
+                    KMemoryPermission.None,
+                    MemoryAttribute.Mask,
+                    MemoryAttribute.None,
+                    MemoryAttribute.IpcAndDeviceMapped,
+                    out MemoryState state,
+                    out _,
+                    out _);
+
+                success &= CheckRange(
+                    dst,
+                    size,
+                    MemoryState.Mask,
+                    state,
+                    KMemoryPermission.None,
+                    KMemoryPermission.None,
+                    MemoryAttribute.Mask,
+                    MemoryAttribute.None);
+
+                if (success)
+                {
+                    ulong pagesCount = size / PageSize;
+
+                    KernelResult result = Unmap(dst, pagesCount);
+
+                    if (result != KernelResult.Success)
+                    {
+                        return result;
+                    }
+
+                    // TODO: Missing some checks here.
+
+                    if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion * 2))
+                    {
+                        return KernelResult.OutOfResource;
+                    }
+
+                    _blockManager.InsertBlock(dst, pagesCount, MemoryState.Unmapped);
+                    _blockManager.InsertBlock(src, pagesCount, MemoryState.Heap, KMemoryPermission.ReadAndWrite);
+
+                    return KernelResult.Success;
+                }
+                else
+                {
+                    return KernelResult.InvalidMemState;
+                }
+            }
+        }
+
+        public KernelResult SetHeapSize(ulong size, out ulong address)
+        {
+            address = 0;
+
+            if (size > HeapRegionEnd - HeapRegionStart)
+            {
+                return KernelResult.OutOfMemory;
+            }
+
+            KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+            lock (_blockManager)
+            {
+                ulong currentHeapSize = GetHeapSize();
+
+                if (currentHeapSize <= size)
+                {
+                    // Expand.
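+                    // Editor's note, an illustrative walk-through (not part of the original
+                    // patch): on a fresh process, a guest svcSetHeapSize(0x2000000) call
+                    // reaches this branch with currentHeapSize == 0, so sizeDelta below is
+                    // 0x2000000. The delta is reserved against the resource limit, 0x2000
+                    // pages are allocated and mapped at _currentHeapAddr, and the heap
+                    // cursor then advances to HeapRegionStart + size.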
+ ulong sizeDelta = size - currentHeapSize; + + if (currentProcess.ResourceLimit != null && sizeDelta != 0 && + !currentProcess.ResourceLimit.Reserve(LimitableResource.Memory, sizeDelta)) + { + return KernelResult.ResLimitExceeded; + } + + ulong pagesCount = sizeDelta / PageSize; + + KMemoryRegionManager region = GetMemoryRegionManager(); + + KernelResult result = region.AllocatePages(pagesCount, _aslrDisabled, out KPageList pageList); + + using var _ = new OnScopeExit(() => pageList.DecrementPagesReferenceCount(Context.MemoryManager)); + + void CleanUpForError() + { + if (currentProcess.ResourceLimit != null && sizeDelta != 0) + { + currentProcess.ResourceLimit.Release(LimitableResource.Memory, sizeDelta); + } + } + + if (result != KernelResult.Success) + { + CleanUpForError(); + + return result; + } + + if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion)) + { + CleanUpForError(); + + return KernelResult.OutOfResource; + } + + if (!IsUnmapped(_currentHeapAddr, sizeDelta)) + { + CleanUpForError(); + + return KernelResult.InvalidMemState; + } + + result = MapPages(_currentHeapAddr, pageList, KMemoryPermission.ReadAndWrite); + + if (result != KernelResult.Success) + { + CleanUpForError(); + + return result; + } + + _blockManager.InsertBlock(_currentHeapAddr, pagesCount, MemoryState.Heap, KMemoryPermission.ReadAndWrite); + } + else + { + // Shrink. + ulong freeAddr = HeapRegionStart + size; + ulong sizeDelta = currentHeapSize - size; + + if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion)) + { + return KernelResult.OutOfResource; + } + + if (!CheckRange( + freeAddr, + sizeDelta, + MemoryState.Mask, + MemoryState.Heap, + KMemoryPermission.Mask, + KMemoryPermission.ReadAndWrite, + MemoryAttribute.Mask, + MemoryAttribute.None, + MemoryAttribute.IpcAndDeviceMapped, + out _, + out _, + out _)) + { + return KernelResult.InvalidMemState; + } + + ulong pagesCount = sizeDelta / PageSize; + + KernelResult result = Unmap(freeAddr, pagesCount); + + if (result != KernelResult.Success) + { + return result; + } + + currentProcess.ResourceLimit?.Release(LimitableResource.Memory, sizeDelta); + + _blockManager.InsertBlock(freeAddr, pagesCount, MemoryState.Unmapped); + } + + _currentHeapAddr = HeapRegionStart + size; + } + + address = HeapRegionStart; + + return KernelResult.Success; + } + + public ulong GetTotalHeapSize() + { + lock (_blockManager) + { + return GetHeapSize() + PhysicalMemoryUsage; + } + } + + private ulong GetHeapSize() + { + return _currentHeapAddr - HeapRegionStart; + } + + public KernelResult SetHeapCapacity(ulong capacity) + { + lock (_blockManager) + { + _heapCapacity = capacity; + } + + return KernelResult.Success; + } + + public KernelResult SetMemoryAttribute( + ulong address, + ulong size, + MemoryAttribute attributeMask, + MemoryAttribute attributeValue) + { + lock (_blockManager) + { + if (CheckRange( + address, + size, + MemoryState.AttributeChangeAllowed, + MemoryState.AttributeChangeAllowed, + KMemoryPermission.None, + KMemoryPermission.None, + MemoryAttribute.BorrowedAndIpcMapped, + MemoryAttribute.None, + MemoryAttribute.DeviceMappedAndUncached, + out MemoryState state, + out KMemoryPermission permission, + out MemoryAttribute attribute)) + { + if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion)) + { + return KernelResult.OutOfResource; + } + + ulong pagesCount = size / PageSize; + + attribute &= ~attributeMask; + attribute |= attributeMask & attributeValue; + + _blockManager.InsertBlock(address, pagesCount, state, permission, attribute); + + return 
KernelResult.Success;
+                }
+                else
+                {
+                    return KernelResult.InvalidMemState;
+                }
+            }
+        }
+
+        public KMemoryInfo QueryMemory(ulong address)
+        {
+            if (address >= AddrSpaceStart &&
+                address < AddrSpaceEnd)
+            {
+                lock (_blockManager)
+                {
+                    return _blockManager.FindBlock(address).GetInfo();
+                }
+            }
+            else
+            {
+                return new KMemoryInfo(
+                    AddrSpaceEnd,
+                    ~AddrSpaceEnd + 1,
+                    MemoryState.Reserved,
+                    KMemoryPermission.None,
+                    MemoryAttribute.None,
+                    KMemoryPermission.None,
+                    0,
+                    0);
+            }
+        }
+
+        public KernelResult Map(ulong dst, ulong src, ulong size)
+        {
+            bool success;
+
+            lock (_blockManager)
+            {
+                success = CheckRange(
+                    src,
+                    size,
+                    MemoryState.MapAllowed,
+                    MemoryState.MapAllowed,
+                    KMemoryPermission.Mask,
+                    KMemoryPermission.ReadAndWrite,
+                    MemoryAttribute.Mask,
+                    MemoryAttribute.None,
+                    MemoryAttribute.IpcAndDeviceMapped,
+                    out MemoryState srcState,
+                    out _,
+                    out _);
+
+                success &= IsUnmapped(dst, size);
+
+                if (success)
+                {
+                    if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion * 2))
+                    {
+                        return KernelResult.OutOfResource;
+                    }
+
+                    ulong pagesCount = size / PageSize;
+
+                    KernelResult result = MapMemory(src, dst, pagesCount, KMemoryPermission.ReadAndWrite, KMemoryPermission.ReadAndWrite);
+
+                    if (result != KernelResult.Success)
+                    {
+                        return result;
+                    }
+
+                    _blockManager.InsertBlock(src, pagesCount, srcState, KMemoryPermission.None, MemoryAttribute.Borrowed);
+                    _blockManager.InsertBlock(dst, pagesCount, MemoryState.Stack, KMemoryPermission.ReadAndWrite);
+
+                    return KernelResult.Success;
+                }
+                else
+                {
+                    return KernelResult.InvalidMemState;
+                }
+            }
+        }
+
+        public KernelResult UnmapForKernel(ulong address, ulong pagesCount, MemoryState stateExpected)
+        {
+            ulong size = pagesCount * PageSize;
+
+            lock (_blockManager)
+            {
+                if (CheckRange(
+                    address,
+                    size,
+                    MemoryState.Mask,
+                    stateExpected,
+                    KMemoryPermission.None,
+                    KMemoryPermission.None,
+                    MemoryAttribute.Mask,
+                    MemoryAttribute.None,
+                    MemoryAttribute.IpcAndDeviceMapped,
+                    out _,
+                    out _,
+                    out _))
+                {
+                    if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+                    {
+                        return KernelResult.OutOfResource;
+                    }
+
+                    KernelResult result = Unmap(address, pagesCount);
+
+                    if (result == KernelResult.Success)
+                    {
+                        _blockManager.InsertBlock(address, pagesCount, MemoryState.Unmapped);
+                    }
+
+                    return result;
+                }
+                else
+                {
+                    return KernelResult.InvalidMemState;
+                }
+            }
+        }
+
+        public KernelResult Unmap(ulong dst, ulong src, ulong size)
+        {
+            bool success;
+
+            lock (_blockManager)
+            {
+                success = CheckRange(
+                    src,
+                    size,
+                    MemoryState.MapAllowed,
+                    MemoryState.MapAllowed,
+                    KMemoryPermission.Mask,
+                    KMemoryPermission.None,
+                    MemoryAttribute.Mask,
+                    MemoryAttribute.Borrowed,
+                    MemoryAttribute.IpcAndDeviceMapped,
+                    out MemoryState srcState,
+                    out _,
+                    out _);
+
+                success &= CheckRange(
+                    dst,
+                    size,
+                    MemoryState.Mask,
+                    MemoryState.Stack,
+                    KMemoryPermission.None,
+                    KMemoryPermission.None,
+                    MemoryAttribute.Mask,
+                    MemoryAttribute.None,
+                    MemoryAttribute.IpcAndDeviceMapped,
+                    out _,
+                    out KMemoryPermission dstPermission,
+                    out _);
+
+                if (success)
+                {
+                    if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion * 2))
+                    {
+                        return KernelResult.OutOfResource;
+                    }
+
+                    ulong pagesCount = size / PageSize;
+
+                    KernelResult result = UnmapMemory(dst, src, pagesCount, dstPermission, KMemoryPermission.ReadAndWrite);
+
+                    if (result != KernelResult.Success)
+                    {
+                        return result;
+                    }
+
+                    _blockManager.InsertBlock(src, pagesCount, srcState, KMemoryPermission.ReadAndWrite);
+                    _blockManager.InsertBlock(dst, pagesCount, MemoryState.Unmapped);
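+
+                    // Editor's note: the two inserts above undo the effects of Map() on
+                    // both regions - the source block regains ReadAndWrite and sheds its
+                    // Borrowed attribute, while the stack mirror at dst becomes Unmapped.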
+ + return KernelResult.Success; + } + else + { + return KernelResult.InvalidMemState; + } + } + } + + public KernelResult SetProcessMemoryPermission(ulong address, ulong size, KMemoryPermission permission) + { + lock (_blockManager) + { + if (CheckRange( + address, + size, + MemoryState.ProcessPermissionChangeAllowed, + MemoryState.ProcessPermissionChangeAllowed, + KMemoryPermission.None, + KMemoryPermission.None, + MemoryAttribute.Mask, + MemoryAttribute.None, + MemoryAttribute.IpcAndDeviceMapped, + out MemoryState oldState, + out KMemoryPermission oldPermission, + out _)) + { + MemoryState newState = oldState; + + // If writing into the code region is allowed, then we need + // to change it to mutable. + if ((permission & KMemoryPermission.Write) != 0) + { + if (oldState == MemoryState.CodeStatic) + { + newState = MemoryState.CodeMutable; + } + else if (oldState == MemoryState.ModCodeStatic) + { + newState = MemoryState.ModCodeMutable; + } + else + { + throw new InvalidOperationException($"Memory state \"{oldState}\" not valid for this operation."); + } + } + + if (newState != oldState || permission != oldPermission) + { + if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion)) + { + return KernelResult.OutOfResource; + } + + ulong pagesCount = size / PageSize; + + KernelResult result; + + if ((oldPermission & KMemoryPermission.Execute) != 0) + { + result = ReprotectWithAttributes(address, pagesCount, permission); + } + else + { + result = Reprotect(address, pagesCount, permission); + } + + if (result != KernelResult.Success) + { + return result; + } + + _blockManager.InsertBlock(address, pagesCount, newState, permission); + } + + return KernelResult.Success; + } + else + { + return KernelResult.InvalidMemState; + } + } + } + + public KernelResult MapPhysicalMemory(ulong address, ulong size) + { + ulong endAddr = address + size; + + lock (_blockManager) + { + ulong mappedSize = 0; + + foreach (KMemoryInfo info in IterateOverRange(address, endAddr)) + { + if (info.State != MemoryState.Unmapped) + { + mappedSize += GetSizeInRange(info, address, endAddr); + } + } + + if (mappedSize == size) + { + return KernelResult.Success; + } + + ulong remainingSize = size - mappedSize; + + ulong remainingPages = remainingSize / PageSize; + + KProcess currentProcess = KernelStatic.GetCurrentProcess(); + + if (currentProcess.ResourceLimit != null && + !currentProcess.ResourceLimit.Reserve(LimitableResource.Memory, remainingSize)) + { + return KernelResult.ResLimitExceeded; + } + + KMemoryRegionManager region = GetMemoryRegionManager(); + + KernelResult result = region.AllocatePages(remainingPages, _aslrDisabled, out KPageList pageList); + + using var _ = new OnScopeExit(() => pageList.DecrementPagesReferenceCount(Context.MemoryManager)); + + void CleanUpForError() + { + currentProcess.ResourceLimit?.Release(LimitableResource.Memory, remainingSize); + } + + if (result != KernelResult.Success) + { + CleanUpForError(); + + return result; + } + + if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion)) + { + CleanUpForError(); + + return KernelResult.OutOfResource; + } + + LinkedListNode pageListNode = pageList.Nodes.First; + + KPageNode pageNode = pageListNode.Value; + + ulong srcPa = pageNode.Address; + ulong srcPaPages = pageNode.PagesCount; + + foreach (KMemoryInfo info in IterateOverRange(address, endAddr)) + { + if (info.State != MemoryState.Unmapped) + { + continue; + } + + ulong blockSize = GetSizeInRange(info, address, endAddr); + + ulong dstVaPages = blockSize / PageSize; + + ulong dstVa = 
GetAddrInRange(info, address); + + while (dstVaPages > 0) + { + if (srcPaPages == 0) + { + pageListNode = pageListNode.Next; + + pageNode = pageListNode.Value; + + srcPa = pageNode.Address; + srcPaPages = pageNode.PagesCount; + } + + ulong currentPagesCount = Math.Min(srcPaPages, dstVaPages); + + MapPages(dstVa, currentPagesCount, srcPa, KMemoryPermission.ReadAndWrite); + + dstVa += currentPagesCount * PageSize; + srcPa += currentPagesCount * PageSize; + srcPaPages -= currentPagesCount; + dstVaPages -= currentPagesCount; + } + } + + PhysicalMemoryUsage += remainingSize; + + ulong pagesCount = size / PageSize; + + _blockManager.InsertBlock( + address, + pagesCount, + MemoryState.Unmapped, + KMemoryPermission.None, + MemoryAttribute.None, + MemoryState.Heap, + KMemoryPermission.ReadAndWrite, + MemoryAttribute.None); + } + + return KernelResult.Success; + } + + public KernelResult UnmapPhysicalMemory(ulong address, ulong size) + { + ulong endAddr = address + size; + + lock (_blockManager) + { + // Scan, ensure that the region can be unmapped (all blocks are heap or + // already unmapped), fill pages list for freeing memory. + ulong heapMappedSize = 0; + + foreach (KMemoryInfo info in IterateOverRange(address, endAddr)) + { + if (info.State == MemoryState.Heap) + { + if (info.Attribute != MemoryAttribute.None) + { + return KernelResult.InvalidMemState; + } + + ulong blockSize = GetSizeInRange(info, address, endAddr); + + heapMappedSize += blockSize; + } + else if (info.State != MemoryState.Unmapped) + { + return KernelResult.InvalidMemState; + } + } + + if (heapMappedSize == 0) + { + return KernelResult.Success; + } + + if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion)) + { + return KernelResult.OutOfResource; + } + + // Try to unmap all the heap mapped memory inside range. + KernelResult result = KernelResult.Success; + + foreach (KMemoryInfo info in IterateOverRange(address, endAddr)) + { + if (info.State == MemoryState.Heap) + { + ulong blockSize = GetSizeInRange(info, address, endAddr); + ulong blockAddress = GetAddrInRange(info, address); + + ulong blockPagesCount = blockSize / PageSize; + + result = Unmap(blockAddress, blockPagesCount); + + // The kernel would attempt to remap if this fails, but we don't because: + // - The implementation may not support remapping if memory aliasing is not supported on the platform. + // - Unmap can't ever fail here anyway. + Debug.Assert(result == KernelResult.Success); + } + } + + if (result == KernelResult.Success) + { + PhysicalMemoryUsage -= heapMappedSize; + + KProcess currentProcess = KernelStatic.GetCurrentProcess(); + + currentProcess.ResourceLimit?.Release(LimitableResource.Memory, heapMappedSize); + + ulong pagesCount = size / PageSize; + + _blockManager.InsertBlock(address, pagesCount, MemoryState.Unmapped); + } + + return result; + } + } + + public KernelResult CopyDataToCurrentProcess( + ulong dst, + ulong size, + ulong src, + MemoryState stateMask, + MemoryState stateExpected, + KMemoryPermission permission, + MemoryAttribute attributeMask, + MemoryAttribute attributeExpected) + { + // Client -> server. 
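+            // Editor's sketch (hypothetical values, not from the original patch): an HLE
+            // service reading a 0x1000-byte buffer out of client memory would call
+            //
+            //     clientMemoryManager.CopyDataToCurrentProcess(
+            //         serverVa, 0x1000, clientVa,
+            //         MemoryState.Mask, MemoryState.IpcBuffer1,
+            //         KMemoryPermission.Read,
+            //         MemoryAttribute.None, MemoryAttribute.None);
+            //
+            // which forwards to CopyDataFromOrToCurrentProcess with toServer: true.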
+            return CopyDataFromOrToCurrentProcess(
+                size,
+                src,
+                dst,
+                stateMask,
+                stateExpected,
+                permission,
+                attributeMask,
+                attributeExpected,
+                toServer: true);
+        }
+
+        public KernelResult CopyDataFromCurrentProcess(
+            ulong dst,
+            ulong size,
+            MemoryState stateMask,
+            MemoryState stateExpected,
+            KMemoryPermission permission,
+            MemoryAttribute attributeMask,
+            MemoryAttribute attributeExpected,
+            ulong src)
+        {
+            // Server -> client.
+            return CopyDataFromOrToCurrentProcess(
+                size,
+                dst,
+                src,
+                stateMask,
+                stateExpected,
+                permission,
+                attributeMask,
+                attributeExpected,
+                toServer: false);
+        }
+
+        private KernelResult CopyDataFromOrToCurrentProcess(
+            ulong size,
+            ulong clientAddress,
+            ulong serverAddress,
+            MemoryState stateMask,
+            MemoryState stateExpected,
+            KMemoryPermission permission,
+            MemoryAttribute attributeMask,
+            MemoryAttribute attributeExpected,
+            bool toServer)
+        {
+            if (AddrSpaceStart > clientAddress)
+            {
+                return KernelResult.InvalidMemState;
+            }
+
+            ulong srcEndAddr = clientAddress + size;
+
+            if (srcEndAddr <= clientAddress || srcEndAddr - 1 > AddrSpaceEnd - 1)
+            {
+                return KernelResult.InvalidMemState;
+            }
+
+            lock (_blockManager)
+            {
+                if (CheckRange(
+                    clientAddress,
+                    size,
+                    stateMask,
+                    stateExpected,
+                    permission,
+                    permission,
+                    attributeMask | MemoryAttribute.Uncached,
+                    attributeExpected))
+                {
+                    KProcess currentProcess = KernelStatic.GetCurrentProcess();
+
+                    while (size > 0)
+                    {
+                        ulong copySize = 0x100000; // Copy chunk size. Any value will do; moderate sizes are recommended.
+
+                        if (copySize > size)
+                        {
+                            copySize = size;
+                        }
+
+                        if (toServer)
+                        {
+                            currentProcess.CpuMemory.Write(serverAddress, GetSpan(clientAddress, (int)copySize));
+                        }
+                        else
+                        {
+                            Write(clientAddress, currentProcess.CpuMemory.GetSpan(serverAddress, (int)copySize));
+                        }
+
+                        serverAddress += copySize;
+                        clientAddress += copySize;
+                        size -= copySize;
+                    }
+
+                    return KernelResult.Success;
+                }
+                else
+                {
+                    return KernelResult.InvalidMemState;
+                }
+            }
+        }
+
+        public KernelResult MapBufferFromClientProcess(
+            ulong size,
+            ulong src,
+            KPageTableBase srcPageTable,
+            KMemoryPermission permission,
+            MemoryState state,
+            bool send,
+            out ulong dst)
+        {
+            dst = 0;
+
+            lock (srcPageTable._blockManager)
+            {
+                lock (_blockManager)
+                {
+                    KernelResult result = srcPageTable.ReprotectClientProcess(
+                        src,
+                        size,
+                        permission,
+                        state,
+                        out int blocksNeeded);
+
+                    if (result != KernelResult.Success)
+                    {
+                        return result;
+                    }
+
+                    if (!srcPageTable._slabManager.CanAllocate(blocksNeeded))
+                    {
+                        return KernelResult.OutOfResource;
+                    }
+
+                    ulong srcMapAddress = BitUtils.AlignUp(src, PageSize);
+                    ulong srcMapEndAddr = BitUtils.AlignDown(src + size, PageSize);
+                    ulong srcMapSize = srcMapEndAddr - srcMapAddress;
+
+                    result = MapPagesFromClientProcess(size, src, permission, state, srcPageTable, send, out ulong va);
+
+                    if (result != KernelResult.Success)
+                    {
+                        if (srcMapEndAddr > srcMapAddress)
+                        {
+                            srcPageTable.UnmapIpcRestorePermission(src, size, state);
+                        }
+
+                        return result;
+                    }
+
+                    if (srcMapAddress < srcMapEndAddr)
+                    {
+                        KMemoryPermission permissionMask = permission == KMemoryPermission.ReadAndWrite
+                            ?
KMemoryPermission.None + : KMemoryPermission.Read; + + srcPageTable._blockManager.InsertBlock(srcMapAddress, srcMapSize / PageSize, SetIpcMappingPermissions, permissionMask); + } + + dst = va; + } + } + + return KernelResult.Success; + } + + private KernelResult ReprotectClientProcess( + ulong address, + ulong size, + KMemoryPermission permission, + MemoryState state, + out int blocksNeeded) + { + blocksNeeded = 0; + + if (AddrSpaceStart > address) + { + return KernelResult.InvalidMemState; + } + + ulong endAddr = address + size; + + if (endAddr <= address || endAddr - 1 > AddrSpaceEnd - 1) + { + return KernelResult.InvalidMemState; + } + + MemoryState stateMask; + + switch (state) + { + case MemoryState.IpcBuffer0: stateMask = MemoryState.IpcSendAllowedType0; break; + case MemoryState.IpcBuffer1: stateMask = MemoryState.IpcSendAllowedType1; break; + case MemoryState.IpcBuffer3: stateMask = MemoryState.IpcSendAllowedType3; break; + + default: return KernelResult.InvalidCombination; + } + + KMemoryPermission permissionMask = permission == KMemoryPermission.ReadAndWrite + ? KMemoryPermission.None + : KMemoryPermission.Read; + + MemoryAttribute attributeMask = MemoryAttribute.Borrowed | MemoryAttribute.Uncached; + + if (state == MemoryState.IpcBuffer0) + { + attributeMask |= MemoryAttribute.DeviceMapped; + } + + ulong addressRounded = BitUtils.AlignUp(address, PageSize); + ulong addressTruncated = BitUtils.AlignDown(address, PageSize); + ulong endAddrRounded = BitUtils.AlignUp(endAddr, PageSize); + ulong endAddrTruncated = BitUtils.AlignDown(endAddr, PageSize); + + if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion)) + { + return KernelResult.OutOfResource; + } + + ulong visitedSize = 0; + + void CleanUpForError() + { + if (visitedSize == 0) + { + return; + } + + ulong endAddrVisited = address + visitedSize; + + foreach (KMemoryInfo info in IterateOverRange(addressRounded, endAddrVisited)) + { + if ((info.Permission & KMemoryPermission.ReadAndWrite) != permissionMask && info.IpcRefCount == 0) + { + ulong blockAddress = GetAddrInRange(info, addressRounded); + ulong blockSize = GetSizeInRange(info, addressRounded, endAddrVisited); + + ulong blockPagesCount = blockSize / PageSize; + + KernelResult reprotectResult = Reprotect(blockAddress, blockPagesCount, info.Permission); + Debug.Assert(reprotectResult == KernelResult.Success); + } + } + } + + // Signal a read for any resources tracking reads in the region, as the other process is likely to use their data. + SignalMemoryTracking(addressTruncated, endAddrRounded - addressTruncated, false); + + // Reprotect the aligned pages range on the client to make them inaccessible from the client process. + KernelResult result; + + if (addressRounded < endAddrTruncated) + { + foreach (KMemoryInfo info in IterateOverRange(addressRounded, endAddrTruncated)) + { + // Check if the block state matches what we expect. + if ((info.State & stateMask) != stateMask || + (info.Permission & permission) != permission || + (info.Attribute & attributeMask) != MemoryAttribute.None) + { + CleanUpForError(); + + return KernelResult.InvalidMemState; + } + + ulong blockAddress = GetAddrInRange(info, addressRounded); + ulong blockSize = GetSizeInRange(info, addressRounded, endAddrTruncated); + + ulong blockPagesCount = blockSize / PageSize; + + // If the first block starts before the aligned range, it will need to be split. + if (info.Address < addressRounded) + { + blocksNeeded++; + } + + // If the last block ends after the aligned range, it will need to be split. 
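+                    // (Editor's note: here and throughout this file, "x - 1 < y - 1"
+                    // compares inclusive end addresses, which keeps the check correct
+                    // even for ranges ending exactly at the top of the address space,
+                    // where an exclusive end would wrap around to zero.)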
+                    if (endAddrTruncated - 1 < info.Address + info.Size - 1)
+                    {
+                        blocksNeeded++;
+                    }
+
+                    if ((info.Permission & KMemoryPermission.ReadAndWrite) != permissionMask && info.IpcRefCount == 0)
+                    {
+                        result = Reprotect(blockAddress, blockPagesCount, permissionMask);
+
+                        if (result != KernelResult.Success)
+                        {
+                            CleanUpForError();
+
+                            return result;
+                        }
+                    }
+
+                    visitedSize += blockSize;
+                }
+            }
+
+            return KernelResult.Success;
+        }
+
+        private KernelResult MapPagesFromClientProcess(
+            ulong size,
+            ulong address,
+            KMemoryPermission permission,
+            MemoryState state,
+            KPageTableBase srcPageTable,
+            bool send,
+            out ulong dst)
+        {
+            if (!SupportsMemoryAliasing)
+            {
+                throw new NotSupportedException("Memory aliasing not supported, can't map IPC buffers.");
+            }
+
+            dst = 0;
+
+            if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
+            {
+                return KernelResult.OutOfResource;
+            }
+
+            ulong endAddr = address + size;
+
+            ulong addressTruncated = BitUtils.AlignDown(address, PageSize);
+            ulong addressRounded = BitUtils.AlignUp(address, PageSize);
+            ulong endAddrTruncated = BitUtils.AlignDown(endAddr, PageSize);
+            ulong endAddrRounded = BitUtils.AlignUp(endAddr, PageSize);
+
+            ulong neededSize = endAddrRounded - addressTruncated;
+
+            ulong neededPagesCount = neededSize / PageSize;
+
+            ulong regionPagesCount = (AliasRegionEnd - AliasRegionStart) / PageSize;
+
+            ulong va = 0;
+
+            for (int unit = MappingUnitSizes.Length - 1; unit >= 0 && va == 0; unit--)
+            {
+                int alignment = MappingUnitSizes[unit];
+
+                va = AllocateVa(AliasRegionStart, regionPagesCount, neededPagesCount, alignment);
+            }
+
+            if (va == 0)
+            {
+                return KernelResult.OutOfVaSpace;
+            }
+
+            ulong dstFirstPagePa = 0;
+            ulong dstLastPagePa = 0;
+            ulong currentVa = va;
+
+            using var _ = new OnScopeExit(() =>
+            {
+                if (dstFirstPagePa != 0)
+                {
+                    Context.MemoryManager.DecrementPagesReferenceCount(dstFirstPagePa, 1);
+                }
+
+                if (dstLastPagePa != 0)
+                {
+                    Context.MemoryManager.DecrementPagesReferenceCount(dstLastPagePa, 1);
+                }
+            });
+
+            void CleanUpForError()
+            {
+                if (currentVa != va)
+                {
+                    Unmap(va, (currentVa - va) / PageSize);
+                }
+            }
+
+            // Is the first page address aligned?
+            // If not, allocate a new page and copy the unaligned chunk.
+            if (addressTruncated < addressRounded)
+            {
+                dstFirstPagePa = GetMemoryRegionManager().AllocatePagesContiguous(Context, 1, _aslrDisabled);
+
+                if (dstFirstPagePa == 0)
+                {
+                    CleanUpForError();
+
+                    return KernelResult.OutOfMemory;
+                }
+            }
+
+            // Is the last page end address aligned?
+            // If not, allocate a new page and copy the unaligned chunk.
+            if (endAddrTruncated < endAddrRounded && (addressTruncated == addressRounded || addressTruncated < endAddrTruncated))
+            {
+                dstLastPagePa = GetMemoryRegionManager().AllocatePagesContiguous(Context, 1, _aslrDisabled);
+
+                if (dstLastPagePa == 0)
+                {
+                    CleanUpForError();
+
+                    return KernelResult.OutOfMemory;
+                }
+            }
+
+            if (dstFirstPagePa != 0)
+            {
+                ulong firstPageFillAddress = dstFirstPagePa;
+                ulong unusedSizeAfter;
+
+                if (send)
+                {
+                    ulong unusedSizeBefore = address - addressTruncated;
+
+                    Context.Memory.ZeroFill(GetDramAddressFromPa(dstFirstPagePa), unusedSizeBefore);
+
+                    ulong copySize = addressRounded <= endAddr ? addressRounded - address : size;
+                    var data = srcPageTable.GetSpan(addressTruncated + unusedSizeBefore, (int)copySize);
+
+                    Context.Memory.Write(GetDramAddressFromPa(dstFirstPagePa + unusedSizeBefore), data);
+
+                    firstPageFillAddress += unusedSizeBefore + copySize;
+
+                    unusedSizeAfter = addressRounded > endAddr ?
addressRounded - endAddr : 0; + } + else + { + unusedSizeAfter = PageSize; + } + + if (unusedSizeAfter != 0) + { + Context.Memory.ZeroFill(GetDramAddressFromPa(firstPageFillAddress), unusedSizeAfter); + } + + KernelResult result = MapPages(currentVa, 1, dstFirstPagePa, permission); + + if (result != KernelResult.Success) + { + CleanUpForError(); + + return result; + } + + currentVa += PageSize; + } + + if (endAddrTruncated > addressRounded) + { + ulong alignedSize = endAddrTruncated - addressRounded; + + KernelResult result = MapPages(currentVa, srcPageTable.GetPhysicalRegions(addressRounded, alignedSize), permission); + + if (result != KernelResult.Success) + { + CleanUpForError(); + + return result; + } + + currentVa += alignedSize; + } + + if (dstLastPagePa != 0) + { + ulong lastPageFillAddr = dstLastPagePa; + ulong unusedSizeAfter; + + if (send) + { + ulong copySize = endAddr - endAddrTruncated; + var data = srcPageTable.GetSpan(endAddrTruncated, (int)copySize); + + Context.Memory.Write(GetDramAddressFromPa(dstLastPagePa), data); + + lastPageFillAddr += copySize; + + unusedSizeAfter = PageSize - copySize; + } + else + { + unusedSizeAfter = PageSize; + } + + Context.Memory.ZeroFill(GetDramAddressFromPa(lastPageFillAddr), unusedSizeAfter); + + KernelResult result = MapPages(currentVa, 1, dstLastPagePa, permission); + + if (result != KernelResult.Success) + { + CleanUpForError(); + + return result; + } + } + + _blockManager.InsertBlock(va, neededPagesCount, state, permission); + + dst = va + (address - addressTruncated); + + return KernelResult.Success; + } + + public KernelResult UnmapNoAttributeIfStateEquals(ulong address, ulong size, MemoryState state) + { + if (AddrSpaceStart > address) + { + return KernelResult.InvalidMemState; + } + + ulong endAddr = address + size; + + if (endAddr <= address || endAddr - 1 > AddrSpaceEnd - 1) + { + return KernelResult.InvalidMemState; + } + + lock (_blockManager) + { + if (CheckRange( + address, + size, + MemoryState.Mask, + state, + KMemoryPermission.Read, + KMemoryPermission.Read, + MemoryAttribute.Mask, + MemoryAttribute.None, + MemoryAttribute.IpcAndDeviceMapped, + out _, + out _, + out _)) + { + if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion)) + { + return KernelResult.OutOfResource; + } + + ulong addressTruncated = BitUtils.AlignDown(address, PageSize); + ulong addressRounded = BitUtils.AlignUp(address, PageSize); + ulong endAddrTruncated = BitUtils.AlignDown(endAddr, PageSize); + ulong endAddrRounded = BitUtils.AlignUp(endAddr, PageSize); + + ulong pagesCount = (endAddrRounded - addressTruncated) / PageSize; + + KernelResult result = Unmap(addressTruncated, pagesCount); + + if (result == KernelResult.Success) + { + _blockManager.InsertBlock(addressTruncated, pagesCount, MemoryState.Unmapped); + } + + return result; + } + else + { + return KernelResult.InvalidMemState; + } + } + } + + public KernelResult UnmapIpcRestorePermission(ulong address, ulong size, MemoryState state) + { + ulong endAddr = address + size; + + ulong addressRounded = BitUtils.AlignUp(address, PageSize); + ulong addressTruncated = BitUtils.AlignDown(address, PageSize); + ulong endAddrRounded = BitUtils.AlignUp(endAddr, PageSize); + ulong endAddrTruncated = BitUtils.AlignDown(endAddr, PageSize); + + ulong pagesCount = addressRounded < endAddrTruncated ? 
(endAddrTruncated - addressRounded) / PageSize : 0; + + if (pagesCount == 0) + { + return KernelResult.Success; + } + + MemoryState stateMask; + + switch (state) + { + case MemoryState.IpcBuffer0: stateMask = MemoryState.IpcSendAllowedType0; break; + case MemoryState.IpcBuffer1: stateMask = MemoryState.IpcSendAllowedType1; break; + case MemoryState.IpcBuffer3: stateMask = MemoryState.IpcSendAllowedType3; break; + + default: return KernelResult.InvalidCombination; + } + + MemoryAttribute attributeMask = + MemoryAttribute.Borrowed | + MemoryAttribute.IpcMapped | + MemoryAttribute.Uncached; + + if (state == MemoryState.IpcBuffer0) + { + attributeMask |= MemoryAttribute.DeviceMapped; + } + + if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion)) + { + return KernelResult.OutOfResource; + } + + // Anything on the client side should see this memory as modified. + SignalMemoryTracking(addressTruncated, endAddrRounded - addressTruncated, true); + + lock (_blockManager) + { + foreach (KMemoryInfo info in IterateOverRange(addressRounded, endAddrTruncated)) + { + // Check if the block state matches what we expect. + if ((info.State & stateMask) != stateMask || + (info.Attribute & attributeMask) != MemoryAttribute.IpcMapped) + { + return KernelResult.InvalidMemState; + } + + if (info.Permission != info.SourcePermission && info.IpcRefCount == 1) + { + ulong blockAddress = GetAddrInRange(info, addressRounded); + ulong blockSize = GetSizeInRange(info, addressRounded, endAddrTruncated); + + ulong blockPagesCount = blockSize / PageSize; + + KernelResult result = Reprotect(blockAddress, blockPagesCount, info.SourcePermission); + + if (result != KernelResult.Success) + { + return result; + } + } + } + + _blockManager.InsertBlock(addressRounded, pagesCount, RestoreIpcMappingPermissions); + + return KernelResult.Success; + } + } + + private static void SetIpcMappingPermissions(KMemoryBlock block, KMemoryPermission permission) + { + block.SetIpcMappingPermission(permission); + } + + private static void RestoreIpcMappingPermissions(KMemoryBlock block, KMemoryPermission permission) + { + block.RestoreIpcMappingPermission(); + } + + public KernelResult BorrowIpcBuffer(ulong address, ulong size) + { + return SetAttributesAndChangePermission( + address, + size, + MemoryState.IpcBufferAllowed, + MemoryState.IpcBufferAllowed, + KMemoryPermission.Mask, + KMemoryPermission.ReadAndWrite, + MemoryAttribute.Mask, + MemoryAttribute.None, + KMemoryPermission.None, + MemoryAttribute.Borrowed); + } + + public KernelResult BorrowTransferMemory(List ranges, ulong address, ulong size, KMemoryPermission permission) + { + return SetAttributesAndChangePermission( + address, + size, + MemoryState.TransferMemoryAllowed, + MemoryState.TransferMemoryAllowed, + KMemoryPermission.Mask, + KMemoryPermission.ReadAndWrite, + MemoryAttribute.Mask, + MemoryAttribute.None, + permission, + MemoryAttribute.Borrowed, + ranges); + } + + private KernelResult SetAttributesAndChangePermission( + ulong address, + ulong size, + MemoryState stateMask, + MemoryState stateExpected, + KMemoryPermission permissionMask, + KMemoryPermission permissionExpected, + MemoryAttribute attributeMask, + MemoryAttribute attributeExpected, + KMemoryPermission newPermission, + MemoryAttribute attributeSetMask, + List ranges = null) + { + if (address + size <= address || !InsideAddrSpace(address, size)) + { + return KernelResult.InvalidMemState; + } + + lock (_blockManager) + { + if (CheckRange( + address, + size, + stateMask | MemoryState.IsPoolAllocated, + 
stateExpected | MemoryState.IsPoolAllocated, + permissionMask, + permissionExpected, + attributeMask, + attributeExpected, + MemoryAttribute.IpcAndDeviceMapped, + out MemoryState oldState, + out KMemoryPermission oldPermission, + out MemoryAttribute oldAttribute)) + { + ulong pagesCount = size / PageSize; + + ranges?.AddRange(GetPhysicalRegions(address, size)); + + if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion)) + { + return KernelResult.OutOfResource; + } + + if (newPermission == KMemoryPermission.None) + { + newPermission = oldPermission; + } + + if (newPermission != oldPermission) + { + KernelResult result = Reprotect(address, pagesCount, newPermission); + + if (result != KernelResult.Success) + { + return result; + } + } + + MemoryAttribute newAttribute = oldAttribute | attributeSetMask; + + _blockManager.InsertBlock(address, pagesCount, oldState, newPermission, newAttribute); + + return KernelResult.Success; + } + else + { + return KernelResult.InvalidMemState; + } + } + } + + public KernelResult UnborrowIpcBuffer(ulong address, ulong size) + { + return ClearAttributesAndChangePermission( + address, + size, + MemoryState.IpcBufferAllowed, + MemoryState.IpcBufferAllowed, + KMemoryPermission.None, + KMemoryPermission.None, + MemoryAttribute.Mask, + MemoryAttribute.Borrowed, + KMemoryPermission.ReadAndWrite, + MemoryAttribute.Borrowed); + } + + public KernelResult UnborrowTransferMemory(ulong address, ulong size, List ranges) + { + return ClearAttributesAndChangePermission( + address, + size, + MemoryState.TransferMemoryAllowed, + MemoryState.TransferMemoryAllowed, + KMemoryPermission.None, + KMemoryPermission.None, + MemoryAttribute.Mask, + MemoryAttribute.Borrowed, + KMemoryPermission.ReadAndWrite, + MemoryAttribute.Borrowed, + ranges); + } + + private KernelResult ClearAttributesAndChangePermission( + ulong address, + ulong size, + MemoryState stateMask, + MemoryState stateExpected, + KMemoryPermission permissionMask, + KMemoryPermission permissionExpected, + MemoryAttribute attributeMask, + MemoryAttribute attributeExpected, + KMemoryPermission newPermission, + MemoryAttribute attributeClearMask, + List ranges = null) + { + if (address + size <= address || !InsideAddrSpace(address, size)) + { + return KernelResult.InvalidMemState; + } + + lock (_blockManager) + { + if (CheckRange( + address, + size, + stateMask | MemoryState.IsPoolAllocated, + stateExpected | MemoryState.IsPoolAllocated, + permissionMask, + permissionExpected, + attributeMask, + attributeExpected, + MemoryAttribute.IpcAndDeviceMapped, + out MemoryState oldState, + out KMemoryPermission oldPermission, + out MemoryAttribute oldAttribute)) + { + ulong pagesCount = size / PageSize; + + if (ranges != null) + { + var currentRanges = GetPhysicalRegions(address, size); + + if (!currentRanges.SequenceEqual(ranges)) + { + return KernelResult.InvalidMemRange; + } + } + + if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion)) + { + return KernelResult.OutOfResource; + } + + if (newPermission == KMemoryPermission.None) + { + newPermission = oldPermission; + } + + if (newPermission != oldPermission) + { + KernelResult result = Reprotect(address, pagesCount, newPermission); + + if (result != KernelResult.Success) + { + return result; + } + } + + MemoryAttribute newAttribute = oldAttribute & ~attributeClearMask; + + _blockManager.InsertBlock(address, pagesCount, oldState, newPermission, newAttribute); + + return KernelResult.Success; + } + else + { + return KernelResult.InvalidMemState; + } + } + } + + private 
static ulong GetAddrInRange(KMemoryInfo info, ulong start) + { + if (info.Address < start) + { + return start; + } + + return info.Address; + } + + private static ulong GetSizeInRange(KMemoryInfo info, ulong start, ulong end) + { + ulong endAddr = info.Size + info.Address; + ulong size = info.Size; + + if (info.Address < start) + { + size -= start - info.Address; + } + + if (endAddr > end) + { + size -= endAddr - end; + } + + return size; + } + + private bool IsUnmapped(ulong address, ulong size) + { + return CheckRange( + address, + size, + MemoryState.Mask, + MemoryState.Unmapped, + KMemoryPermission.Mask, + KMemoryPermission.None, + MemoryAttribute.Mask, + MemoryAttribute.None, + MemoryAttribute.IpcAndDeviceMapped, + out _, + out _, + out _); + } + + private bool CheckRange( + ulong address, + ulong size, + MemoryState stateMask, + MemoryState stateExpected, + KMemoryPermission permissionMask, + KMemoryPermission permissionExpected, + MemoryAttribute attributeMask, + MemoryAttribute attributeExpected, + MemoryAttribute attributeIgnoreMask, + out MemoryState outState, + out KMemoryPermission outPermission, + out MemoryAttribute outAttribute) + { + ulong endAddr = address + size; + + LinkedListNode node = _blockManager.FindBlockNode(address); + + KMemoryInfo info = node.Value.GetInfo(); + + MemoryState firstState = info.State; + KMemoryPermission firstPermission = info.Permission; + MemoryAttribute firstAttribute = info.Attribute; + + do + { + info = node.Value.GetInfo(); + + // Check if the block state matches what we expect. + if (firstState != info.State || + firstPermission != info.Permission || + (info.Attribute & attributeMask) != attributeExpected || + (firstAttribute | attributeIgnoreMask) != (info.Attribute | attributeIgnoreMask) || + (firstState & stateMask) != stateExpected || + (firstPermission & permissionMask) != permissionExpected) + { + outState = MemoryState.Unmapped; + outPermission = KMemoryPermission.None; + outAttribute = MemoryAttribute.None; + + return false; + } + } + while (info.Address + info.Size - 1 < endAddr - 1 && (node = node.Next) != null); + + outState = firstState; + outPermission = firstPermission; + outAttribute = firstAttribute & ~attributeIgnoreMask; + + return true; + } + + private bool CheckRange( + ulong address, + ulong size, + MemoryState stateMask, + MemoryState stateExpected, + KMemoryPermission permissionMask, + KMemoryPermission permissionExpected, + MemoryAttribute attributeMask, + MemoryAttribute attributeExpected) + { + foreach (KMemoryInfo info in IterateOverRange(address, address + size)) + { + // Check if the block state matches what we expect. + if ((info.State & stateMask) != stateExpected || + (info.Permission & permissionMask) != permissionExpected || + (info.Attribute & attributeMask) != attributeExpected) + { + return false; + } + } + + return true; + } + + private IEnumerable IterateOverRange(ulong start, ulong end) + { + LinkedListNode node = _blockManager.FindBlockNode(start); + + KMemoryInfo info; + + do + { + info = node.Value.GetInfo(); + + yield return info; + } + while (info.Address + info.Size - 1 < end - 1 && (node = node.Next) != null); + } + + private ulong AllocateVa(ulong regionStart, ulong regionPagesCount, ulong neededPagesCount, int alignment) + { + ulong address = 0; + + ulong regionEndAddr = regionStart + regionPagesCount * PageSize; + + ulong reservedPagesCount = _isKernel ? 
1UL : 4UL; + + if (_aslrEnabled) + { + ulong totalNeededSize = (reservedPagesCount + neededPagesCount) * PageSize; + + ulong remainingPages = regionPagesCount - neededPagesCount; + + ulong aslrMaxOffset = ((remainingPages + reservedPagesCount) * PageSize) / (ulong)alignment; + + for (int attempt = 0; attempt < 8; attempt++) + { + address = BitUtils.AlignDown(regionStart + GetRandomValue(0, aslrMaxOffset) * (ulong)alignment, alignment); + + ulong endAddr = address + totalNeededSize; + + KMemoryInfo info = _blockManager.FindBlock(address).GetInfo(); + + if (info.State != MemoryState.Unmapped) + { + continue; + } + + ulong currBaseAddr = info.Address + reservedPagesCount * PageSize; + ulong currEndAddr = info.Address + info.Size; + + if (address >= regionStart && + address >= currBaseAddr && + endAddr - 1 <= regionEndAddr - 1 && + endAddr - 1 <= currEndAddr - 1) + { + break; + } + } + + if (address == 0) + { + ulong aslrPage = GetRandomValue(0, aslrMaxOffset); + + address = FindFirstFit( + regionStart + aslrPage * PageSize, + regionPagesCount - aslrPage, + neededPagesCount, + alignment, + 0, + reservedPagesCount); + } + } + + if (address == 0) + { + address = FindFirstFit( + regionStart, + regionPagesCount, + neededPagesCount, + alignment, + 0, + reservedPagesCount); + } + + return address; + } + + private ulong FindFirstFit( + ulong regionStart, + ulong regionPagesCount, + ulong neededPagesCount, + int alignment, + ulong reservedStart, + ulong reservedPagesCount) + { + ulong reservedSize = reservedPagesCount * PageSize; + + ulong totalNeededSize = reservedSize + neededPagesCount * PageSize; + + ulong regionEndAddr = regionStart + regionPagesCount * PageSize; + + LinkedListNode node = _blockManager.FindBlockNode(regionStart); + + KMemoryInfo info = node.Value.GetInfo(); + + while (regionEndAddr >= info.Address) + { + if (info.State == MemoryState.Unmapped) + { + ulong currBaseAddr = info.Address + reservedSize; + ulong currEndAddr = info.Address + info.Size - 1; + + ulong address = BitUtils.AlignDown(currBaseAddr, alignment) + reservedStart; + + if (currBaseAddr > address) + { + address += (ulong)alignment; + } + + ulong allocationEndAddr = address + totalNeededSize - 1; + + if (allocationEndAddr <= regionEndAddr && + allocationEndAddr <= currEndAddr && + address < allocationEndAddr) + { + return address; + } + } + + node = node.Next; + + if (node == null) + { + break; + } + + info = node.Value.GetInfo(); + } + + return 0; + } + + public bool CanContain(ulong address, ulong size, MemoryState state) + { + ulong endAddr = address + size; + + ulong regionBaseAddr = GetBaseAddress(state); + ulong regionEndAddr = regionBaseAddr + GetSize(state); + + bool InsideRegion() + { + return regionBaseAddr <= address && + endAddr > address && + endAddr - 1 <= regionEndAddr - 1; + } + + bool OutsideHeapRegion() + { + return endAddr <= HeapRegionStart || address >= HeapRegionEnd; + } + + bool OutsideAliasRegion() + { + return endAddr <= AliasRegionStart || address >= AliasRegionEnd; + } + + switch (state) + { + case MemoryState.Io: + case MemoryState.Normal: + case MemoryState.CodeStatic: + case MemoryState.CodeMutable: + case MemoryState.SharedMemory: + case MemoryState.ModCodeStatic: + case MemoryState.ModCodeMutable: + case MemoryState.Stack: + case MemoryState.ThreadLocal: + case MemoryState.TransferMemoryIsolated: + case MemoryState.TransferMemory: + case MemoryState.ProcessMemory: + case MemoryState.CodeReadOnly: + case MemoryState.CodeWritable: + return InsideRegion() && OutsideHeapRegion() && 
OutsideAliasRegion(); + + case MemoryState.Heap: + return InsideRegion() && OutsideAliasRegion(); + + case MemoryState.IpcBuffer0: + case MemoryState.IpcBuffer1: + case MemoryState.IpcBuffer3: + return InsideRegion() && OutsideHeapRegion(); + + case MemoryState.KernelStack: + return InsideRegion(); + } + + throw new ArgumentException($"Invalid state value \"{state}\"."); + } + + private ulong GetBaseAddress(MemoryState state) + { + switch (state) + { + case MemoryState.Io: + case MemoryState.Normal: + case MemoryState.ThreadLocal: + return TlsIoRegionStart; + + case MemoryState.CodeStatic: + case MemoryState.CodeMutable: + case MemoryState.SharedMemory: + case MemoryState.ModCodeStatic: + case MemoryState.ModCodeMutable: + case MemoryState.TransferMemoryIsolated: + case MemoryState.TransferMemory: + case MemoryState.ProcessMemory: + case MemoryState.CodeReadOnly: + case MemoryState.CodeWritable: + return GetAddrSpaceBaseAddr(); + + case MemoryState.Heap: + return HeapRegionStart; + + case MemoryState.IpcBuffer0: + case MemoryState.IpcBuffer1: + case MemoryState.IpcBuffer3: + return AliasRegionStart; + + case MemoryState.Stack: + return StackRegionStart; + + case MemoryState.KernelStack: + return AddrSpaceStart; + } + + throw new ArgumentException($"Invalid state value \"{state}\"."); + } + + private ulong GetSize(MemoryState state) + { + switch (state) + { + case MemoryState.Io: + case MemoryState.Normal: + case MemoryState.ThreadLocal: + return TlsIoRegionEnd - TlsIoRegionStart; + + case MemoryState.CodeStatic: + case MemoryState.CodeMutable: + case MemoryState.SharedMemory: + case MemoryState.ModCodeStatic: + case MemoryState.ModCodeMutable: + case MemoryState.TransferMemoryIsolated: + case MemoryState.TransferMemory: + case MemoryState.ProcessMemory: + case MemoryState.CodeReadOnly: + case MemoryState.CodeWritable: + return GetAddrSpaceSize(); + + case MemoryState.Heap: + return HeapRegionEnd - HeapRegionStart; + + case MemoryState.IpcBuffer0: + case MemoryState.IpcBuffer1: + case MemoryState.IpcBuffer3: + return AliasRegionEnd - AliasRegionStart; + + case MemoryState.Stack: + return StackRegionEnd - StackRegionStart; + + case MemoryState.KernelStack: + return AddrSpaceEnd - AddrSpaceStart; + } + + throw new ArgumentException($"Invalid state value \"{state}\"."); + } + + public ulong GetAddrSpaceBaseAddr() + { + if (AddrSpaceWidth == 36 || AddrSpaceWidth == 39) + { + return 0x8000000; + } + else if (AddrSpaceWidth == 32) + { + return 0x200000; + } + else + { + throw new InvalidOperationException("Invalid address space width!"); + } + } + + public ulong GetAddrSpaceSize() + { + if (AddrSpaceWidth == 36) + { + return 0xff8000000; + } + else if (AddrSpaceWidth == 39) + { + return 0x7ff8000000; + } + else if (AddrSpaceWidth == 32) + { + return 0xffe00000; + } + else + { + throw new InvalidOperationException("Invalid address space width!"); + } + } + + private static ulong GetDramAddressFromPa(ulong pa) + { + return pa - DramMemoryMap.DramBase; + } + + protected KMemoryRegionManager GetMemoryRegionManager() + { + return Context.MemoryManager.MemoryRegions[(int)_memRegion]; + } + + public long GetMmUsedPages() + { + lock (_blockManager) + { + return BitUtils.DivRoundUp(GetMmUsedSize(), PageSize); + } + } + + private long GetMmUsedSize() + { + return _blockManager.BlocksCount * KMemoryBlockSize; + } + + public bool IsInvalidRegion(ulong address, ulong size) + { + return address + size - 1 > GetAddrSpaceBaseAddr() + GetAddrSpaceSize() - 1; + } + + public bool InsideAddrSpace(ulong address, 
ulong size)
+        {
+            return AddrSpaceStart <= address && address + size - 1 <= AddrSpaceEnd - 1;
+        }
+
+        public bool InsideAliasRegion(ulong address, ulong size)
+        {
+            return address + size > AliasRegionStart && AliasRegionEnd > address;
+        }
+
+        public bool InsideHeapRegion(ulong address, ulong size)
+        {
+            return address + size > HeapRegionStart && HeapRegionEnd > address;
+        }
+
+        public bool InsideStackRegion(ulong address, ulong size)
+        {
+            return address + size > StackRegionStart && StackRegionEnd > address;
+        }
+
+        public bool OutsideAliasRegion(ulong address, ulong size)
+        {
+            return AliasRegionStart > address || address + size - 1 > AliasRegionEnd - 1;
+        }
+
+        public bool OutsideAddrSpace(ulong address, ulong size)
+        {
+            return AddrSpaceStart > address || address + size - 1 > AddrSpaceEnd - 1;
+        }
+
+        public bool OutsideStackRegion(ulong address, ulong size)
+        {
+            return StackRegionStart > address || address + size - 1 > StackRegionEnd - 1;
+        }
+
+        /// <summary>
+        /// Gets the physical regions that make up the given virtual address region.
+        /// If any part of the virtual region is unmapped, null is returned.
+        /// </summary>
+        /// <param name="va">Virtual address of the range</param>
+        /// <param name="size">Size of the range</param>
+        /// <returns>Array of physical regions</returns>
+        protected abstract IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size);
+
+        /// <summary>
+        /// Gets a read-only span of data from CPU mapped memory.
+        /// </summary>
+        /// <remarks>
+        /// This may perform an allocation if the data is not contiguous in memory.
+        /// For this reason, the span is read-only, you can't modify the data.
+        /// </remarks>
+        /// <param name="va">Virtual address of the data</param>
+        /// <param name="size">Size of the data</param>
+        /// <param name="tracked">True if read tracking is triggered on the span</param>
+        /// <returns>A read-only span of the data</returns>
+        /// <exception cref="Ryujinx.Memory.InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
+        protected abstract ReadOnlySpan<byte> GetSpan(ulong va, int size);
+
+        /// <summary>
+        /// Maps a new memory region with the contents of an existing memory region.
+        /// </summary>
+        /// <param name="src">Source memory region where the data will be taken from</param>
+        /// <param name="dst">Destination memory region to map</param>
+        /// <param name="pagesCount">Number of pages to map</param>
+        /// <param name="oldSrcPermission">Current protection of the source memory region</param>
+        /// <param name="newDstPermission">Desired protection for the destination memory region</param>
+        /// <returns>Result of the mapping operation</returns>
+        protected abstract KernelResult MapMemory(ulong src, ulong dst, ulong pagesCount, KMemoryPermission oldSrcPermission, KMemoryPermission newDstPermission);
+
+        /// <summary>
+        /// Unmaps a region of memory that was previously mapped with <see cref="MapMemory"/>.
+        /// </summary>
+        /// <param name="dst">Destination memory region to be unmapped</param>
+        /// <param name="src">Source memory region that was originally remapped</param>
+        /// <param name="pagesCount">Number of pages to unmap</param>
+        /// <param name="oldDstPermission">Current protection of the destination memory region</param>
+        /// <param name="newSrcPermission">Desired protection of the source memory region</param>
+        /// <returns>Result of the unmapping operation</returns>
+        protected abstract KernelResult UnmapMemory(ulong dst, ulong src, ulong pagesCount, KMemoryPermission oldDstPermission, KMemoryPermission newSrcPermission);
+
+        /// <summary>
+        /// Maps a region of memory into the specified physical memory region.
+        /// </summary>
+        /// <param name="dstVa">Destination virtual address that should be mapped</param>
+        /// <param name="pagesCount">Number of pages to map</param>
+        /// <param name="srcPa">Physical address where the pages should be mapped. May be ignored if aliasing is not supported</param>
+        /// <param name="permission">Permission of the region to be mapped</param>
+        /// <returns>Result of the mapping operation</returns>
+        protected abstract KernelResult MapPages(ulong dstVa, ulong pagesCount, ulong srcPa, KMemoryPermission permission);
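+
+        // Editor's note, illustrative (not part of the original patch): the MapPages
+        // overloads split responsibility between the two derived page tables. The
+        // software table (KPageTable) honors the physical addresses it is given, while
+        // KPageTableHostMapped ignores them and only commits host VA, e.g.:
+        //
+        //     // software page table: the dstVa -> srcPa mapping is real
+        //     MapPages(dstVa, pagesCount, srcPa, permission);
+        //
+        //     // host mapped table: only the VA range matters
+        //     _cpuMemory.Map(dstVa, 0, pagesCount * PageSize);
+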
+        /// <summary>
+        /// Maps a region of memory into the specified physical memory region.
+        /// </summary>
+        /// <param name="address">Destination virtual address that should be mapped</param>
+        /// <param name="pageList">List of physical memory pages where the pages should be mapped. May be ignored if aliasing is not supported</param>
+        /// <param name="permission">Permission of the region to be mapped</param>
+        /// <returns>Result of the mapping operation</returns>
+        protected abstract KernelResult MapPages(ulong address, KPageList pageList, KMemoryPermission permission);
+
+        /// <summary>
+        /// Maps a region of memory into the specified host memory ranges.
+        /// </summary>
+        /// <param name="address">Destination virtual address that should be mapped</param>
+        /// <param name="ranges">Ranges of host memory that should be mapped</param>
+        /// <param name="permission">Permission of the region to be mapped</param>
+        /// <returns>Result of the mapping operation</returns>
+        /// <exception cref="NotSupportedException">The implementation does not support memory aliasing</exception>
+        protected abstract KernelResult MapPages(ulong address, IEnumerable<HostMemoryRange> ranges, KMemoryPermission permission);
+
+        /// <summary>
+        /// Unmaps a region of memory that was previously mapped with one of the page mapping methods.
+        /// </summary>
+        /// <param name="address">Virtual address of the region to unmap</param>
+        /// <param name="pagesCount">Number of pages to unmap</param>
+        /// <returns>Result of the unmapping operation</returns>
+        protected abstract KernelResult Unmap(ulong address, ulong pagesCount);
+
+        /// <summary>
+        /// Changes the permissions of a given virtual memory region.
+        /// </summary>
+        /// <param name="address">Virtual address of the region to have the permission changes</param>
+        /// <param name="pagesCount">Number of pages to have their permissions changed</param>
+        /// <param name="permission">New permission</param>
+        /// <returns>Result of the permission change operation</returns>
+        protected abstract KernelResult Reprotect(ulong address, ulong pagesCount, KMemoryPermission permission);
+
+        /// <summary>
+        /// Changes the permissions of a given virtual memory region.
+        /// </summary>
+        /// <param name="address">Virtual address of the region to have the permission changes</param>
+        /// <param name="pagesCount">Number of pages to have their permissions changed</param>
+        /// <param name="permission">New permission</param>
+        /// <returns>Result of the permission change operation</returns>
+        protected abstract KernelResult ReprotectWithAttributes(ulong address, ulong pagesCount, KMemoryPermission permission);
+
+        /// <summary>
+        /// Alerts the memory tracking that a given region has been read from or written to.
+        /// This should be called before read/write is performed.
+        /// </summary>
+        /// <param name="va">Virtual address of the region</param>
+        /// <param name="size">Size of the region</param>
+        protected abstract void SignalMemoryTracking(ulong va, ulong size, bool write);
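+
+        // Editor's note (illustrative, not part of the original patch): callers in this
+        // file pair SignalMemoryTracking with the access that follows it. For example,
+        // ReprotectClientProcess signals a read over the whole IPC range before the
+        // pages are handed to the other process:
+        //
+        //     SignalMemoryTracking(addressTruncated, endAddrRounded - addressTruncated, false);
+        //
+        // while UnmapIpcRestorePermission signals a write so client-side trackers see
+        // the server's modifications.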
+
+        /// <summary>
+        /// Writes data to CPU mapped memory, with write tracking.
+        /// </summary>
+        /// <param name="va">Virtual address to write the data into</param>
+        /// <param name="data">Data to be written</param>
+        /// <exception cref="Ryujinx.Memory.InvalidMemoryRegionException">Throw for unhandled invalid or unmapped memory accesses</exception>
+        protected abstract void Write(ulong va, ReadOnlySpan<byte> data);
+    }
+}
\ No newline at end of file
diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableHostMapped.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableHostMapped.cs
new file mode 100644
index 0000000000..cd51bab7c2
--- /dev/null
+++ b/Ryujinx.HLE/HOS/Kernel/Memory/KPageTableHostMapped.cs
@@ -0,0 +1,125 @@
+using Ryujinx.HLE.HOS.Kernel.Common;
+using Ryujinx.Memory;
+using Ryujinx.Memory.Range;
+using System;
+using System.Collections.Generic;
+using System.Linq;
+
+namespace Ryujinx.HLE.HOS.Kernel.Memory
+{
+    class KPageTableHostMapped : KPageTableBase
+    {
+        private const int CopyChunkSize = 0x100000;
+
+        private readonly IVirtualMemoryManager _cpuMemory;
+
+        public override bool SupportsMemoryAliasing => false;
+
+        public KPageTableHostMapped(KernelContext context, IVirtualMemoryManager cpuMemory) : base(context)
+        {
+            _cpuMemory = cpuMemory;
+        }
+
+        /// <inheritdoc/>
+        protected override IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size)
+        {
+            return _cpuMemory.GetPhysicalRegions(va, size);
+        }
+
+        /// <inheritdoc/>
+        protected override ReadOnlySpan<byte> GetSpan(ulong va, int size)
+        {
+            return _cpuMemory.GetSpan(va, size);
+        }
+
+        /// <inheritdoc/>
+        protected override KernelResult MapMemory(ulong src, ulong dst, ulong pagesCount, KMemoryPermission oldSrcPermission, KMemoryPermission newDstPermission)
+        {
+            ulong size = pagesCount * PageSize;
+
+            _cpuMemory.Map(dst, 0, size);
+
+            // Copy the source region into the new mapping, one chunk at a time,
+            // advancing both cursors as we go.
+            ulong currentSize = size;
+            ulong srcCurrent = src;
+            ulong dstCurrent = dst;
+            while (currentSize > 0)
+            {
+                ulong copySize = Math.Min(currentSize, CopyChunkSize);
+                _cpuMemory.Write(dstCurrent, _cpuMemory.GetSpan(srcCurrent, (int)copySize));
+
+                srcCurrent += copySize;
+                dstCurrent += copySize;
+                currentSize -= copySize;
+            }
+
+            return KernelResult.Success;
+        }
+
+        /// <inheritdoc/>
+        protected override KernelResult UnmapMemory(ulong dst, ulong src, ulong pagesCount, KMemoryPermission oldDstPermission, KMemoryPermission newSrcPermission)
+        {
+            ulong size = pagesCount * PageSize;
+
+            // TODO: Validation.
+
+            // Copy the data back to the source region before unmapping, again
+            // advancing both cursors chunk by chunk.
+            ulong currentSize = size;
+            ulong srcCurrent = src;
+            ulong dstCurrent = dst;
+            while (currentSize > 0)
+            {
+                ulong copySize = Math.Min(currentSize, CopyChunkSize);
+                _cpuMemory.Write(srcCurrent, _cpuMemory.GetSpan(dstCurrent, (int)copySize));
+
+                srcCurrent += copySize;
+                dstCurrent += copySize;
+                currentSize -= copySize;
+            }
+
+            _cpuMemory.Unmap(dst, size);
+            return KernelResult.Success;
+        }
+
+        /// <inheritdoc/>
+        protected override KernelResult MapPages(ulong dstVa, ulong pagesCount, ulong srcPa, KMemoryPermission permission)
+        {
+            _cpuMemory.Map(dstVa, 0, pagesCount * PageSize);
+            return KernelResult.Success;
+        }
+
+        /// <inheritdoc/>
+        protected override KernelResult MapPages(ulong address, KPageList pageList, KMemoryPermission permission)
+        {
+            _cpuMemory.Map(address, 0, pageList.GetPagesCount() * PageSize);
+            return KernelResult.Success;
+        }
+
+        /// <inheritdoc/>
+        protected override KernelResult MapPages(ulong address, IEnumerable<HostMemoryRange> ranges, KMemoryPermission permission)
+        {
+            throw new NotSupportedException();
+        }
+
+        /// <inheritdoc/>
+        protected override KernelResult Unmap(ulong address, ulong pagesCount)
+        {
+            _cpuMemory.Unmap(address, pagesCount * PageSize);
+            return KernelResult.Success;
+        }
+
+        /// <inheritdoc/>
+        protected override KernelResult Reprotect(ulong address, ulong pagesCount, KMemoryPermission permission)
+        {
+            // TODO.
+            return KernelResult.Success;
+        }
+
+        /// <inheritdoc/>
+        protected override KernelResult ReprotectWithAttributes(ulong address, ulong pagesCount, KMemoryPermission permission)
+        {
+            // TODO.
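+            // Editor's sketch (hypothetical, not part of the original patch): a future
+            // implementation could forward the new permission to the host memory
+            // manager, along the lines of
+            //
+            //     _cpuMemory.Reprotect(address, pagesCount * PageSize, ConvertPermission(permission));
+            //
+            // where ConvertPermission is an assumed helper mapping KMemoryPermission to
+            // host permission flags. For now the host mapped table leaves pages
+            // writable and relies on access tracking instead.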
+ return KernelResult.Success; + } + + /// + protected override void SignalMemoryTracking(ulong va, ulong size, bool write) + { + _cpuMemory.SignalMemoryTracking(va, size, write); + } + + /// + protected override void Write(ulong va, ReadOnlySpan data) + { + _cpuMemory.Write(va, data); + } + } +} diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KScopedPageList.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KScopedPageList.cs new file mode 100644 index 0000000000..a0c19f9c14 --- /dev/null +++ b/Ryujinx.HLE/HOS/Kernel/Memory/KScopedPageList.cs @@ -0,0 +1,27 @@ +using System; + +namespace Ryujinx.HLE.HOS.Kernel.Memory +{ + struct KScopedPageList : IDisposable + { + private readonly KMemoryManager _manager; + private KPageList _pageList; + + public KScopedPageList(KMemoryManager manager, KPageList pageList) + { + _manager = manager; + _pageList = pageList; + pageList.IncrementPagesReferenceCount(manager); + } + + public void SignalSuccess() + { + _pageList = null; + } + + public void Dispose() + { + _pageList?.DecrementPagesReferenceCount(_manager); + } + } +} diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs index ca0e3421ab..61c883d807 100644 --- a/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs +++ b/Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs @@ -6,7 +6,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory { class KSharedMemory : KAutoObject { - private readonly KPageList _pageList; + private readonly SharedMemoryStorage _storage; private readonly long _ownerPid; @@ -14,28 +14,29 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory private readonly KMemoryPermission _userPermission; public KSharedMemory( - KernelContext context, - KPageList pageList, - long ownerPid, + KernelContext context, + SharedMemoryStorage storage, + long ownerPid, KMemoryPermission ownerPermission, KMemoryPermission userPermission) : base(context) { - _pageList = pageList; - _ownerPid = ownerPid; + _storage = storage; + _ownerPid = ownerPid; _ownerPermission = ownerPermission; - _userPermission = userPermission; + _userPermission = userPermission; } public KernelResult MapIntoProcess( - KMemoryManager memoryManager, - ulong address, - ulong size, - KProcess process, + KPageTableBase memoryManager, + ulong address, + ulong size, + KProcess process, KMemoryPermission permission) { - ulong pagesCountRounded = BitUtils.DivRoundUp(size, KMemoryManager.PageSize); + ulong pagesCountRounded = BitUtils.DivRoundUp(size, KPageTableBase.PageSize); - if (_pageList.GetPagesCount() != pagesCountRounded) + var pageList = _storage.GetPageList(); + if (pageList.GetPagesCount() != pagesCountRounded) { return KernelResult.InvalidSize; } @@ -49,23 +50,35 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory return KernelResult.InvalidPermission; } - return memoryManager.MapPages(address, _pageList, MemoryState.SharedMemory, permission); + KernelResult result = memoryManager.MapPages(address, pageList, MemoryState.SharedMemory, permission); + + if (result == KernelResult.Success && !memoryManager.SupportsMemoryAliasing) + { + _storage.Borrow(process, address); + } + + return result; } public KernelResult UnmapFromProcess( - KMemoryManager memoryManager, - ulong address, - ulong size, - KProcess process) + KPageTableBase memoryManager, + ulong address, + ulong size, + KProcess process) { - ulong pagesCountRounded = BitUtils.DivRoundUp(size, KMemoryManager.PageSize); + ulong pagesCountRounded = BitUtils.DivRoundUp(size, KPageTableBase.PageSize); - if (_pageList.GetPagesCount() != pagesCountRounded) + var pageList = 
_storage.GetPageList(); + ulong pagesCount = pageList.GetPagesCount(); + + if (pagesCount != pagesCountRounded) { return KernelResult.InvalidSize; } - return memoryManager.UnmapPages(address, _pageList, MemoryState.SharedMemory); + var ranges = _storage.GetRanges(); + + return memoryManager.UnmapPages(address, pagesCount, ranges, MemoryState.SharedMemory); } } } \ No newline at end of file diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/KTransferMemory.cs b/Ryujinx.HLE/HOS/Kernel/Memory/KTransferMemory.cs index 7107d497a8..c75d8e6981 100644 --- a/Ryujinx.HLE/HOS/Kernel/Memory/KTransferMemory.cs +++ b/Ryujinx.HLE/HOS/Kernel/Memory/KTransferMemory.cs @@ -1,6 +1,8 @@ using Ryujinx.HLE.HOS.Kernel.Common; using Ryujinx.HLE.HOS.Kernel.Process; +using Ryujinx.Memory.Range; using System; +using System.Collections.Generic; namespace Ryujinx.HLE.HOS.Kernel.Memory { @@ -11,10 +13,10 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory // TODO: Remove when we no longer need to read it from the owner directly. public KProcess Creator => _creator; - private readonly KPageList _pageList; + private readonly List _ranges; public ulong Address { get; private set; } - public ulong Size => _pageList.GetPagesCount() * KMemoryManager.PageSize; + public ulong Size { get; private set; } public KMemoryPermission Permission { get; private set; } @@ -23,7 +25,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory public KTransferMemory(KernelContext context) : base(context) { - _pageList = new KPageList(); + _ranges = new List(); } public KernelResult Initialize(ulong address, ulong size, KMemoryPermission permission) @@ -32,7 +34,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory _creator = creator; - KernelResult result = creator.MemoryManager.BorrowTransferMemory(_pageList, address, size, permission); + KernelResult result = creator.MemoryManager.BorrowTransferMemory(_ranges, address, size, permission); if (result != KernelResult.Success) { @@ -43,6 +45,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory Permission = permission; Address = address; + Size = size; _hasBeenInitialized = true; _isMapped = false; @@ -53,7 +56,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory { if (_hasBeenInitialized) { - if (!_isMapped && _creator.MemoryManager.UnborrowTransferMemory(Address, Size, _pageList) != KernelResult.Success) + if (!_isMapped && _creator.MemoryManager.UnborrowTransferMemory(Address, Size, _ranges) != KernelResult.Success) { throw new InvalidOperationException("Unexpected failure restoring transfer memory attributes."); } diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/MemoryOperation.cs b/Ryujinx.HLE/HOS/Kernel/Memory/MemoryOperation.cs deleted file mode 100644 index 7f7f29deed..0000000000 --- a/Ryujinx.HLE/HOS/Kernel/Memory/MemoryOperation.cs +++ /dev/null @@ -1,12 +0,0 @@ -namespace Ryujinx.HLE.HOS.Kernel.Memory -{ - enum MemoryOperation - { - MapPa, - MapVa, - Allocate, - Unmap, - ChangePermRw, - ChangePermsAndAttributes - } -} \ No newline at end of file diff --git a/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs b/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs new file mode 100644 index 0000000000..cd22b65f7c --- /dev/null +++ b/Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs @@ -0,0 +1,103 @@ +using Ryujinx.HLE.HOS.Kernel.Process; +using Ryujinx.Memory; +using Ryujinx.Memory.Range; +using System; +using System.Collections.Generic; + +namespace Ryujinx.HLE.HOS.Kernel.Memory +{ + class SharedMemoryStorage + { + private readonly KernelContext _context; + private readonly KPageList _pageList; + private readonly ulong _size; + + private 
IVirtualMemoryManager _borrowerMemory; + private ulong _borrowerVa; + + public SharedMemoryStorage(KernelContext context, KPageList pageList) + { + _context = context; + _pageList = pageList; + _size = pageList.GetPagesCount() * KPageTableBase.PageSize; + + foreach (KPageNode pageNode in pageList) + { + ulong address = pageNode.Address - DramMemoryMap.DramBase; + ulong size = pageNode.PagesCount * KPageTableBase.PageSize; + context.Memory.Commit(address, size); + } + } + + public void Borrow(KProcess dstProcess, ulong va) + { + ulong currentOffset = 0; + + foreach (KPageNode pageNode in _pageList) + { + ulong address = pageNode.Address - DramMemoryMap.DramBase; + ulong size = pageNode.PagesCount * KPageTableBase.PageSize; + + dstProcess.CpuMemory.Write(va + currentOffset, _context.Memory.GetSpan(address + currentOffset, (int)size)); + + currentOffset += size; + } + + _borrowerMemory = dstProcess.CpuMemory; + _borrowerVa = va; + } + + public void ZeroFill() + { + for (ulong offset = 0; offset < _size; offset += sizeof(ulong)) + { + GetRef(offset) = 0; + } + } + + public ref T GetRef(ulong offset) where T : unmanaged + { + if (_borrowerMemory == null) + { + if (_pageList.Nodes.Count == 1) + { + ulong address = _pageList.Nodes.First.Value.Address - DramMemoryMap.DramBase; + return ref _context.Memory.GetRef(address + offset); + } + + throw new NotImplementedException("Non-contiguous shared memory is not yet supported."); + } + else + { + return ref _borrowerMemory.GetRef(_borrowerVa + offset); + } + } + + public IEnumerable GetRanges() + { + if (_borrowerMemory == null) + { + var ranges = new List(); + + foreach (KPageNode pageNode in _pageList) + { + ulong address = pageNode.Address - DramMemoryMap.DramBase; + ulong size = pageNode.PagesCount * KPageTableBase.PageSize; + + ranges.Add(new HostMemoryRange(_context.Memory.GetPointer(address, size), size)); + } + + return ranges; + } + else + { + return _borrowerMemory.GetPhysicalRegions(_borrowerVa, _size); + } + } + + public KPageList GetPageList() + { + return _pageList; + } + } +} diff --git a/Ryujinx.HLE/HOS/Kernel/Process/IProcessContextFactory.cs b/Ryujinx.HLE/HOS/Kernel/Process/IProcessContextFactory.cs index c438b570e6..e9fbf618a2 100644 --- a/Ryujinx.HLE/HOS/Kernel/Process/IProcessContextFactory.cs +++ b/Ryujinx.HLE/HOS/Kernel/Process/IProcessContextFactory.cs @@ -5,6 +5,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Process { interface IProcessContextFactory { - IProcessContext Create(MemoryBlock backingMemory, ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler); + IProcessContext Create(KernelContext context, ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler); } } diff --git a/Ryujinx.HLE/HOS/Kernel/Process/KProcess.cs b/Ryujinx.HLE/HOS/Kernel/Process/KProcess.cs index 516b53f48e..f2ba675f90 100644 --- a/Ryujinx.HLE/HOS/Kernel/Process/KProcess.cs +++ b/Ryujinx.HLE/HOS/Kernel/Process/KProcess.cs @@ -25,7 +25,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process (KernelVersionMinor << 15) | (KernelVersionRevision << 0); - public KMemoryManager MemoryManager { get; private set; } + public KPageTableBase MemoryManager { get; private set; } private SortedDictionary _fullTlsPages; private SortedDictionary _freeTlsPages; @@ -132,11 +132,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Process ulong codeAddress = creationInfo.CodeAddress; - ulong codeSize = (ulong)creationInfo.CodePagesCount * KMemoryManager.PageSize; + ulong codeSize = (ulong)creationInfo.CodePagesCount * KPageTableBase.PageSize; - KMemoryBlockAllocator 
memoryBlockAllocator = creationInfo.Flags.HasFlag(ProcessCreationFlags.IsApplication) - ? KernelContext.LargeMemoryBlockAllocator - : KernelContext.SmallMemoryBlockAllocator; + KMemoryBlockSlabManager slabManager = creationInfo.Flags.HasFlag(ProcessCreationFlags.IsApplication) + ? KernelContext.LargeMemoryBlockSlabManager + : KernelContext.SmallMemoryBlockSlabManager; KernelResult result = MemoryManager.InitializeForProcess( addrSpaceType, @@ -145,7 +145,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process memRegion, codeAddress, codeSize, - memoryBlockAllocator); + slabManager); if (result != KernelResult.Success) { @@ -157,11 +157,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process return KernelResult.InvalidMemRange; } - result = MemoryManager.MapPages( - codeAddress, - pageList, - MemoryState.CodeStatic, - KMemoryPermission.None); + result = MemoryManager.MapPages(codeAddress, pageList, MemoryState.CodeStatic, KMemoryPermission.None); if (result != KernelResult.Success) { @@ -202,7 +198,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process ulong codePagesCount = (ulong)creationInfo.CodePagesCount; - ulong neededSizeForProcess = personalMmHeapSize + codePagesCount * KMemoryManager.PageSize; + ulong neededSizeForProcess = personalMmHeapSize + codePagesCount * KPageTableBase.PageSize; if (neededSizeForProcess != 0 && resourceLimit != null) { @@ -222,17 +218,17 @@ namespace Ryujinx.HLE.HOS.Kernel.Process PersonalMmHeapPagesCount = (ulong)creationInfo.SystemResourcePagesCount; - KMemoryBlockAllocator memoryBlockAllocator; + KMemoryBlockSlabManager slabManager; if (PersonalMmHeapPagesCount != 0) { - memoryBlockAllocator = new KMemoryBlockAllocator(PersonalMmHeapPagesCount * KMemoryManager.PageSize); + slabManager = new KMemoryBlockSlabManager(PersonalMmHeapPagesCount * KPageTableBase.PageSize); } else { - memoryBlockAllocator = creationInfo.Flags.HasFlag(ProcessCreationFlags.IsApplication) - ? KernelContext.LargeMemoryBlockAllocator - : KernelContext.SmallMemoryBlockAllocator; + slabManager = creationInfo.Flags.HasFlag(ProcessCreationFlags.IsApplication) + ? 
KernelContext.LargeMemoryBlockSlabManager + : KernelContext.SmallMemoryBlockSlabManager; } AddressSpaceType addrSpaceType = (AddressSpaceType)((int)(creationInfo.Flags & ProcessCreationFlags.AddressSpaceMask) >> (int)ProcessCreationFlags.AddressSpaceShift); @@ -243,7 +239,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process ulong codeAddress = creationInfo.CodeAddress; - ulong codeSize = codePagesCount * KMemoryManager.PageSize; + ulong codeSize = codePagesCount * KPageTableBase.PageSize; KernelResult result = MemoryManager.InitializeForProcess( addrSpaceType, @@ -252,7 +248,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process memRegion, codeAddress, codeSize, - memoryBlockAllocator); + slabManager); if (result != KernelResult.Success) { @@ -268,7 +264,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process return KernelResult.InvalidMemRange; } - result = MemoryManager.MapNewProcessCode( + result = MemoryManager.MapPages( codeAddress, codePagesCount, MemoryState.CodeStatic, @@ -352,7 +348,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process _version = creationInfo.Version; TitleId = creationInfo.TitleId; _entrypoint = creationInfo.CodeAddress; - _imageSize = (ulong)creationInfo.CodePagesCount * KMemoryManager.PageSize; + _imageSize = (ulong)creationInfo.CodePagesCount * KPageTableBase.PageSize; switch (Flags & ProcessCreationFlags.AddressSpaceMask) { @@ -396,9 +392,9 @@ namespace Ryujinx.HLE.HOS.Kernel.Process if (pageInfo.IsFull()) { - _freeTlsPages.Remove(pageInfo.PageAddr); + _freeTlsPages.Remove(pageInfo.PageVirtualAddress); - _fullTlsPages.Add(pageInfo.PageAddr, pageInfo); + _fullTlsPages.Add(pageInfo.PageVirtualAddress, pageInfo); } result = KernelResult.Success; @@ -415,7 +411,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process throw new InvalidOperationException("Unexpected failure getting free TLS page!"); } - _freeTlsPages.Add(pageInfo.PageAddr, pageInfo); + _freeTlsPages.Add(pageInfo.PageVirtualAddress, pageInfo); } else { @@ -440,11 +436,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Process ulong regionStart = MemoryManager.TlsIoRegionStart; ulong regionSize = MemoryManager.TlsIoRegionEnd - regionStart; - ulong regionPagesCount = regionSize / KMemoryManager.PageSize; + ulong regionPagesCount = regionSize / KPageTableBase.PageSize; - KernelResult result = MemoryManager.AllocateOrMapPa( + KernelResult result = MemoryManager.MapPages( 1, - KMemoryManager.PageSize, + KPageTableBase.PageSize, tlsPagePa, true, regionStart, @@ -459,9 +455,9 @@ namespace Ryujinx.HLE.HOS.Kernel.Process } else { - pageInfo = new KTlsPageInfo(tlsPageVa); + pageInfo = new KTlsPageInfo(tlsPageVa, tlsPagePa); - MemoryHelper.FillWithZeros(CpuMemory, tlsPageVa, KMemoryManager.PageSize); + MemoryHelper.FillWithZeros(CpuMemory, tlsPageVa, KPageTableBase.PageSize); } return result; @@ -469,7 +465,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process public KernelResult FreeThreadLocalStorage(ulong tlsSlotAddr) { - ulong tlsPageAddr = BitUtils.AlignDown(tlsSlotAddr, KMemoryManager.PageSize); + ulong tlsPageAddr = BitUtils.AlignDown(tlsSlotAddr, KPageTableBase.PageSize); KernelContext.CriticalSection.Enter(); @@ -514,16 +510,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Process private KernelResult FreeTlsPage(KTlsPageInfo pageInfo) { - if (!MemoryManager.TryConvertVaToPa(pageInfo.PageAddr, out ulong tlsPagePa)) - { - throw new InvalidOperationException("Unexpected failure translating virtual address to physical."); - } - - KernelResult result = MemoryManager.UnmapForKernel(pageInfo.PageAddr, 1, MemoryState.ThreadLocal); + KernelResult result = 
MemoryManager.UnmapForKernel(pageInfo.PageVirtualAddress, 1, MemoryState.ThreadLocal); if (result == KernelResult.Success) { - KernelContext.UserSlabHeapPages.Free(tlsPagePa); + KernelContext.UserSlabHeapPages.Free(pageInfo.PagePhysicalAddress); } return result; @@ -556,7 +547,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process throw new InvalidOperationException("Trying to start a process with a invalid state!"); } - ulong stackSizeRounded = BitUtils.AlignUp(stackSize, KMemoryManager.PageSize); + ulong stackSizeRounded = BitUtils.AlignUp(stackSize, KPageTableBase.PageSize); ulong neededSize = stackSizeRounded + _imageSize; @@ -598,7 +589,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process { ulong stackBottom = stackTop - _mainThreadStackSize; - ulong stackPagesCount = _mainThreadStackSize / KMemoryManager.PageSize; + ulong stackPagesCount = _mainThreadStackSize / KPageTableBase.PageSize; MemoryManager.UnmapForKernel(stackBottom, stackPagesCount, MemoryState.Stack); @@ -611,16 +602,16 @@ namespace Ryujinx.HLE.HOS.Kernel.Process if (stackSizeRounded != 0) { - ulong stackPagesCount = stackSizeRounded / KMemoryManager.PageSize; + ulong stackPagesCount = stackSizeRounded / KPageTableBase.PageSize; ulong regionStart = MemoryManager.StackRegionStart; ulong regionSize = MemoryManager.StackRegionEnd - regionStart; - ulong regionPagesCount = regionSize / KMemoryManager.PageSize; + ulong regionPagesCount = regionSize / KPageTableBase.PageSize; - result = MemoryManager.AllocateOrMapPa( + result = MemoryManager.MapPages( stackPagesCount, - KMemoryManager.PageSize, + KPageTableBase.PageSize, 0, false, regionStart, @@ -834,7 +825,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process return 0; } - return personalMmHeapPagesCount * KMemoryManager.PageSize; + return personalMmHeapPagesCount * KPageTableBase.PageSize; } public void AddCpuTime(long ticks) @@ -1058,16 +1049,23 @@ namespace Ryujinx.HLE.HOS.Kernel.Process _ => 39 }; - Context = _contextFactory.Create(KernelContext.Memory, 1UL << addrSpaceBits, InvalidAccessHandler); + Context = _contextFactory.Create(KernelContext, 1UL << addrSpaceBits, InvalidAccessHandler); // TODO: This should eventually be removed. // The GPU shouldn't depend on the CPU memory manager at all. 
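// The selection a few lines below can be read as a tiny factory keyed on the
// process context's address space implementation. A minimal sketch, using only
// types introduced by this patch (illustrative local helper, not in the patch):
KPageTableBase SelectMemoryManager(IVirtualMemoryManager cpuMemory)
{
    // Host mapped address spaces back guest VAs directly with host pointers and
    // cannot alias one physical page at two virtual addresses, so they get the
    // copy-based KPageTableHostMapped; everything else keeps the fully emulated
    // KPageTable.
    return Context.AddressSpace is MemoryManagerHostMapped
        ? new KPageTableHostMapped(KernelContext, cpuMemory)
        : new KPageTable(KernelContext, cpuMemory);
}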
if (flags.HasFlag(ProcessCreationFlags.IsApplication)) { - KernelContext.Device.Gpu.SetVmm((MemoryManager)CpuMemory); + KernelContext.Device.Gpu.SetVmm((IVirtualMemoryManagerTracked)CpuMemory); } - MemoryManager = new KMemoryManager(KernelContext, CpuMemory); + if (Context.AddressSpace is MemoryManagerHostMapped) + { + MemoryManager = new KPageTableHostMapped(KernelContext, CpuMemory); + } + else + { + MemoryManager = new KPageTable(KernelContext, CpuMemory); + } } private bool InvalidAccessHandler(ulong va) diff --git a/Ryujinx.HLE/HOS/Kernel/Process/KProcessCapabilities.cs b/Ryujinx.HLE/HOS/Kernel/Process/KProcessCapabilities.cs index e1cdb30f73..28944c4f55 100644 --- a/Ryujinx.HLE/HOS/Kernel/Process/KProcessCapabilities.cs +++ b/Ryujinx.HLE/HOS/Kernel/Process/KProcessCapabilities.cs @@ -25,7 +25,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process IrqAccessMask = new byte[0x80]; } - public KernelResult InitializeForKernel(ReadOnlySpan capabilities, KMemoryManager memoryManager) + public KernelResult InitializeForKernel(ReadOnlySpan capabilities, KPageTableBase memoryManager) { AllowedCpuCoresMask = 0xf; AllowedThreadPriosMask = -1; @@ -35,12 +35,12 @@ namespace Ryujinx.HLE.HOS.Kernel.Process return Parse(capabilities, memoryManager); } - public KernelResult InitializeForUser(ReadOnlySpan capabilities, KMemoryManager memoryManager) + public KernelResult InitializeForUser(ReadOnlySpan capabilities, KPageTableBase memoryManager) { return Parse(capabilities, memoryManager); } - private KernelResult Parse(ReadOnlySpan capabilities, KMemoryManager memoryManager) + private KernelResult Parse(ReadOnlySpan capabilities, KPageTableBase memoryManager) { int mask0 = 0; int mask1 = 0; @@ -117,7 +117,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process return KernelResult.Success; } - private KernelResult ParseCapability(int cap, ref int mask0, ref int mask1, KMemoryManager memoryManager) + private KernelResult ParseCapability(int cap, ref int mask0, ref int mask1, KPageTableBase memoryManager) { int code = (cap + 1) & ~cap; @@ -217,7 +217,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process { long address = ((long)(uint)cap << 4) & 0xffffff000; - memoryManager.MapIoMemory(address, KMemoryManager.PageSize, KMemoryPermission.ReadAndWrite); + memoryManager.MapIoMemory(address, KPageTableBase.PageSize, KMemoryPermission.ReadAndWrite); break; } diff --git a/Ryujinx.HLE/HOS/Kernel/Process/KTlsPageInfo.cs b/Ryujinx.HLE/HOS/Kernel/Process/KTlsPageInfo.cs index 5ce5a299da..f55e3c10eb 100644 --- a/Ryujinx.HLE/HOS/Kernel/Process/KTlsPageInfo.cs +++ b/Ryujinx.HLE/HOS/Kernel/Process/KTlsPageInfo.cs @@ -6,15 +6,17 @@ namespace Ryujinx.HLE.HOS.Kernel.Process { public const int TlsEntrySize = 0x200; - public ulong PageAddr { get; private set; } + public ulong PageVirtualAddress { get; } + public ulong PagePhysicalAddress { get; } - private bool[] _isSlotFree; + private readonly bool[] _isSlotFree; - public KTlsPageInfo(ulong pageAddress) + public KTlsPageInfo(ulong pageVirtualAddress, ulong pagePhysicalAddress) { - PageAddr = pageAddress; + PageVirtualAddress = pageVirtualAddress; + PagePhysicalAddress = pagePhysicalAddress; - _isSlotFree = new bool[KMemoryManager.PageSize / TlsEntrySize]; + _isSlotFree = new bool[KPageTableBase.PageSize / TlsEntrySize]; for (int index = 0; index < _isSlotFree.Length; index++) { @@ -24,7 +26,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process public bool TryGetFreePage(out ulong address) { - address = PageAddr; + address = PageVirtualAddress; for (int index = 0; index < _isSlotFree.Length; index++) { @@ 
-69,7 +71,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process public void FreeTlsSlot(ulong address) { - _isSlotFree[(address - PageAddr) / TlsEntrySize] = true; + _isSlotFree[(address - PageVirtualAddress) / TlsEntrySize] = true; } } } \ No newline at end of file diff --git a/Ryujinx.HLE/HOS/Kernel/Process/KTlsPageManager.cs b/Ryujinx.HLE/HOS/Kernel/Process/KTlsPageManager.cs index 03174e5bbc..0fde495cab 100644 --- a/Ryujinx.HLE/HOS/Kernel/Process/KTlsPageManager.cs +++ b/Ryujinx.HLE/HOS/Kernel/Process/KTlsPageManager.cs @@ -20,7 +20,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process { _pagePosition = pagePosition; - _slots = new bool[KMemoryManager.PageSize / TlsEntrySize]; + _slots = new bool[KPageTableBase.PageSize / TlsEntrySize]; } public bool TryGetFreeTlsAddr(out long position) diff --git a/Ryujinx.HLE/HOS/Kernel/Process/ProcessContextFactory.cs b/Ryujinx.HLE/HOS/Kernel/Process/ProcessContextFactory.cs index 03db62fa97..29860b3bb7 100644 --- a/Ryujinx.HLE/HOS/Kernel/Process/ProcessContextFactory.cs +++ b/Ryujinx.HLE/HOS/Kernel/Process/ProcessContextFactory.cs @@ -5,9 +5,9 @@ namespace Ryujinx.HLE.HOS.Kernel.Process { class ProcessContextFactory : IProcessContextFactory { - public IProcessContext Create(MemoryBlock backingMemory, ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler) + public IProcessContext Create(KernelContext context, ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler) { - return new ProcessContext(new AddressSpaceManager(backingMemory, addressSpaceSize)); + return new ProcessContext(new AddressSpaceManager(addressSpaceSize)); } } } diff --git a/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall.cs b/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall.cs index 39161f2206..b3e202b30f 100644 --- a/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall.cs +++ b/Ryujinx.HLE/HOS/Kernel/SupervisorCall/Syscall.cs @@ -1278,7 +1278,7 @@ namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall private static bool PageAligned(ulong position) { - return (position & (KMemoryManager.PageSize - 1)) == 0; + return (position & (KPageTableBase.PageSize - 1)) == 0; } // System @@ -1504,12 +1504,12 @@ namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall value = (long)(process.MemoryManager.StackRegionEnd - process.MemoryManager.StackRegionStart); break; - case 16: value = (long)process.PersonalMmHeapPagesCount * KMemoryManager.PageSize; break; + case 16: value = (long)process.PersonalMmHeapPagesCount * KPageTableBase.PageSize; break; case 17: if (process.PersonalMmHeapPagesCount != 0) { - value = process.MemoryManager.GetMmUsedPages() * KMemoryManager.PageSize; + value = process.MemoryManager.GetMmUsedPages() * KPageTableBase.PageSize; } break; @@ -1760,7 +1760,7 @@ namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall return KernelResult.InvalidCombination; } - KMemoryRegionManager region = _context.MemoryRegions[subId]; + KMemoryRegionManager region = _context.MemoryManager.MemoryRegions[subId]; switch (id) { @@ -1772,7 +1772,7 @@ namespace Ryujinx.HLE.HOS.Kernel.SupervisorCall { ulong freePagesCount = region.GetFreePages(); - value = (long)(freePagesCount * KMemoryManager.PageSize); + value = (long)(freePagesCount * KPageTableBase.PageSize); break; } diff --git a/Ryujinx.HLE/HOS/Kernel/Threading/KThread.cs b/Ryujinx.HLE/HOS/Kernel/Threading/KThread.cs index 3ea03f1662..7224cca121 100644 --- a/Ryujinx.HLE/HOS/Kernel/Threading/KThread.cs +++ b/Ryujinx.HLE/HOS/Kernel/Threading/KThread.cs @@ -49,7 +49,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading private ulong _tlsAddress; public ulong TlsAddress => 
_tlsAddress; - public ulong TlsDramAddress { get; private set; } public KSynchronizationObject[] WaitSyncObjects { get; } public int[] WaitSyncHandles { get; } @@ -159,8 +158,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Threading return KernelResult.OutOfMemory; } - TlsDramAddress = owner.MemoryManager.GetDramAddressFromVa(_tlsAddress); - MemoryHelper.FillWithZeros(owner.CpuMemory, _tlsAddress, KTlsPageInfo.TlsEntrySize); } diff --git a/Ryujinx.HLE/HOS/ProgramLoader.cs b/Ryujinx.HLE/HOS/ProgramLoader.cs index 73a73a8bcb..cc1c76c24f 100644 --- a/Ryujinx.HLE/HOS/ProgramLoader.cs +++ b/Ryujinx.HLE/HOS/ProgramLoader.cs @@ -29,9 +29,9 @@ namespace Ryujinx.HLE.HOS endOffset = kip.BssOffset + kip.BssSize; } - uint codeSize = BitUtils.AlignUp(kip.TextOffset + endOffset, KMemoryManager.PageSize); + uint codeSize = BitUtils.AlignUp(kip.TextOffset + endOffset, KPageTableBase.PageSize); - int codePagesCount = (int)(codeSize / KMemoryManager.PageSize); + int codePagesCount = (int)(codeSize / KPageTableBase.PageSize); ulong codeBaseAddress = kip.Is64BitAddressSpace ? 0x8000000UL : 0x200000UL; @@ -70,7 +70,7 @@ namespace Ryujinx.HLE.HOS ? MemoryRegion.Service : MemoryRegion.Application; - KMemoryRegionManager region = context.MemoryRegions[(int)memoryRegion]; + KMemoryRegionManager region = context.MemoryManager.MemoryRegions[(int)memoryRegion]; KernelResult result = region.AllocatePages((ulong)codePagesCount, false, out KPageList pageList); @@ -161,7 +161,7 @@ namespace Ryujinx.HLE.HOS nsoSize = dataEnd; } - nsoSize = BitUtils.AlignUp(nsoSize, KMemoryManager.PageSize); + nsoSize = BitUtils.AlignUp(nsoSize, KPageTableBase.PageSize); nsoBase[index] = codeStart + (ulong)codeSize; @@ -171,7 +171,7 @@ namespace Ryujinx.HLE.HOS { argsStart = (ulong)codeSize; - argsSize = (uint)BitUtils.AlignDown(arguments.Length * 2 + ArgsTotalSize - 1, KMemoryManager.PageSize); + argsSize = (uint)BitUtils.AlignDown(arguments.Length * 2 + ArgsTotalSize - 1, KPageTableBase.PageSize); codeSize += argsSize; } @@ -180,9 +180,9 @@ namespace Ryujinx.HLE.HOS PtcProfiler.StaticCodeStart = codeStart; PtcProfiler.StaticCodeSize = (ulong)codeSize; - int codePagesCount = (int)(codeSize / KMemoryManager.PageSize); + int codePagesCount = (int)(codeSize / KPageTableBase.PageSize); - int personalMmHeapPagesCount = metaData.PersonalMmHeapSize / KMemoryManager.PageSize; + int personalMmHeapPagesCount = metaData.PersonalMmHeapSize / KPageTableBase.PageSize; ProcessCreationInfo creationInfo = new ProcessCreationInfo( metaData.TitleName, @@ -198,7 +198,7 @@ namespace Ryujinx.HLE.HOS KResourceLimit resourceLimit = new KResourceLimit(context); - long applicationRgSize = (long)context.MemoryRegions[(int)MemoryRegion.Application].Size; + long applicationRgSize = (long)context.MemoryManager.MemoryRegions[(int)MemoryRegion.Application].Size; result = resourceLimit.SetLimitValue(LimitableResource.Memory, applicationRgSize); result |= resourceLimit.SetLimitValue(LimitableResource.Thread, 608); @@ -312,7 +312,7 @@ namespace Ryujinx.HLE.HOS return KernelResult.Success; } - size = BitUtils.AlignUp(size, KMemoryManager.PageSize); + size = BitUtils.AlignUp(size, KPageTableBase.PageSize); return process.MemoryManager.SetProcessMemoryPermission(address, size, permission); } diff --git a/Ryujinx.HLE/HOS/Services/Hid/Hid.cs b/Ryujinx.HLE/HOS/Services/Hid/Hid.cs index 61a12d9ed8..85c811fa2f 100644 --- a/Ryujinx.HLE/HOS/Services/Hid/Hid.cs +++ b/Ryujinx.HLE/HOS/Services/Hid/Hid.cs @@ -11,6 +11,7 @@ using Ryujinx.HLE.HOS.Services.Hid.Types.SharedMemory.Keyboard; using 
Ryujinx.HLE.HOS.Services.Hid.Types.SharedMemory.DebugPad; using Ryujinx.HLE.HOS.Services.Hid.Types.SharedMemory.TouchScreen; using Ryujinx.HLE.HOS.Services.Hid.Types.SharedMemory.Npad; +using Ryujinx.HLE.HOS.Kernel.Memory; namespace Ryujinx.HLE.HOS.Services.Hid { @@ -18,9 +19,9 @@ namespace Ryujinx.HLE.HOS.Services.Hid { private readonly Switch _device; - private readonly ulong _hidMemoryAddress; + private readonly SharedMemoryStorage _storage; - internal ref SharedMemory SharedMemory => ref _device.Memory.GetRef(_hidMemoryAddress); + internal ref SharedMemory SharedMemory => ref _storage.GetRef(0); internal const int SharedMemEntryCount = 17; @@ -48,10 +49,10 @@ namespace Ryujinx.HLE.HOS.Services.Hid CheckTypeSizeOrThrow(Horizon.HidSize); } - public Hid(in Switch device, ulong sharedHidMemoryAddress) + internal Hid(in Switch device, SharedMemoryStorage storage) { - _device = device; - _hidMemoryAddress = sharedHidMemoryAddress; + _device = device; + _storage = storage; SharedMemory = SharedMemory.Create(); } diff --git a/Ryujinx.HLE/HOS/Services/Ro/IRoInterface.cs b/Ryujinx.HLE/HOS/Services/Ro/IRoInterface.cs index 8070cf5482..ff5db94f7b 100644 --- a/Ryujinx.HLE/HOS/Services/Ro/IRoInterface.cs +++ b/Ryujinx.HLE/HOS/Services/Ro/IRoInterface.cs @@ -206,7 +206,7 @@ namespace Ryujinx.HLE.HOS.Services.Ro private ResultCode MapNro(KProcess process, NroInfo info, out ulong nroMappedAddress) { - KMemoryManager memMgr = process.MemoryManager; + KPageTableBase memMgr = process.MemoryManager; int retryCount = 0; @@ -252,7 +252,7 @@ namespace Ryujinx.HLE.HOS.Services.Ro private bool CanAddGuardRegionsInProcess(KProcess process, ulong baseAddress, ulong size) { - KMemoryManager memMgr = process.MemoryManager; + KPageTableBase memMgr = process.MemoryManager; KMemoryInfo memInfo = memMgr.QueryMemory(baseAddress - 1); @@ -270,7 +270,7 @@ namespace Ryujinx.HLE.HOS.Services.Ro private ResultCode MapCodeMemoryInProcess(KProcess process, ulong baseAddress, ulong size, out ulong targetAddress) { - KMemoryManager memMgr = process.MemoryManager; + KPageTableBase memMgr = process.MemoryManager; targetAddress = 0; @@ -327,7 +327,7 @@ namespace Ryujinx.HLE.HOS.Services.Ro ulong bssStart = dataStart + (ulong)relocatableObject.Data.Length; - ulong bssEnd = BitUtils.AlignUp(bssStart + (ulong)relocatableObject.BssSize, KMemoryManager.PageSize); + ulong bssEnd = BitUtils.AlignUp(bssStart + (ulong)relocatableObject.BssSize, KPageTableBase.PageSize); process.CpuMemory.Write(textStart, relocatableObject.Text); process.CpuMemory.Write(roStart, relocatableObject.Ro); diff --git a/Ryujinx.HLE/HOS/Services/Time/TimeManager.cs b/Ryujinx.HLE/HOS/Services/Time/TimeManager.cs index 7b17b18d65..e221789061 100644 --- a/Ryujinx.HLE/HOS/Services/Time/TimeManager.cs +++ b/Ryujinx.HLE/HOS/Services/Time/TimeManager.cs @@ -55,9 +55,9 @@ namespace Ryujinx.HLE.HOS.Services.Time EphemeralClockContextWriter = new EphemeralNetworkSystemClockContextWriter(); } - public void Initialize(Switch device, Horizon system, KSharedMemory sharedMemory, ulong timeSharedMemoryAddress, int timeSharedMemorySize) + public void Initialize(Switch device, Horizon system, KSharedMemory sharedMemory, SharedMemoryStorage timeSharedMemoryStorage, int timeSharedMemorySize) { - SharedMemory.Initialize(device, sharedMemory, timeSharedMemoryAddress, timeSharedMemorySize); + SharedMemory.Initialize(device, sharedMemory, timeSharedMemoryStorage, timeSharedMemorySize); // Here we use system on purpose as device. System isn't initialized at this point. 
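// Services now address shared memory through SharedMemoryStorage offsets
// instead of guest DRAM addresses. An illustrative peek (not in the patch) at
// the seqlock write counter guarding the steady clock context, which lives at
// offset 0x00 per the TimeSharedMemory constants below:
uint steadyClockWriteCounter = timeSharedMemoryStorage.GetRef<uint>(0x00);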
            StandardUserSystemClock.CreateAutomaticCorrectionEvent(system);
diff --git a/Ryujinx.HLE/HOS/Services/Time/TimeSharedMemory.cs b/Ryujinx.HLE/HOS/Services/Time/TimeSharedMemory.cs
index e368307694..8b08b040af 100644
--- a/Ryujinx.HLE/HOS/Services/Time/TimeSharedMemory.cs
+++ b/Ryujinx.HLE/HOS/Services/Time/TimeSharedMemory.cs
@@ -12,25 +12,25 @@ namespace Ryujinx.HLE.HOS.Services.Time
 {
     class TimeSharedMemory
     {
-        private Switch        _device;
-        private KSharedMemory _sharedMemory;
-        private ulong         _timeSharedMemoryAddress;
-        private int           _timeSharedMemorySize;
+        private Switch _device;
+        private KSharedMemory _sharedMemory;
+        private SharedMemoryStorage _timeSharedMemoryStorage;
+        private int _timeSharedMemorySize;
 
         private const uint SteadyClockContextOffset = 0x00;
         private const uint LocalSystemClockContextOffset = 0x38;
         private const uint NetworkSystemClockContextOffset = 0x80;
         private const uint AutomaticCorrectionEnabledOffset = 0xC8;
 
-        public void Initialize(Switch device, KSharedMemory sharedMemory, ulong timeSharedMemoryAddress, int timeSharedMemorySize)
+        public void Initialize(Switch device, KSharedMemory sharedMemory, SharedMemoryStorage timeSharedMemoryStorage, int timeSharedMemorySize)
         {
             _device = device;
             _sharedMemory = sharedMemory;
-            _timeSharedMemoryAddress = timeSharedMemoryAddress;
+            _timeSharedMemoryStorage = timeSharedMemoryStorage;
             _timeSharedMemorySize = timeSharedMemorySize;
 
             // Clean the shared memory
-            _device.Memory.ZeroFill(_timeSharedMemoryAddress, (ulong)_timeSharedMemorySize);
+            timeSharedMemoryStorage.ZeroFill();
         }
 
         public KSharedMemory GetSharedMemory()
@@ -89,23 +89,21 @@ namespace Ryujinx.HLE.HOS.Services.Time
         private T ReadObjectFromSharedMemory<T>(ulong offset, ulong padding) where T : unmanaged
         {
-            ulong indexOffset = _timeSharedMemoryAddress + offset;
-
             T result;
             uint index;
             uint possiblyNewIndex;
 
+            // Seqlock-style read: sample the 32-bit write counter at the context
+            // offset before and after the copy; retry if a writer bumped it.
             do
             {
-                index = _device.Memory.Read<uint>(indexOffset);
+                index = _timeSharedMemoryStorage.GetRef<uint>(offset);
 
-                ulong objectOffset = indexOffset + 4 + padding + (ulong)((index & 1) * Unsafe.SizeOf<T>());
+                ulong objectOffset = offset + 4 + padding + (ulong)((index & 1) * Unsafe.SizeOf<T>());
 
-                result = _device.Memory.Read<T>(objectOffset);
+                result = _timeSharedMemoryStorage.GetRef<T>(objectOffset);
 
                 Thread.MemoryBarrier();
 
-                possiblyNewIndex = _device.Memory.Read<uint>(indexOffset);
+                possiblyNewIndex = _timeSharedMemoryStorage.GetRef<uint>(offset);
             } while (index != possiblyNewIndex);
 
             return result;
@@ -113,15 +111,15 @@ namespace Ryujinx.HLE.HOS.Services.Time
 
         private void WriteObjectToSharedMemory<T>(ulong offset, ulong padding, T value) where T : unmanaged
         {
-            ulong indexOffset = _timeSharedMemoryAddress + offset;
-            uint newIndex = _device.Memory.Read<uint>(indexOffset) + 1;
-            ulong objectOffset = indexOffset + 4 + padding + (ulong)((newIndex & 1) * Unsafe.SizeOf<T>());
+            uint newIndex = _timeSharedMemoryStorage.GetRef<uint>(offset) + 1;
 
-            _device.Memory.Write(objectOffset, value);
+            ulong objectOffset = offset + 4 + padding + (ulong)((newIndex & 1) * Unsafe.SizeOf<T>());
+
+            _timeSharedMemoryStorage.GetRef<T>(objectOffset) = value;
 
             Thread.MemoryBarrier();
 
-            _device.Memory.Write(indexOffset, newIndex);
+            _timeSharedMemoryStorage.GetRef<uint>(offset) = newIndex;
         }
     }
 }
diff --git a/Ryujinx.HLE/Switch.cs b/Ryujinx.HLE/Switch.cs
index ee359ddaef..e05968ccb4 100644
--- a/Ryujinx.HLE/Switch.cs
+++ b/Ryujinx.HLE/Switch.cs
@@ -67,7 +67,7 @@ namespace Ryujinx.HLE
 
             AudioDeviceDriver = new CompatLayerHardwareDeviceDriver(configuration.AudioDeviceDriver);
 
-            Memory = new MemoryBlock(configuration.MemoryConfiguration.ToDramSize());
+            Memory = new 
MemoryBlock(configuration.MemoryConfiguration.ToDramSize(), MemoryAllocationFlags.Reserve); Gpu = new GpuContext(configuration.GpuRenderer); @@ -99,7 +99,7 @@ namespace Ryujinx.HLE Statistics = new PerformanceStatistics(); - Hid = new Hid(this, System.HidBaseAddress); + Hid = new Hid(this, System.HidStorage); Hid.InitDevices(); Application = new ApplicationLoader(this); diff --git a/Ryujinx.Memory.Tests/MockVirtualMemoryManager.cs b/Ryujinx.Memory.Tests/MockVirtualMemoryManager.cs index 245d365e7f..5051b20660 100644 --- a/Ryujinx.Memory.Tests/MockVirtualMemoryManager.cs +++ b/Ryujinx.Memory.Tests/MockVirtualMemoryManager.cs @@ -1,4 +1,6 @@ -using System; +using Ryujinx.Memory.Range; +using System; +using System.Collections.Generic; namespace Ryujinx.Memory.Tests { @@ -12,7 +14,7 @@ namespace Ryujinx.Memory.Tests { } - public void Map(ulong va, ulong pa, ulong size) + public void Map(ulong va, nuint hostAddress, ulong size) { throw new NotImplementedException(); } @@ -57,9 +59,9 @@ namespace Ryujinx.Memory.Tests throw new NotImplementedException(); } - public (ulong address, ulong size)[] GetPhysicalRegions(ulong va, ulong size) + IEnumerable IVirtualMemoryManager.GetPhysicalRegions(ulong va, ulong size) { - return NoMappings ? new (ulong address, ulong size)[0] : new (ulong address, ulong size)[] { (va, size) }; + return NoMappings ? new HostMemoryRange[0] : new HostMemoryRange[] { new HostMemoryRange((nuint)va, size) }; } public bool IsMapped(ulong va) diff --git a/Ryujinx.Memory.Tests/MultiRegionTrackingTests.cs b/Ryujinx.Memory.Tests/MultiRegionTrackingTests.cs index 6959b8c4a4..22e198c51e 100644 --- a/Ryujinx.Memory.Tests/MultiRegionTrackingTests.cs +++ b/Ryujinx.Memory.Tests/MultiRegionTrackingTests.cs @@ -22,7 +22,7 @@ namespace Ryujinx.Memory.Tests { _memoryBlock = new MemoryBlock(MemorySize); _memoryManager = new MockVirtualMemoryManager(MemorySize, PageSize); - _tracking = new MemoryTracking(_memoryManager, _memoryBlock, PageSize); + _tracking = new MemoryTracking(_memoryManager, PageSize); } [TearDown] diff --git a/Ryujinx.Memory.Tests/TrackingTests.cs b/Ryujinx.Memory.Tests/TrackingTests.cs index 37a2b867a3..8f0612a10c 100644 --- a/Ryujinx.Memory.Tests/TrackingTests.cs +++ b/Ryujinx.Memory.Tests/TrackingTests.cs @@ -23,7 +23,7 @@ namespace Ryujinx.Memory.Tests { _memoryBlock = new MemoryBlock(MemorySize); _memoryManager = new MockVirtualMemoryManager(MemorySize, PageSize); - _tracking = new MemoryTracking(_memoryManager, _memoryBlock, PageSize); + _tracking = new MemoryTracking(_memoryManager, PageSize); } [TearDown] diff --git a/Ryujinx.Memory/AddressSpaceManager.cs b/Ryujinx.Memory/AddressSpaceManager.cs index 916a381633..d8ee47467b 100644 --- a/Ryujinx.Memory/AddressSpaceManager.cs +++ b/Ryujinx.Memory/AddressSpaceManager.cs @@ -1,4 +1,7 @@ -using System; +using Ryujinx.Memory.Range; +using System; +using System.Collections.Generic; +using System.Linq; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; @@ -10,15 +13,9 @@ namespace Ryujinx.Memory /// public sealed class AddressSpaceManager : IVirtualMemoryManager, IWritableBlock { - public const int PageBits = 12; - public const int PageSize = 1 << PageBits; - public const int PageMask = PageSize - 1; - - private const int PtLevelBits = 9; // 9 * 4 + 12 = 48 (max address space size) - private const int PtLevelSize = 1 << PtLevelBits; - private const int PtLevelMask = PtLevelSize - 1; - - private const ulong Unmapped = ulong.MaxValue; + public const int PageBits = PageTable.PageBits; + public const int 
PageSize = PageTable.PageSize; + public const int PageMask = PageTable.PageMask; /// /// Address space width in bits. @@ -27,16 +24,14 @@ namespace Ryujinx.Memory private readonly ulong _addressSpaceSize; - private readonly MemoryBlock _backingMemory; - - private readonly ulong[][][][] _pageTable; + private readonly PageTable _pageTable; /// /// Creates a new instance of the memory manager. /// /// Physical backing memory where virtual memory will be mapped to /// Size of the address space - public AddressSpaceManager(MemoryBlock backingMemory, ulong addressSpaceSize) + public AddressSpaceManager(ulong addressSpaceSize) { ulong asSize = PageSize; int asBits = PageBits; @@ -49,8 +44,7 @@ namespace Ryujinx.Memory AddressSpaceBits = asBits; _addressSpaceSize = asSize; - _backingMemory = backingMemory; - _pageTable = new ulong[PtLevelSize][][][]; + _pageTable = new PageTable(); } /// @@ -60,18 +54,18 @@ namespace Ryujinx.Memory /// Addresses and size must be page aligned. /// /// Virtual memory address - /// Physical memory address + /// Physical memory address /// Size to be mapped - public void Map(ulong va, ulong pa, ulong size) + public void Map(ulong va, nuint hostAddress, ulong size) { AssertValidAddressAndSize(va, size); while (size != 0) { - PtMap(va, pa); + _pageTable.Map(va, hostAddress); va += PageSize; - pa += PageSize; + hostAddress += PageSize; size -= PageSize; } } @@ -87,7 +81,7 @@ namespace Ryujinx.Memory while (size != 0) { - PtUnmap(va); + _pageTable.Unmap(va); va += PageSize; size -= PageSize; @@ -146,7 +140,7 @@ namespace Ryujinx.Memory if (IsContiguousAndMapped(va, data.Length)) { - data.CopyTo(_backingMemory.GetSpan(GetPhysicalAddressInternal(va), data.Length)); + data.CopyTo(GetHostSpanContiguous(va, data.Length)); } else { @@ -154,22 +148,18 @@ namespace Ryujinx.Memory if ((va & PageMask) != 0) { - ulong pa = GetPhysicalAddressInternal(va); - size = Math.Min(data.Length, PageSize - (int)(va & PageMask)); - data.Slice(0, size).CopyTo(_backingMemory.GetSpan(pa, size)); + data.Slice(0, size).CopyTo(GetHostSpanContiguous(va, size)); offset += size; } for (; offset < data.Length; offset += size) { - ulong pa = GetPhysicalAddressInternal(va + (ulong)offset); - size = Math.Min(data.Length - offset, PageSize); - data.Slice(offset, size).CopyTo(_backingMemory.GetSpan(pa, size)); + data.Slice(offset, size).CopyTo(GetHostSpanContiguous(va + (ulong)offset, size)); } } } @@ -195,7 +185,7 @@ namespace Ryujinx.Memory if (IsContiguousAndMapped(va, size)) { - return _backingMemory.GetSpan(GetPhysicalAddressInternal(va), size); + return GetHostSpanContiguous(va, size); } else { @@ -219,7 +209,7 @@ namespace Ryujinx.Memory /// Size of the data /// A writable region of memory containing the data /// Throw for unhandled invalid or unmapped memory accesses - public WritableRegion GetWritableRegion(ulong va, int size) + public unsafe WritableRegion GetWritableRegion(ulong va, int size) { if (size == 0) { @@ -228,7 +218,7 @@ namespace Ryujinx.Memory if (IsContiguousAndMapped(va, size)) { - return new WritableRegion(null, va, _backingMemory.GetMemory(GetPhysicalAddressInternal(va), size)); + return new WritableRegion(null, va, new NativeMemoryManager((byte*)GetHostAddress(va), size).Memory); } else { @@ -250,14 +240,14 @@ namespace Ryujinx.Memory /// Virtual address of the data /// A reference to the data in memory /// Throw if the specified memory region is not contiguous in physical memory - public ref T GetRef(ulong va) where T : unmanaged + public unsafe ref T GetRef(ulong va) where T : 
unmanaged { if (!IsContiguous(va, Unsafe.SizeOf())) { ThrowMemoryNotContiguous(); } - return ref _backingMemory.GetRef(GetPhysicalAddressInternal(va)); + return ref *(T*)GetHostAddress(va); } /// @@ -299,7 +289,7 @@ namespace Ryujinx.Memory return false; } - if (GetPhysicalAddressInternal(va) + PageSize != GetPhysicalAddressInternal(va + PageSize)) + if (GetHostAddress(va) + PageSize != GetHostAddress(va + PageSize)) { return false; } @@ -317,9 +307,48 @@ namespace Ryujinx.Memory /// Virtual address of the range /// Size of the range /// Array of physical regions - public (ulong address, ulong size)[] GetPhysicalRegions(ulong va, ulong size) + public IEnumerable GetPhysicalRegions(ulong va, ulong size) { - throw new NotImplementedException(); + if (size == 0) + { + return Enumerable.Empty(); + } + + if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size)) + { + return null; + } + + int pages = GetPagesCount(va, (uint)size, out va); + + var regions = new List(); + + nuint regionStart = GetHostAddress(va); + ulong regionSize = PageSize; + + for (int page = 0; page < pages - 1; page++) + { + if (!ValidateAddress(va + PageSize)) + { + return null; + } + + nuint newHostAddress = GetHostAddress(va + PageSize); + + if (GetHostAddress(va) + PageSize != newHostAddress) + { + regions.Add(new HostMemoryRange(regionStart, regionSize)); + regionStart = newHostAddress; + regionSize = 0; + } + + va += PageSize; + regionSize += PageSize; + } + + regions.Add(new HostMemoryRange(regionStart, regionSize)); + + return regions; } private void ReadImpl(ulong va, Span data) @@ -335,22 +364,18 @@ namespace Ryujinx.Memory if ((va & PageMask) != 0) { - ulong pa = GetPhysicalAddressInternal(va); - size = Math.Min(data.Length, PageSize - (int)(va & PageMask)); - _backingMemory.GetSpan(pa, size).CopyTo(data.Slice(0, size)); + GetHostSpanContiguous(va, size).CopyTo(data.Slice(0, size)); offset += size; } for (; offset < data.Length; offset += size) { - ulong pa = GetPhysicalAddressInternal(va + (ulong)offset); - size = Math.Min(data.Length - offset, PageSize); - _backingMemory.GetSpan(pa, size).CopyTo(data.Slice(offset, size)); + GetHostSpanContiguous(va + (ulong)offset, size).CopyTo(data.Slice(offset, size)); } } @@ -367,7 +392,7 @@ namespace Ryujinx.Memory return false; } - return PtRead(va) != Unmapped; + return _pageTable.Read(va) != 0; } /// @@ -434,28 +459,14 @@ namespace Ryujinx.Memory } } - /// - /// Performs address translation of the address inside a mapped memory range. - /// - /// - /// If the address is invalid or unmapped, -1 will be returned. - /// - /// Virtual address to be translated - /// The physical address - public ulong GetPhysicalAddress(ulong va) + private unsafe Span GetHostSpanContiguous(ulong va, int size) { - // We return -1L if the virtual address is invalid or unmapped. 
- if (!ValidateAddress(va) || !IsMapped(va)) - { - return ulong.MaxValue; - } - - return GetPhysicalAddressInternal(va); + return new Span((void*)GetHostAddress(va), size); } - private ulong GetPhysicalAddressInternal(ulong va) + private nuint GetHostAddress(ulong va) { - return PtRead(va) + (va & PageMask); + return _pageTable.Read(va) + (nuint)(va & PageMask); } /// @@ -469,132 +480,6 @@ namespace Ryujinx.Memory throw new NotImplementedException(); } - private ulong PtRead(ulong va) - { - int l3 = (int)(va >> PageBits) & PtLevelMask; - int l2 = (int)(va >> (PageBits + PtLevelBits)) & PtLevelMask; - int l1 = (int)(va >> (PageBits + PtLevelBits * 2)) & PtLevelMask; - int l0 = (int)(va >> (PageBits + PtLevelBits * 3)) & PtLevelMask; - - if (_pageTable[l0] == null) - { - return Unmapped; - } - - if (_pageTable[l0][l1] == null) - { - return Unmapped; - } - - if (_pageTable[l0][l1][l2] == null) - { - return Unmapped; - } - - return _pageTable[l0][l1][l2][l3]; - } - - private void PtMap(ulong va, ulong value) - { - int l3 = (int)(va >> PageBits) & PtLevelMask; - int l2 = (int)(va >> (PageBits + PtLevelBits)) & PtLevelMask; - int l1 = (int)(va >> (PageBits + PtLevelBits * 2)) & PtLevelMask; - int l0 = (int)(va >> (PageBits + PtLevelBits * 3)) & PtLevelMask; - - if (_pageTable[l0] == null) - { - _pageTable[l0] = new ulong[PtLevelSize][][]; - } - - if (_pageTable[l0][l1] == null) - { - _pageTable[l0][l1] = new ulong[PtLevelSize][]; - } - - if (_pageTable[l0][l1][l2] == null) - { - _pageTable[l0][l1][l2] = new ulong[PtLevelSize]; - - for (int i = 0; i < _pageTable[l0][l1][l2].Length; i++) - { - _pageTable[l0][l1][l2][i] = Unmapped; - } - } - - _pageTable[l0][l1][l2][l3] = value; - } - - private void PtUnmap(ulong va) - { - int l3 = (int)(va >> PageBits) & PtLevelMask; - int l2 = (int)(va >> (PageBits + PtLevelBits)) & PtLevelMask; - int l1 = (int)(va >> (PageBits + PtLevelBits * 2)) & PtLevelMask; - int l0 = (int)(va >> (PageBits + PtLevelBits * 3)) & PtLevelMask; - - if (_pageTable[l0] == null) - { - return; - } - - if (_pageTable[l0][l1] == null) - { - return; - } - - if (_pageTable[l0][l1][l2] == null) - { - return; - } - - _pageTable[l0][l1][l2][l3] = Unmapped; - - bool empty = true; - - for (int i = 0; i < _pageTable[l0][l1][l2].Length; i++) - { - empty &= (_pageTable[l0][l1][l2][i] == Unmapped); - } - - if (empty) - { - _pageTable[l0][l1][l2] = null; - - RemoveIfAllNull(l0, l1); - } - } - - private void RemoveIfAllNull(int l0, int l1) - { - bool empty = true; - - for (int i = 0; i < _pageTable[l0][l1].Length; i++) - { - empty &= (_pageTable[l0][l1][i] == null); - } - - if (empty) - { - _pageTable[l0][l1] = null; - - RemoveIfAllNull(l0); - } - } - - private void RemoveIfAllNull(int l0) - { - bool empty = true; - - for (int i = 0; i < _pageTable[l0].Length; i++) - { - empty &= (_pageTable[l0][i] == null); - } - - if (empty) - { - _pageTable[l0] = null; - } - } - public void SignalMemoryTracking(ulong va, ulong size, bool write) { // Only the ARM Memory Manager has tracking for now. 
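The AddressSpaceManager changes above replace the hand-rolled four-level page table with the shared PageTable class, and translation now yields host pointers instead of offsets into a backing MemoryBlock. A minimal sketch of the resulting lookup, assuming the PageTable<nuint> semantics the patch relies on (Read returning 0 means unmapped):

    // Illustrative only; mirrors GetHostAddress/IsMapped above.
    static nuint TranslateToHost(PageTable<nuint> pageTable, ulong va)
    {
        const ulong PageMask = 0xFFF; // 4 KiB pages: low 12 bits are the in-page offset

        nuint pageBase = pageTable.Read(va);

        return pageBase == 0 ? (nuint)0 : pageBase + (nuint)(va & PageMask);
    }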
diff --git a/Ryujinx.Memory/IRefCounted.cs b/Ryujinx.Memory/IRefCounted.cs new file mode 100644 index 0000000000..e0a311d6da --- /dev/null +++ b/Ryujinx.Memory/IRefCounted.cs @@ -0,0 +1,8 @@ +namespace Ryujinx.Memory +{ + public interface IRefCounted + { + void IncrementReferenceCount(); + void DecrementReferenceCount(); + } +} diff --git a/Ryujinx.Memory/IVirtualMemoryManager.cs b/Ryujinx.Memory/IVirtualMemoryManager.cs index f52c4b2205..b5e080199e 100644 --- a/Ryujinx.Memory/IVirtualMemoryManager.cs +++ b/Ryujinx.Memory/IVirtualMemoryManager.cs @@ -1,16 +1,61 @@ -using System; +using Ryujinx.Memory.Range; +using System; +using System.Collections.Generic; namespace Ryujinx.Memory { public interface IVirtualMemoryManager { - void Map(ulong va, ulong pa, ulong size); + /// + /// Maps a virtual memory range into a physical memory range. + /// + /// + /// Addresses and size must be page aligned. + /// + /// Virtual memory address + /// Pointer where the region should be mapped to + /// Size to be mapped + void Map(ulong va, nuint hostAddress, ulong size); + + /// + /// Unmaps a previously mapped range of virtual memory. + /// + /// Virtual address of the range to be unmapped + /// Size of the range to be unmapped void Unmap(ulong va, ulong size); + /// + /// Reads data from CPU mapped memory. + /// + /// Type of the data being read + /// Virtual address of the data in memory + /// The data + /// Throw for unhandled invalid or unmapped memory accesses T Read(ulong va) where T : unmanaged; + + /// + /// Reads data from CPU mapped memory. + /// + /// Virtual address of the data in memory + /// Span to store the data being read into + /// Throw for unhandled invalid or unmapped memory accesses void Read(ulong va, Span data); + /// + /// Writes data to CPU mapped memory. + /// + /// Type of the data being written + /// Virtual address to write the data into + /// Data to be written + /// Throw for unhandled invalid or unmapped memory accesses void Write(ulong va, T value) where T : unmanaged; + + /// + /// Writes data to CPU mapped memory, with write tracking. + /// + /// Virtual address to write the data into + /// Data to be written + /// Throw for unhandled invalid or unmapped memory accesses void Write(ulong va, ReadOnlySpan data); void Fill(ulong va, ulong size, byte value) @@ -25,17 +70,76 @@ namespace Ryujinx.Memory } } + /// + /// Gets a read-only span of data from CPU mapped memory. + /// + /// Virtual address of the data + /// Size of the data + /// True if read tracking is triggered on the span + /// A read-only span of the data + /// Throw for unhandled invalid or unmapped memory accesses ReadOnlySpan GetSpan(ulong va, int size, bool tracked = false); + + /// + /// Gets a region of memory that can be written to. + /// + /// Virtual address of the data + /// Size of the data + /// A writable region of memory containing the data + /// Throw for unhandled invalid or unmapped memory accesses WritableRegion GetWritableRegion(ulong va, int size); + + /// + /// Gets a reference for the given type at the specified virtual memory address. + /// + /// + /// The data must be located at a contiguous memory region. 
+ /// + /// Type of the data to get the reference + /// Virtual address of the data + /// A reference to the data in memory + /// Throw if the specified memory region is not contiguous in physical memory ref T GetRef(ulong va) where T : unmanaged; - (ulong address, ulong size)[] GetPhysicalRegions(ulong va, ulong size); + /// + /// Gets the physical regions that make up the given virtual address region. + /// If any part of the virtual region is unmapped, null is returned. + /// + /// Virtual address of the range + /// Size of the range + /// Array of physical regions + IEnumerable GetPhysicalRegions(ulong va, ulong size); + /// + /// Checks if the page at a given CPU virtual address is mapped. + /// + /// Virtual address to check + /// True if the address is mapped, false otherwise bool IsMapped(ulong va); - bool IsRangeMapped(ulong va, ulong size); - ulong GetPhysicalAddress(ulong va); + /// + /// Checks if a memory range is mapped. + /// + /// Virtual address of the range + /// Size of the range in bytes + /// True if the entire range is mapped, false otherwise + bool IsRangeMapped(ulong va, ulong size); + + /// + /// Alerts the memory tracking that a given region has been read from or written to. + /// This should be called before read/write is performed. + /// + /// Virtual address of the region + /// Size of the region + /// True if the region was written, false if read void SignalMemoryTracking(ulong va, ulong size, bool write); + + /// + /// Reprotect a region of virtual memory for tracking. + /// + /// Virtual address base + /// Size of the region to protect + /// Memory protection to set void TrackingReprotect(ulong va, ulong size, MemoryPermission protection); } } diff --git a/Ryujinx.Cpu/InvalidAccessHandler.cs b/Ryujinx.Memory/InvalidAccessHandler.cs similarity index 92% rename from Ryujinx.Cpu/InvalidAccessHandler.cs rename to Ryujinx.Memory/InvalidAccessHandler.cs index 0d3d387d83..3dadb766d6 100644 --- a/Ryujinx.Cpu/InvalidAccessHandler.cs +++ b/Ryujinx.Memory/InvalidAccessHandler.cs @@ -1,4 +1,4 @@ -namespace Ryujinx.Cpu +namespace Ryujinx.Memory { /// /// Function that handles a invalid memory access from the emulated CPU. diff --git a/Ryujinx.Memory/MemoryAllocationFlags.cs b/Ryujinx.Memory/MemoryAllocationFlags.cs index 94025d384b..d9420dd371 100644 --- a/Ryujinx.Memory/MemoryAllocationFlags.cs +++ b/Ryujinx.Memory/MemoryAllocationFlags.cs @@ -23,6 +23,12 @@ namespace Ryujinx.Memory /// Enables read and write tracking of the memory block. /// This currently does nothing and is reserved for future use. /// - Tracked = 1 << 1 + Tracked = 1 << 1, + + /// + /// Enables mirroring of the memory block through aliasing of memory pages. + /// When enabled, this allows creating more memory blocks sharing the same backing storage. + /// + Mirrorable = 1 << 2 } } diff --git a/Ryujinx.Memory/MemoryBlock.cs b/Ryujinx.Memory/MemoryBlock.cs index 4e775bba70..e331a4537e 100644 --- a/Ryujinx.Memory/MemoryBlock.cs +++ b/Ryujinx.Memory/MemoryBlock.cs @@ -7,8 +7,11 @@ namespace Ryujinx.Memory /// /// Represents a block of contiguous physical guest memory. /// - public sealed class MemoryBlock : IDisposable + public sealed class MemoryBlock : IWritableBlock, IDisposable { + private readonly bool _usesSharedMemory; + private readonly bool _isMirror; + private IntPtr _sharedMemory; private IntPtr _pointer; /// @@ -22,15 +25,21 @@ namespace Ryujinx.Memory public ulong Size { get; } /// - /// Initializes a new instance of the memory block class. 
+ /// Creates a new instance of the memory block class. /// - /// Size of the memory block + /// Size of the memory block in bytes /// Flags that controls memory block memory allocation /// Throw when there's no enough memory to allocate the requested size /// Throw when the current platform is not supported public MemoryBlock(ulong size, MemoryAllocationFlags flags = MemoryAllocationFlags.None) { - if (flags.HasFlag(MemoryAllocationFlags.Reserve)) + if (flags.HasFlag(MemoryAllocationFlags.Mirrorable)) + { + _sharedMemory = MemoryManagement.CreateSharedMemory(size, flags.HasFlag(MemoryAllocationFlags.Reserve)); + _pointer = MemoryManagement.MapSharedMemory(_sharedMemory); + _usesSharedMemory = true; + } + else if (flags.HasFlag(MemoryAllocationFlags.Reserve)) { _pointer = MemoryManagement.Reserve(size); } @@ -42,6 +51,39 @@ namespace Ryujinx.Memory Size = size; } + /// + /// Creates a new instance of the memory block class, with a existing backing storage. + /// + /// Size of the memory block in bytes + /// Shared memory to use as backing storage for this block + /// Throw when there's no enough address space left to map the shared memory + /// Throw when the current platform is not supported + private MemoryBlock(ulong size, IntPtr sharedMemory) + { + _pointer = MemoryManagement.MapSharedMemory(sharedMemory); + Size = size; + _usesSharedMemory = true; + _isMirror = true; + } + + /// + /// Creates a memory block that shares the backing storage with this block. + /// The memory and page commitments will be shared, however memory protections are separate. + /// + /// A new memory block that shares storage with this one + /// Throw when the current memory block does not support mirroring + /// Throw when there's no enough address space left to map the shared memory + /// Throw when the current platform is not supported + public MemoryBlock CreateMirror() + { + if (_sharedMemory == IntPtr.Zero) + { + throw new NotSupportedException("Mirroring is not supported on the memory block because the Mirrorable flag was not set."); + } + + return new MemoryBlock(Size, _sharedMemory); + } + /// /// Commits a region of memory that has previously been reserved. /// This can be used to allocate memory on demand. @@ -56,18 +98,47 @@ namespace Ryujinx.Memory return MemoryManagement.Commit(GetPointerInternal(offset, size), size); } + /// + /// Decommits a region of memory that has previously been reserved and optionally comitted. + /// This can be used to free previously allocated memory on demand. + /// + /// Starting offset of the range to be decommitted + /// Size of the range to be decommitted + /// True if the operation was successful, false otherwise + /// Throw when the memory block has already been disposed + /// Throw when either or are out of range + public bool Decommit(ulong offset, ulong size) + { + return MemoryManagement.Decommit(GetPointerInternal(offset, size), size); + } + /// /// Reprotects a region of memory. 
/// /// Starting offset of the range to be reprotected /// Size of the range to be reprotected /// New memory permissions + /// True if a failed reprotect should throw /// Throw when the memory block has already been disposed /// Throw when either or are out of range /// Throw when is invalid - public void Reprotect(ulong offset, ulong size, MemoryPermission permission) + public void Reprotect(ulong offset, ulong size, MemoryPermission permission, bool throwOnFail = true) { - MemoryManagement.Reprotect(GetPointerInternal(offset, size), size, permission); + MemoryManagement.Reprotect(GetPointerInternal(offset, size), size, permission, throwOnFail); + } + + /// + /// Remaps a region of memory into this memory block. + /// + /// Starting offset of the range to be remapped into + /// Starting offset of the range to be remapped from + /// Size of the range to be remapped + /// Throw when the memory block has already been disposed + /// Throw when either or are out of range + /// Throw when is invalid + public void Remap(ulong offset, IntPtr sourceAddress, ulong size) + { + MemoryManagement.Remap(GetPointerInternal(offset, size), sourceAddress, size); } /// @@ -202,7 +273,7 @@ namespace Ryujinx.Memory /// Throw when the memory block has already been disposed /// Throw when either or are out of range [MethodImpl(MethodImplOptions.AggressiveInlining)] - public IntPtr GetPointer(ulong offset, int size) => GetPointerInternal(offset, (ulong)size); + public nuint GetPointer(ulong offset, ulong size) => (nuint)(ulong)GetPointerInternal(offset, size); [MethodImpl(MethodImplOptions.AggressiveInlining)] private IntPtr GetPointerInternal(ulong offset, ulong size) @@ -235,7 +306,7 @@ namespace Ryujinx.Memory [MethodImpl(MethodImplOptions.AggressiveInlining)] public unsafe Span GetSpan(ulong offset, int size) { - return new Span((void*)GetPointer(offset, size), size); + return new Span((void*)GetPointerInternal(offset, (ulong)size), size); } /// @@ -249,7 +320,20 @@ namespace Ryujinx.Memory [MethodImpl(MethodImplOptions.AggressiveInlining)] public unsafe Memory GetMemory(ulong offset, int size) { - return new NativeMemoryManager((byte*)GetPointer(offset, size), size).Memory; + return new NativeMemoryManager((byte*)GetPointerInternal(offset, (ulong)size), size).Memory; + } + + /// + /// Gets a writable region of a given memory block region. + /// + /// Start offset of the memory region + /// Size in bytes of the region + /// Writable region of the memory region + /// Throw when the memory block has already been disposed + /// Throw when either or are out of range + public WritableRegion GetWritableRegion(ulong offset, int size) + { + return new WritableRegion(this, offset, GetMemory(offset, size)); } /// @@ -280,7 +364,20 @@ namespace Ryujinx.Memory // If pointer is null, the memory was already freed or never allocated. 
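// Usage sketch for the mirroring support added above (illustrative values,
// shown commented out so it does not read as part of Dispose below):
//
//     var main = new MemoryBlock(0x1000, MemoryAllocationFlags.Mirrorable);
//     using MemoryBlock mirror = main.CreateMirror();
//
//     main.Write(0, 0xCAFEu);
//     uint aliased = mirror.Read<uint>(0); // 0xCAFE: both views share backing pages
//
// Protections remain independent: main can be reprotected without affecting mirror.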
if (ptr != IntPtr.Zero) { - MemoryManagement.Free(ptr); + if (_usesSharedMemory) + { + MemoryManagement.UnmapSharedMemory(ptr); + + if (_sharedMemory != IntPtr.Zero && !_isMirror) + { + MemoryManagement.DestroySharedMemory(_sharedMemory); + _sharedMemory = IntPtr.Zero; + } + } + else + { + MemoryManagement.Free(ptr); + } } } diff --git a/Ryujinx.Memory/MemoryManagement.cs b/Ryujinx.Memory/MemoryManagement.cs index 2525bef72d..3e5ec3418d 100644 --- a/Ryujinx.Memory/MemoryManagement.cs +++ b/Ryujinx.Memory/MemoryManagement.cs @@ -62,7 +62,26 @@ namespace Ryujinx.Memory } } - public static void Reprotect(IntPtr address, ulong size, MemoryPermission permission) + public static bool Decommit(IntPtr address, ulong size) + { + if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + { + IntPtr sizeNint = new IntPtr((long)size); + + return MemoryManagementWindows.Decommit(address, sizeNint); + } + else if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux) || + RuntimeInformation.IsOSPlatform(OSPlatform.OSX)) + { + return MemoryManagementUnix.Decommit(address, size); + } + else + { + throw new PlatformNotSupportedException(); + } + } + + public static void Reprotect(IntPtr address, ulong size, MemoryPermission permission, bool throwOnFail) { bool result; @@ -82,7 +101,7 @@ namespace Ryujinx.Memory throw new PlatformNotSupportedException(); } - if (!result) + if (!result && throwOnFail) { throw new MemoryProtectionException(permission); } @@ -104,5 +123,88 @@ namespace Ryujinx.Memory throw new PlatformNotSupportedException(); } } + + public static IntPtr CreateSharedMemory(ulong size, bool reserve) + { + if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + { + IntPtr sizeNint = new IntPtr((long)size); + + return MemoryManagementWindows.CreateSharedMemory(sizeNint, reserve); + } + else if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux) || + RuntimeInformation.IsOSPlatform(OSPlatform.OSX)) + { + return MemoryManagementUnix.CreateSharedMemory(size, reserve); + } + else + { + throw new PlatformNotSupportedException(); + } + } + + public static void DestroySharedMemory(IntPtr handle) + { + if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + { + MemoryManagementWindows.DestroySharedMemory(handle); + } + else if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux) || + RuntimeInformation.IsOSPlatform(OSPlatform.OSX)) + { + MemoryManagementUnix.DestroySharedMemory(handle); + } + else + { + throw new PlatformNotSupportedException(); + } + } + + public static IntPtr MapSharedMemory(IntPtr handle) + { + if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + { + return MemoryManagementWindows.MapSharedMemory(handle); + } + else if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux) || + RuntimeInformation.IsOSPlatform(OSPlatform.OSX)) + { + return MemoryManagementUnix.MapSharedMemory(handle); + } + else + { + throw new PlatformNotSupportedException(); + } + } + + public static void UnmapSharedMemory(IntPtr address) + { + if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + { + MemoryManagementWindows.UnmapSharedMemory(address); + } + else if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux) || + RuntimeInformation.IsOSPlatform(OSPlatform.OSX)) + { + MemoryManagementUnix.UnmapSharedMemory(address); + } + else + { + throw new PlatformNotSupportedException(); + } + } + + public static IntPtr Remap(IntPtr target, IntPtr source, ulong size) + { + if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux) || + RuntimeInformation.IsOSPlatform(OSPlatform.OSX)) + { + return 
MemoryManagementUnix.Remap(target, source, size); + } + else + { + throw new PlatformNotSupportedException(); + } + } } } \ No newline at end of file diff --git a/Ryujinx.Memory/MemoryManagementUnix.cs b/Ryujinx.Memory/MemoryManagementUnix.cs index 810968675e..6985278703 100644 --- a/Ryujinx.Memory/MemoryManagementUnix.cs +++ b/Ryujinx.Memory/MemoryManagementUnix.cs @@ -1,11 +1,31 @@ using Mono.Unix.Native; using System; using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Runtime.InteropServices; namespace Ryujinx.Memory { static class MemoryManagementUnix { + private struct UnixSharedMemory + { + public IntPtr Pointer; + public ulong Size; + public IntPtr SourcePointer; + } + + [DllImport("libc", SetLastError = true)] + public static extern IntPtr mremap(IntPtr old_address, ulong old_size, ulong new_size, MremapFlags flags, IntPtr new_address); + + [DllImport("libc", SetLastError = true)] + public static extern int madvise(IntPtr address, ulong size, int advice); + + private const int MADV_DONTNEED = 4; + private const int MADV_REMOVE = 9; + + private static readonly List _sharedMemory = new List(); + private static readonly ConcurrentDictionary _sharedMemorySource = new ConcurrentDictionary(); private static readonly ConcurrentDictionary _allocations = new ConcurrentDictionary(); public static IntPtr Allocate(ulong size) @@ -18,9 +38,23 @@ namespace Ryujinx.Memory return AllocateInternal(size, MmapProts.PROT_NONE); } - private static IntPtr AllocateInternal(ulong size, MmapProts prot) + private static IntPtr AllocateInternal(ulong size, MmapProts prot, bool shared = false) { - const MmapFlags flags = MmapFlags.MAP_PRIVATE | MmapFlags.MAP_ANONYMOUS; + MmapFlags flags = MmapFlags.MAP_ANONYMOUS; + + if (shared) + { + flags |= MmapFlags.MAP_SHARED | (MmapFlags)0x80000; + } + else + { + flags |= MmapFlags.MAP_PRIVATE; + } + + if (prot == MmapProts.PROT_NONE) + { + flags |= MmapFlags.MAP_NORESERVE; + } IntPtr ptr = Syscall.mmap(IntPtr.Zero, size, prot, flags, -1, 0); @@ -40,7 +74,42 @@ namespace Ryujinx.Memory public static bool Commit(IntPtr address, ulong size) { - return Syscall.mprotect(address, size, MmapProts.PROT_READ | MmapProts.PROT_WRITE) == 0; + bool success = Syscall.mprotect(address, size, MmapProts.PROT_READ | MmapProts.PROT_WRITE) == 0; + + if (success) + { + foreach (var shared in _sharedMemory) + { + if ((ulong)address + size > (ulong)shared.SourcePointer && (ulong)address < (ulong)shared.SourcePointer + shared.Size) + { + ulong sharedAddress = ((ulong)address - (ulong)shared.SourcePointer) + (ulong)shared.Pointer; + + if (Syscall.mprotect((IntPtr)sharedAddress, size, MmapProts.PROT_READ | MmapProts.PROT_WRITE) != 0) + { + return false; + } + } + } + } + + return success; + } + + public static bool Decommit(IntPtr address, ulong size) + { + bool isShared; + + lock (_sharedMemory) + { + isShared = _sharedMemory.Exists(x => (ulong)address >= (ulong)x.Pointer && (ulong)address + size <= (ulong)x.Pointer + x.Size); + } + + // Must be writable for madvise to work properly. + Syscall.mprotect(address, size, MmapProts.PROT_READ | MmapProts.PROT_WRITE); + + madvise(address, size, isShared ? 
MADV_REMOVE : MADV_DONTNEED); + + return Syscall.mprotect(address, size, MmapProts.PROT_NONE) == 0; } public static bool Reprotect(IntPtr address, ulong size, MemoryPermission permission) @@ -71,5 +140,140 @@ namespace Ryujinx.Memory return false; } + + public static IntPtr Remap(IntPtr target, IntPtr source, ulong size) + { + int flags = (int)MremapFlags.MREMAP_MAYMOVE; + + if (target != IntPtr.Zero) + { + flags |= 2; + } + + IntPtr result = mremap(source, 0, size, (MremapFlags)(flags), target); + + if (result == IntPtr.Zero) + { + throw new InvalidOperationException(); + } + + return result; + } + + public static IntPtr CreateSharedMemory(ulong size, bool reserve) + { + IntPtr result = AllocateInternal( + size, + reserve ? MmapProts.PROT_NONE : MmapProts.PROT_READ | MmapProts.PROT_WRITE, + true); + + if (result == IntPtr.Zero) + { + throw new OutOfMemoryException(); + } + + _sharedMemorySource[result] = (ulong)size; + + return result; + } + + public static void DestroySharedMemory(IntPtr handle) + { + lock (_sharedMemory) + { + foreach (var memory in _sharedMemory) + { + if (memory.SourcePointer == handle) + { + throw new InvalidOperationException("Shared memory cannot be destroyed unless fully unmapped."); + } + } + } + + _sharedMemorySource.Remove(handle, out ulong _); + } + + public static IntPtr MapSharedMemory(IntPtr handle) + { + // Try find the handle for this shared memory. If it is mapped, then we want to map + // it a second time in another location. + // If it is not mapped, then its handle is the mapping. + + ulong size = _sharedMemorySource[handle]; + + if (size == 0) + { + throw new InvalidOperationException("Shared memory cannot be mapped after its source is unmapped."); + } + + lock (_sharedMemory) + { + foreach (var memory in _sharedMemory) + { + if (memory.Pointer == handle) + { + IntPtr result = AllocateInternal( + memory.Size, + MmapProts.PROT_NONE + ); + + if (result == IntPtr.Zero) + { + throw new OutOfMemoryException(); + } + + Remap(result, handle, memory.Size); + + _sharedMemory.Add(new UnixSharedMemory + { + Pointer = result, + Size = memory.Size, + + SourcePointer = handle + }); + + return result; + } + } + + _sharedMemory.Add(new UnixSharedMemory + { + Pointer = handle, + Size = size, + + SourcePointer = handle + }); + } + + return handle; + } + + public static void UnmapSharedMemory(IntPtr address) + { + lock (_sharedMemory) + { + int removed = _sharedMemory.RemoveAll(memory => + { + if (memory.Pointer == address) + { + if (memory.Pointer == memory.SourcePointer) + { + // After removing the original mapping, it cannot be mapped again. 
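                        // (Sketch-level note on the scheme: on Unix the shared memory "handle"
                        // is itself the first MAP_SHARED mapping, and further views are created
                        // with mremap(old_size = 0), which duplicates a mapping instead of
                        // moving it. Once this original mapping is gone there is no source
                        // left to mremap from, hence its recorded size is zeroed below.)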
+ _sharedMemorySource[memory.SourcePointer] = 0; + } + + Free(address); + return true; + } + + return false; + }); + + if (removed == 0) + { + throw new InvalidOperationException("Shared memory mapping could not be found."); + } + } + } } } \ No newline at end of file diff --git a/Ryujinx.Memory/MemoryManagementWindows.cs b/Ryujinx.Memory/MemoryManagementWindows.cs index 9513bb540e..b14fb6c1e0 100644 --- a/Ryujinx.Memory/MemoryManagementWindows.cs +++ b/Ryujinx.Memory/MemoryManagementWindows.cs @@ -1,57 +1,69 @@ -using System; +using Ryujinx.Memory.WindowsShared; +using System; +using System.Collections.Generic; using System.Runtime.InteropServices; namespace Ryujinx.Memory { static class MemoryManagementWindows { - [Flags] - private enum AllocationType : uint - { - Commit = 0x1000, - Reserve = 0x2000, - Decommit = 0x4000, - Release = 0x8000, - Reset = 0x80000, - Physical = 0x400000, - TopDown = 0x100000, - WriteWatch = 0x200000, - LargePages = 0x20000000 - } + private static readonly IntPtr InvalidHandleValue = new IntPtr(-1); + private static bool UseWin10Placeholders; - [Flags] - private enum MemoryProtection : uint - { - NoAccess = 0x01, - ReadOnly = 0x02, - ReadWrite = 0x04, - WriteCopy = 0x08, - Execute = 0x10, - ExecuteRead = 0x20, - ExecuteReadWrite = 0x40, - ExecuteWriteCopy = 0x80, - GuardModifierflag = 0x100, - NoCacheModifierflag = 0x200, - WriteCombineModifierflag = 0x400 - } + private static object _emulatedHandleLock = new object(); + private static EmulatedSharedMemoryWindows[] _emulatedShared = new EmulatedSharedMemoryWindows[64]; + private static List _emulatedSharedList = new List(); - [DllImport("kernel32.dll")] + [DllImport("kernel32.dll", SetLastError = true)] private static extern IntPtr VirtualAlloc( IntPtr lpAddress, IntPtr dwSize, AllocationType flAllocationType, MemoryProtection flProtect); - [DllImport("kernel32.dll")] + [DllImport("kernel32.dll", SetLastError = true)] private static extern bool VirtualProtect( IntPtr lpAddress, IntPtr dwSize, MemoryProtection flNewProtect, out MemoryProtection lpflOldProtect); - [DllImport("kernel32.dll")] + [DllImport("kernel32.dll", SetLastError = true)] private static extern bool VirtualFree(IntPtr lpAddress, IntPtr dwSize, AllocationType dwFreeType); + [DllImport("kernel32.dll", SetLastError = true)] + private static extern IntPtr CreateFileMapping( + IntPtr hFile, + IntPtr lpFileMappingAttributes, + FileMapProtection flProtect, + uint dwMaximumSizeHigh, + uint dwMaximumSizeLow, + [MarshalAs(UnmanagedType.LPWStr)] string lpName); + + [DllImport("kernel32.dll", SetLastError = true)] + private static extern bool CloseHandle(IntPtr hObject); + + [DllImport("kernel32.dll", SetLastError = true)] + private static extern IntPtr MapViewOfFile( + IntPtr hFileMappingObject, + uint dwDesiredAccess, + uint dwFileOffsetHigh, + uint dwFileOffsetLow, + IntPtr dwNumberOfBytesToMap); + + [DllImport("kernel32.dll", SetLastError = true)] + private static extern bool UnmapViewOfFile(IntPtr lpBaseAddress); + + [DllImport("kernel32.dll", SetLastError = true)] + private static extern uint GetLastError(); + + static MemoryManagementWindows() + { + Version version = Environment.OSVersion.Version; + + UseWin10Placeholders = (version.Major == 10 && version.Build >= 17134) || version.Major > 10; + } + public static IntPtr Allocate(IntPtr size) { return AllocateInternal(size, AllocationType.Reserve | AllocationType.Commit); @@ -76,12 +88,68 @@ namespace Ryujinx.Memory public static bool Commit(IntPtr location, IntPtr size) { + if (UseWin10Placeholders) 
+ { + lock (_emulatedSharedList) + { + foreach (var shared in _emulatedSharedList) + { + if (shared.CommitMap(location, size)) + { + return true; + } + } + } + } + return VirtualAlloc(location, size, AllocationType.Commit, MemoryProtection.ReadWrite) != IntPtr.Zero; } + public static bool Decommit(IntPtr location, IntPtr size) + { + if (UseWin10Placeholders) + { + lock (_emulatedSharedList) + { + foreach (var shared in _emulatedSharedList) + { + if (shared.DecommitMap(location, size)) + { + return true; + } + } + } + } + + return VirtualFree(location, size, AllocationType.Decommit); + } + public static bool Reprotect(IntPtr address, IntPtr size, MemoryPermission permission) { - return VirtualProtect(address, size, GetProtection(permission), out _); + if (UseWin10Placeholders) + { + ulong uaddress = (ulong)address; + ulong usize = (ulong)size; + while (usize > 0) + { + ulong nextGranular = (uaddress & ~EmulatedSharedMemoryWindows.MappingMask) + EmulatedSharedMemoryWindows.MappingGranularity; + ulong mapSize = Math.Min(usize, nextGranular - uaddress); + + if (!VirtualProtect((IntPtr)uaddress, (IntPtr)mapSize, GetProtection(permission), out _)) + { + return false; + } + + uaddress = nextGranular; + usize -= mapSize; + } + + return true; + } + else + { + return VirtualProtect(address, size, GetProtection(permission), out _); + } } private static MemoryProtection GetProtection(MemoryPermission permission) @@ -102,5 +170,132 @@ namespace Ryujinx.Memory { return VirtualFree(address, IntPtr.Zero, AllocationType.Release); } + + private static int GetEmulatedHandle() + { + // Assumes we have the handle lock. + + for (int i = 0; i < _emulatedShared.Length; i++) + { + if (_emulatedShared[i] == null) + { + return i + 1; + } + } + + throw new InvalidProgramException("Too many shared memory handles were created."); + } + + public static bool EmulatedHandleValid(ref int handle) + { + handle--; + return handle >= 0 && handle < _emulatedShared.Length && _emulatedShared[handle] != null; + } + + public static IntPtr CreateSharedMemory(IntPtr size, bool reserve) + { + if (UseWin10Placeholders && reserve) + { + lock (_emulatedHandleLock) + { + int handle = GetEmulatedHandle(); + _emulatedShared[handle - 1] = new EmulatedSharedMemoryWindows((ulong)size); + _emulatedSharedList.Add(_emulatedShared[handle - 1]); + + return (IntPtr)handle; + } + } + else + { + var prot = reserve ? 
FileMapProtection.SectionReserve : FileMapProtection.SectionCommit; + + IntPtr handle = CreateFileMapping( + InvalidHandleValue, + IntPtr.Zero, + FileMapProtection.PageReadWrite | prot, + (uint)(size.ToInt64() >> 32), + (uint)size.ToInt64(), + null); + + if (handle == IntPtr.Zero) + { + throw new OutOfMemoryException(); + } + + return handle; + } + } + + public static void DestroySharedMemory(IntPtr handle) + { + if (UseWin10Placeholders) + { + lock (_emulatedHandleLock) + { + int iHandle = (int)(ulong)handle; + + if (EmulatedHandleValid(ref iHandle)) + { + _emulatedSharedList.Remove(_emulatedShared[iHandle]); + _emulatedShared[iHandle].Dispose(); + _emulatedShared[iHandle] = null; + + return; + } + } + } + + if (!CloseHandle(handle)) + { + throw new ArgumentException("Invalid handle.", nameof(handle)); + } + } + + public static IntPtr MapSharedMemory(IntPtr handle) + { + if (UseWin10Placeholders) + { + lock (_emulatedHandleLock) + { + int iHandle = (int)(ulong)handle; + + if (EmulatedHandleValid(ref iHandle)) + { + return _emulatedShared[iHandle].Map(); + } + } + } + + IntPtr ptr = MapViewOfFile(handle, 4 | 2, 0, 0, IntPtr.Zero); + + if (ptr == IntPtr.Zero) + { + throw new OutOfMemoryException(); + } + + return ptr; + } + + public static void UnmapSharedMemory(IntPtr address) + { + if (UseWin10Placeholders) + { + lock (_emulatedHandleLock) + { + foreach (EmulatedSharedMemoryWindows shared in _emulatedSharedList) + { + if (shared.Unmap((ulong)address)) + { + return; + } + } + } + } + + if (!UnmapViewOfFile(address)) + { + throw new ArgumentException("Invalid address.", nameof(address)); + } + } } } \ No newline at end of file diff --git a/Ryujinx.Memory/MemoryPermission.cs b/Ryujinx.Memory/MemoryPermission.cs index 38f2d90993..8c3e33cf78 100644 --- a/Ryujinx.Memory/MemoryPermission.cs +++ b/Ryujinx.Memory/MemoryPermission.cs @@ -41,6 +41,11 @@ namespace Ryujinx.Memory /// /// Allow reads, writes, and code execution on the memory region. /// - ReadWriteExecute = Read | Write | Execute + ReadWriteExecute = Read | Write | Execute, + + /// + /// Indicates an invalid protection. 
+ /// + Invalid = 255 } } diff --git a/Ryujinx.Memory/NativeMemoryManager.cs b/Ryujinx.Memory/NativeMemoryManager.cs index ef599dd48a..d175793540 100644 --- a/Ryujinx.Memory/NativeMemoryManager.cs +++ b/Ryujinx.Memory/NativeMemoryManager.cs @@ -3,7 +3,7 @@ using System.Buffers; namespace Ryujinx.Memory { - unsafe class NativeMemoryManager : MemoryManager where T : unmanaged + public unsafe class NativeMemoryManager : MemoryManager where T : unmanaged { private readonly T* _pointer; private readonly int _length; diff --git a/Ryujinx.Memory/PageTable.cs b/Ryujinx.Memory/PageTable.cs new file mode 100644 index 0000000000..71db1e7625 --- /dev/null +++ b/Ryujinx.Memory/PageTable.cs @@ -0,0 +1,141 @@ +namespace Ryujinx.Memory +{ + class PageTable where T : unmanaged + { + public const int PageBits = 12; + public const int PageSize = 1 << PageBits; + public const int PageMask = PageSize - 1; + + private const int PtLevelBits = 9; // 9 * 4 + 12 = 48 (max address space size) + private const int PtLevelSize = 1 << PtLevelBits; + private const int PtLevelMask = PtLevelSize - 1; + + private readonly T[][][][] _pageTable; + + public PageTable() + { + _pageTable = new T[PtLevelSize][][][]; + } + + public T Read(ulong va) + { + int l3 = (int)(va >> PageBits) & PtLevelMask; + int l2 = (int)(va >> (PageBits + PtLevelBits)) & PtLevelMask; + int l1 = (int)(va >> (PageBits + PtLevelBits * 2)) & PtLevelMask; + int l0 = (int)(va >> (PageBits + PtLevelBits * 3)) & PtLevelMask; + + if (_pageTable[l0] == null) + { + return default; + } + + if (_pageTable[l0][l1] == null) + { + return default; + } + + if (_pageTable[l0][l1][l2] == null) + { + return default; + } + + return _pageTable[l0][l1][l2][l3]; + } + + public void Map(ulong va, T value) + { + int l3 = (int)(va >> PageBits) & PtLevelMask; + int l2 = (int)(va >> (PageBits + PtLevelBits)) & PtLevelMask; + int l1 = (int)(va >> (PageBits + PtLevelBits * 2)) & PtLevelMask; + int l0 = (int)(va >> (PageBits + PtLevelBits * 3)) & PtLevelMask; + + if (_pageTable[l0] == null) + { + _pageTable[l0] = new T[PtLevelSize][][]; + } + + if (_pageTable[l0][l1] == null) + { + _pageTable[l0][l1] = new T[PtLevelSize][]; + } + + if (_pageTable[l0][l1][l2] == null) + { + _pageTable[l0][l1][l2] = new T[PtLevelSize]; + } + + _pageTable[l0][l1][l2][l3] = value; + } + + public void Unmap(ulong va) + { + int l3 = (int)(va >> PageBits) & PtLevelMask; + int l2 = (int)(va >> (PageBits + PtLevelBits)) & PtLevelMask; + int l1 = (int)(va >> (PageBits + PtLevelBits * 2)) & PtLevelMask; + int l0 = (int)(va >> (PageBits + PtLevelBits * 3)) & PtLevelMask; + + if (_pageTable[l0] == null) + { + return; + } + + if (_pageTable[l0][l1] == null) + { + return; + } + + if (_pageTable[l0][l1][l2] == null) + { + return; + } + + _pageTable[l0][l1][l2][l3] = default; + + bool empty = true; + + for (int i = 0; i < _pageTable[l0][l1][l2].Length; i++) + { + empty &= _pageTable[l0][l1][l2][i].Equals(default); + } + + if (empty) + { + _pageTable[l0][l1][l2] = null; + + RemoveIfAllNull(l0, l1); + } + } + + private void RemoveIfAllNull(int l0, int l1) + { + bool empty = true; + + for (int i = 0; i < _pageTable[l0][l1].Length; i++) + { + empty &= (_pageTable[l0][l1][i] == null); + } + + if (empty) + { + _pageTable[l0][l1] = null; + + RemoveIfAllNull(l0); + } + } + + private void RemoveIfAllNull(int l0) + { + bool empty = true; + + for (int i = 0; i < _pageTable[l0].Length; i++) + { + empty &= (_pageTable[l0][i] == null); + } + + if (empty) + { + _pageTable[l0] = null; + } + } + } +} diff --git 
a/Ryujinx.Memory/Range/HostMemoryRange.cs b/Ryujinx.Memory/Range/HostMemoryRange.cs new file mode 100644 index 0000000000..c6d8689c51 --- /dev/null +++ b/Ryujinx.Memory/Range/HostMemoryRange.cs @@ -0,0 +1,71 @@ +using System; + +namespace Ryujinx.Memory.Range +{ + /// + /// Range of memory composed of an address and size. + /// + public struct HostMemoryRange : IEquatable + { + /// + /// An empty memory range, with a null address and zero size. + /// + public static HostMemoryRange Empty => new HostMemoryRange(0, 0); + + /// + /// Start address of the range. + /// + public nuint Address { get; } + + /// + /// Size of the range in bytes. + /// + public ulong Size { get; } + + /// + /// Address where the range ends (exclusive). + /// + public nuint EndAddress => Address + (nuint)Size; + + /// + /// Creates a new memory range with the specified address and size. + /// + /// Start address + /// Size in bytes + public HostMemoryRange(nuint address, ulong size) + { + Address = address; + Size = size; + } + + /// + /// Checks if the range overlaps with another. + /// + /// The other range to check for overlap + /// True if the ranges overlap, false otherwise + public bool OverlapsWith(HostMemoryRange other) + { + nuint thisAddress = Address; + nuint thisEndAddress = EndAddress; + nuint otherAddress = other.Address; + nuint otherEndAddress = other.EndAddress; + + return thisAddress < otherEndAddress && otherAddress < thisEndAddress; + } + + public override bool Equals(object obj) + { + return obj is HostMemoryRange other && Equals(other); + } + + public bool Equals(HostMemoryRange other) + { + return Address == other.Address && Size == other.Size; + } + + public override int GetHashCode() + { + return HashCode.Combine(Address, Size); + } + } +} diff --git a/Ryujinx.Memory/Tracking/IMultiRegionHandle.cs b/Ryujinx.Memory/Tracking/IMultiRegionHandle.cs index 357b8c5c96..71bd602fae 100644 --- a/Ryujinx.Memory/Tracking/IMultiRegionHandle.cs +++ b/Ryujinx.Memory/Tracking/IMultiRegionHandle.cs @@ -9,6 +9,13 @@ namespace Ryujinx.Memory.Tracking /// bool Dirty { get; } + /// + /// Force the range of handles to be dirty, without reprotecting. + /// + /// Start address of the range + /// Size of the range + public void ForceDirty(ulong address, ulong size); + /// /// Check if any part of the region has been modified, and perform an action for each. /// Contiguous modified regions are combined. diff --git a/Ryujinx.Memory/Tracking/IRegionHandle.cs b/Ryujinx.Memory/Tracking/IRegionHandle.cs index cd33e5c8f3..ec802cb365 100644 --- a/Ryujinx.Memory/Tracking/IRegionHandle.cs +++ b/Ryujinx.Memory/Tracking/IRegionHandle.cs @@ -10,6 +10,7 @@ namespace Ryujinx.Memory.Tracking ulong Size { get; } ulong EndAddress { get; } + void ForceDirty(); void Reprotect(bool asDirty = false); void RegisterAction(RegionSignal action); } diff --git a/Ryujinx.Memory/Tracking/MemoryTracking.cs b/Ryujinx.Memory/Tracking/MemoryTracking.cs index 425552f832..70951e8c9f 100644 --- a/Ryujinx.Memory/Tracking/MemoryTracking.cs +++ b/Ryujinx.Memory/Tracking/MemoryTracking.cs @@ -9,7 +9,7 @@ namespace Ryujinx.Memory.Tracking public class MemoryTracking { private readonly IVirtualMemoryManager _memoryManager; - private readonly MemoryBlock _block; + private readonly InvalidAccessHandler _invalidAccessHandler; // Only use these from within the lock. 
private readonly NonOverlappingRangeList _virtualRegions; @@ -25,8 +25,6 @@ namespace Ryujinx.Memory.Tracking /// internal object TrackingLock = new object(); - public bool EnablePhysicalProtection { get; set; } - /// /// Create a new tracking structure for the given "physical" memory block, /// with a given "virtual" memory manager that will provide mappings and virtual memory protection. @@ -34,11 +32,11 @@ namespace Ryujinx.Memory.Tracking /// Virtual memory manager /// Physical memory block /// Page size of the virtual memory space - public MemoryTracking(IVirtualMemoryManager memoryManager, MemoryBlock block, int pageSize) + public MemoryTracking(IVirtualMemoryManager memoryManager, int pageSize, InvalidAccessHandler invalidAccessHandler = null) { _memoryManager = memoryManager; - _block = block; _pageSize = pageSize; + _invalidAccessHandler = invalidAccessHandler; _virtualRegions = new NonOverlappingRangeList(); } @@ -56,9 +54,8 @@ namespace Ryujinx.Memory.Tracking /// Should be called after the mapping is complete. /// /// Virtual memory address - /// Physical memory address /// Size to be mapped - public void Map(ulong va, ulong pa, ulong size) + public void Map(ulong va, ulong size) { // A mapping may mean we need to re-evaluate each VirtualRegion's affected area. // Find all handles that overlap with the range, we need to recalculate their physical regions @@ -208,6 +205,15 @@ namespace Ryujinx.Memory.Tracking if (count == 0) { + if (!_memoryManager.IsMapped(address)) + { + _invalidAccessHandler?.Invoke(address); + + // We can't continue - it's impossible to remove protection from the page. + // Even if the access handler wants us to continue, we wouldn't be able to. + throw new InvalidMemoryRegionException(); + } + _memoryManager.TrackingReprotect(address & ~(ulong)(_pageSize - 1), (ulong)_pageSize, MemoryPermission.ReadAndWrite); return false; // We can't handle this - it's probably a real invalid access. } diff --git a/Ryujinx.Memory/Tracking/MultiRegionHandle.cs b/Ryujinx.Memory/Tracking/MultiRegionHandle.cs index df154bc220..1f09807a62 100644 --- a/Ryujinx.Memory/Tracking/MultiRegionHandle.cs +++ b/Ryujinx.Memory/Tracking/MultiRegionHandle.cs @@ -34,6 +34,20 @@ namespace Ryujinx.Memory.Tracking Size = size; } + public void ForceDirty(ulong address, ulong size) + { + Dirty = true; + + int startHandle = (int)((address - Address) / Granularity); + int lastHandle = (int)((address + (size - 1) - Address) / Granularity); + + for (int i = startHandle; i <= lastHandle; i++) + { + _handles[i].SequenceNumber--; + _handles[i].ForceDirty(); + } + } + public void SignalWrite() { Dirty = true; @@ -98,7 +112,7 @@ namespace Ryujinx.Memory.Tracking { RegionHandle handle = _handles[i]; - if (handle.Dirty && sequenceNumber != handle.SequenceNumber) + if (sequenceNumber != handle.SequenceNumber && handle.DirtyOrVolatile()) { rgSize += handle.Size; handle.Reprotect(); diff --git a/Ryujinx.Memory/Tracking/RegionHandle.cs b/Ryujinx.Memory/Tracking/RegionHandle.cs index 5c32fba462..69d7797702 100644 --- a/Ryujinx.Memory/Tracking/RegionHandle.cs +++ b/Ryujinx.Memory/Tracking/RegionHandle.cs @@ -11,6 +11,17 @@ namespace Ryujinx.Memory.Tracking /// public class RegionHandle : IRegionHandle, IRange { + /// + /// If more than this number of checks have been performed on a dirty flag since its last reprotect, + /// then it is dirtied infrequently. + /// + private static int CheckCountForInfrequent = 3; + + /// + /// Number of frequent dirty/consume in a row to make this handle volatile. 
+ /// + private static int VolatileThreshold = 5; + public bool Dirty { get; private set; } public bool Unmapped { get; private set; } @@ -28,6 +39,10 @@ namespace Ryujinx.Memory.Tracking private readonly MemoryTracking _tracking; private bool _disposed; + private int _checkCount = 0; + private int _volatileCount = 0; + private bool _volatile; + internal MemoryPermission RequiredPermission => _preAction != null ? MemoryPermission.None : (Dirty ? MemoryPermission.ReadAndWrite : MemoryPermission.Read); internal RegionSignal PreAction => _preAction; @@ -55,6 +70,25 @@ namespace Ryujinx.Memory.Tracking } } + /// + /// Clear the volatile state of this handle. + /// + private void ClearVolatile() + { + _volatileCount = 0; + _volatile = false; + } + + /// + /// Check if this handle is dirty, or if it is volatile. (changes very often) + /// + /// True if the handle is dirty or volatile, false otherwise + public bool DirtyOrVolatile() + { + _checkCount++; + return Dirty || _volatile; + } + /// /// Signal that a memory action occurred within this handle's virtual regions. /// @@ -76,19 +110,57 @@ namespace Ryujinx.Memory.Tracking } } + /// + /// Force this handle to be dirty, without reprotecting. + /// + public void ForceDirty() + { + Dirty = true; + } + /// /// Consume the dirty flag for this handle, and reprotect so it can be set on the next write. /// public void Reprotect(bool asDirty = false) { + if (_volatile) return; + Dirty = asDirty; + + bool protectionChanged = false; + lock (_tracking.TrackingLock) { foreach (VirtualRegion region in _regions) { - region.UpdateProtection(); + protectionChanged |= region.UpdateProtection(); } } + + if (!protectionChanged) + { + // Counteract the check count being incremented when this handle was forced dirty. + // It doesn't count for protected write tracking. 
+ + _checkCount--; + } + else if (!asDirty) + { + if (_checkCount > 0 && _checkCount < CheckCountForInfrequent) + { + if (++_volatileCount >= VolatileThreshold && _preAction == null) + { + _volatile = true; + return; + } + } + else + { + _volatileCount = 0; + } + + _checkCount = 0; + } } /// @@ -98,6 +170,8 @@ namespace Ryujinx.Memory.Tracking /// Action to call on read or write public void RegisterAction(RegionSignal action) { + ClearVolatile(); + RegionSignal lastAction = Interlocked.Exchange(ref _preAction, action); if (lastAction == null && action != lastAction) { @@ -142,6 +216,7 @@ namespace Ryujinx.Memory.Tracking if (Unmapped) { + ClearVolatile(); Dirty = false; } } diff --git a/Ryujinx.Memory/Tracking/SmartMultiRegionHandle.cs b/Ryujinx.Memory/Tracking/SmartMultiRegionHandle.cs index 8bc10c411e..eabbd7231e 100644 --- a/Ryujinx.Memory/Tracking/SmartMultiRegionHandle.cs +++ b/Ryujinx.Memory/Tracking/SmartMultiRegionHandle.cs @@ -41,6 +41,17 @@ namespace Ryujinx.Memory.Tracking Dirty = true; } + public void ForceDirty(ulong address, ulong size) + { + foreach (var handle in _handles) + { + if (handle != null && handle.OverlapsWith(address, size)) + { + handle.ForceDirty(); + } + } + } + public void RegisterAction(RegionSignal action) { foreach (var handle in _handles) diff --git a/Ryujinx.Memory/Tracking/VirtualRegion.cs b/Ryujinx.Memory/Tracking/VirtualRegion.cs index 696d35606d..e758f38eb4 100644 --- a/Ryujinx.Memory/Tracking/VirtualRegion.cs +++ b/Ryujinx.Memory/Tracking/VirtualRegion.cs @@ -11,9 +11,11 @@ namespace Ryujinx.Memory.Tracking public List Handles = new List(); private readonly MemoryTracking _tracking; + private MemoryPermission _lastPermission; - public VirtualRegion(MemoryTracking tracking, ulong address, ulong size) : base(address, size) + public VirtualRegion(MemoryTracking tracking, ulong address, ulong size, MemoryPermission lastPermission = MemoryPermission.Invalid) : base(address, size) { + _lastPermission = lastPermission; _tracking = tracking; } @@ -33,6 +35,8 @@ namespace Ryujinx.Memory.Tracking /// True if the region has been mapped, false if unmapped public void SignalMappingChanged(bool mapped) { + _lastPermission = MemoryPermission.Invalid; + foreach (RegionHandle handle in Handles) { handle.SignalMappingChanged(mapped); @@ -61,9 +65,19 @@ namespace Ryujinx.Memory.Tracking /// /// Updates the protection for this virtual region. /// - public void UpdateProtection() + public bool UpdateProtection() { - _tracking.ProtectVirtualRegion(this, GetRequiredPermission()); + MemoryPermission permission = GetRequiredPermission(); + + if (_lastPermission != permission) + { + _tracking.ProtectVirtualRegion(this, permission); + _lastPermission = permission; + + return true; + } + + return false; } /// @@ -85,7 +99,7 @@ namespace Ryujinx.Memory.Tracking public override INonOverlappingRange Split(ulong splitAddress) { - VirtualRegion newRegion = new VirtualRegion(_tracking, splitAddress, EndAddress - splitAddress); + VirtualRegion newRegion = new VirtualRegion(_tracking, splitAddress, EndAddress - splitAddress, _lastPermission); Size = splitAddress - Address; // The new region inherits all of our parents. 
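The next file is the heart of the Windows shared memory emulation. For readers unfamiliar with Windows 10 memory placeholders, here is a minimal, self-contained sketch of the mechanism it builds on. This is an illustration rather than code from the patch; the constants and P/Invoke signatures follow the declarations and WindowsFlags.cs values appearing in this diff, and error handling is omitted for brevity:

    using System;
    using System.Runtime.InteropServices;

    static class PlaceholderSketch
    {
        private const uint MEM_RESERVE              = 0x2000;
        private const uint MEM_RESERVE_PLACEHOLDER  = 0x40000;
        private const uint MEM_RELEASE              = 0x8000;
        private const uint MEM_PRESERVE_PLACEHOLDER = 0x2;
        private const uint MEM_REPLACE_PLACEHOLDER  = 0x4000;
        private const uint PAGE_NOACCESS            = 0x01;
        private const uint PAGE_READWRITE           = 0x04;
        private const uint SEC_COMMIT               = 0x8000000;

        private static readonly IntPtr CurrentProcess     = new IntPtr(-1); // pseudo handle
        private static readonly IntPtr InvalidHandleValue = new IntPtr(-1); // pagefile backing

        [DllImport("KernelBase.dll", SetLastError = true)]
        private static extern IntPtr VirtualAlloc2(IntPtr process, IntPtr baseAddress, IntPtr size,
            uint allocationType, uint pageProtection, IntPtr extendedParameters, uint parameterCount);

        [DllImport("kernel32.dll", SetLastError = true)]
        private static extern bool VirtualFree(IntPtr address, IntPtr size, uint freeType);

        [DllImport("kernel32.dll", SetLastError = true)]
        private static extern IntPtr CreateFileMapping(IntPtr file, IntPtr attributes, uint protect,
            uint sizeHigh, uint sizeLow, string name);

        [DllImport("KernelBase.dll", SetLastError = true)]
        private static extern IntPtr MapViewOfFile3(IntPtr section, IntPtr process, IntPtr baseAddress,
            ulong offset, IntPtr size, ulong allocationType, uint pageProtection,
            IntPtr extendedParameters, uint parameterCount);

        static void Main()
        {
            // 1. Reserve 128KB of address space as a single placeholder (no memory behind it).
            IntPtr va = VirtualAlloc2(CurrentProcess, IntPtr.Zero, (IntPtr)0x20000,
                MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS, IntPtr.Zero, 0);

            // 2. Split it in two: release the first 64KB but keep it as its own placeholder.
            VirtualFree(va, (IntPtr)0x10000, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);

            // 3. Create a pagefile-backed section and map a view of it over the first placeholder.
            IntPtr section = CreateFileMapping(InvalidHandleValue, IntPtr.Zero,
                PAGE_READWRITE | SEC_COMMIT, 0, 0x10000, null);

            IntPtr view = MapViewOfFile3(section, CurrentProcess, va, 0, (IntPtr)0x10000,
                MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, IntPtr.Zero, 0);

            // Mapping the same section at a second address yields two views of the same
            // physical pages, which is the primitive behind both host-mapped memory and
            // the emulated shared memory in this patch.
        }
    }

EmulatedSharedMemoryWindows below applies these same steps at 64KB granularity, repeating each map and unmap on every mapped base so that all views stay in sync.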
diff --git a/Ryujinx.Memory/WindowsShared/EmulatedSharedMemoryWindows.cs b/Ryujinx.Memory/WindowsShared/EmulatedSharedMemoryWindows.cs new file mode 100644 index 0000000000..46399504ef --- /dev/null +++ b/Ryujinx.Memory/WindowsShared/EmulatedSharedMemoryWindows.cs @@ -0,0 +1,698 @@ +using Ryujinx.Memory.Range; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Runtime.InteropServices; + +namespace Ryujinx.Memory.WindowsShared +{ + class EmulatedSharedMemoryWindows : IDisposable + { + private static readonly IntPtr InvalidHandleValue = new IntPtr(-1); + private static readonly IntPtr CurrentProcessHandle = new IntPtr(-1); + + public const int MappingBits = 16; // Windows 64kb granularity. + public const ulong MappingGranularity = 1 << MappingBits; + public const ulong MappingMask = MappingGranularity - 1; + + public const ulong BackingSize32GB = 32UL * 1024UL * 1024UL * 1024UL; // Reasonable max size of 32GB. + + private class SharedMemoryMapping : INonOverlappingRange + { + public ulong Address { get; } + + public ulong Size { get; private set; } + + public ulong EndAddress { get; private set; } + + public List Blocks; + + public SharedMemoryMapping(ulong address, ulong size, List blocks = null) + { + Address = address; + Size = size; + EndAddress = address + size; + + Blocks = blocks ?? new List(); + } + + public bool OverlapsWith(ulong address, ulong size) + { + return Address < address + size && address < EndAddress; + } + + public void ExtendTo(ulong endAddress) + { + EndAddress = endAddress; + Size = endAddress - Address; + } + + public void AddBlocks(IEnumerable blocks) + { + if (Blocks.Count > 0 && blocks.Count() > 0 && Blocks.Last() == blocks.First()) + { + Blocks.AddRange(blocks.Skip(1)); + } + else + { + Blocks.AddRange(blocks); + } + } + + public INonOverlappingRange Split(ulong splitAddress) + { + SharedMemoryMapping newRegion = new SharedMemoryMapping(splitAddress, EndAddress - splitAddress); + + int end = (int)((EndAddress + MappingMask) >> MappingBits); + int start = (int)(Address >> MappingBits); + + Size = splitAddress - Address; + EndAddress = splitAddress; + + int splitEndBlock = (int)((splitAddress + MappingMask) >> MappingBits); + int splitStartBlock = (int)(splitAddress >> MappingBits); + + newRegion.AddBlocks(Blocks.Skip(splitStartBlock - start)); + Blocks.RemoveRange(splitEndBlock - start, end - splitEndBlock); + + return newRegion; + } + } + + [DllImport("kernel32.dll", SetLastError = true)] + private static extern IntPtr CreateFileMapping( + IntPtr hFile, + IntPtr lpFileMappingAttributes, + FileMapProtection flProtect, + uint dwMaximumSizeHigh, + uint dwMaximumSizeLow, + [MarshalAs(UnmanagedType.LPWStr)] string lpName); + + [DllImport("kernel32.dll", SetLastError = true)] + private static extern bool CloseHandle(IntPtr hObject); + + [DllImport("KernelBase.dll", SetLastError = true)] + private static extern IntPtr VirtualAlloc2( + IntPtr process, + IntPtr lpAddress, + IntPtr dwSize, + AllocationType flAllocationType, + MemoryProtection flProtect, + IntPtr extendedParameters, + ulong parameterCount); + + [DllImport("kernel32.dll", SetLastError = true)] + private static extern bool VirtualFree(IntPtr lpAddress, IntPtr dwSize, AllocationType dwFreeType); + + [DllImport("KernelBase.dll", SetLastError = true)] + private static extern IntPtr MapViewOfFile3( + IntPtr hFileMappingObject, + IntPtr process, + IntPtr baseAddress, + ulong offset, + IntPtr dwNumberOfBytesToMap, + ulong allocationType, + MemoryProtection dwDesiredAccess, + IntPtr 
extendedParameters, + ulong parameterCount); + + [DllImport("KernelBase.dll", SetLastError = true)] + private static extern bool UnmapViewOfFile2(IntPtr process, IntPtr lpBaseAddress, ulong unmapFlags); + + private ulong _size; + + private object _lock = new object(); + + private ulong _backingSize; + private IntPtr _backingMemHandle; + private int _backingEnd; + private int _backingAllocated; + private Queue _backingFreeList; + + private List _mappedBases; + private RangeList _mappings; + private SharedMemoryMapping[] _foundMappings = new SharedMemoryMapping[32]; + private PlaceholderList _placeholders; + + public EmulatedSharedMemoryWindows(ulong size) + { + ulong backingSize = BackingSize32GB; + + _size = size; + _backingSize = backingSize; + + _backingMemHandle = CreateFileMapping( + InvalidHandleValue, + IntPtr.Zero, + FileMapProtection.PageReadWrite | FileMapProtection.SectionReserve, + (uint)(backingSize >> 32), + (uint)backingSize, + null); + + if (_backingMemHandle == IntPtr.Zero) + { + throw new OutOfMemoryException(); + } + + _backingFreeList = new Queue(); + _mappings = new RangeList(); + _mappedBases = new List(); + _placeholders = new PlaceholderList(size >> MappingBits); + } + + private (ulong granularStart, ulong granularEnd) GetAlignedRange(ulong address, ulong size) + { + return (address & (~MappingMask), (address + size + MappingMask) & (~MappingMask)); + } + + private void Commit(ulong address, ulong size) + { + (ulong granularStart, ulong granularEnd) = GetAlignedRange(address, size); + + ulong endAddress = address + size; + + lock (_lock) + { + // Search a bit before and after the new mapping. + // When adding our new mapping, we may need to join an existing mapping into our new mapping (or in some cases, to the other side!) + ulong searchStart = granularStart == 0 ? 0 : (granularStart - 1); + int mappingCount = _mappings.FindOverlapsNonOverlapping(searchStart, (granularEnd - searchStart) + 1, ref _foundMappings); + + int first = -1; + int last = -1; + SharedMemoryMapping startOverlap = null; + SharedMemoryMapping endOverlap = null; + + int lastIndex = (int)(address >> MappingBits); + int endIndex = (int)((endAddress + MappingMask) >> MappingBits); + int firstBlock = -1; + int endBlock = -1; + + for (int i = 0; i < mappingCount; i++) + { + SharedMemoryMapping mapping = _foundMappings[i]; + + if (mapping.Address < address) + { + if (mapping.EndAddress >= address) + { + startOverlap = mapping; + } + + if ((int)((mapping.EndAddress - 1) >> MappingBits) == lastIndex) + { + lastIndex = (int)((mapping.EndAddress + MappingMask) >> MappingBits); + firstBlock = mapping.Blocks.Last(); + } + } + + if (mapping.EndAddress > endAddress) + { + if (mapping.Address <= endAddress) + { + endOverlap = mapping; + } + + if ((int)((mapping.Address) >> MappingBits) + 1 == endIndex) + { + endIndex = (int)((mapping.Address) >> MappingBits); + endBlock = mapping.Blocks.First(); + } + } + + if (mapping.OverlapsWith(address, size)) + { + if (first == -1) + { + first = i; + } + + last = i; + } + } + + if (startOverlap == endOverlap && startOverlap != null) + { + // Already fully committed. 
+ return; + } + + var blocks = new List(); + int lastBlock = -1; + + if (firstBlock != -1) + { + blocks.Add(firstBlock); + lastBlock = firstBlock; + } + + bool hasMapped = false; + Action map = () => + { + if (!hasMapped) + { + _placeholders.EnsurePlaceholders(address >> MappingBits, (granularEnd - granularStart) >> MappingBits, SplitPlaceholder); + hasMapped = true; + } + + // There's a gap between this index and the last. Allocate blocks to fill it. + blocks.Add(MapBackingBlock(MappingGranularity * (ulong)lastIndex++)); + }; + + if (first != -1) + { + for (int i = first; i <= last; i++) + { + SharedMemoryMapping mapping = _foundMappings[i]; + int mapIndex = (int)(mapping.Address >> MappingBits); + + while (lastIndex < mapIndex) + { + map(); + } + + if (lastBlock == mapping.Blocks[0]) + { + blocks.AddRange(mapping.Blocks.Skip(1)); + } + else + { + blocks.AddRange(mapping.Blocks); + } + + lastIndex = (int)((mapping.EndAddress - 1) >> MappingBits) + 1; + } + } + + while (lastIndex < endIndex) + { + map(); + } + + if (endBlock != -1 && endBlock != lastBlock) + { + blocks.Add(endBlock); + } + + if (startOverlap != null && endOverlap != null) + { + // Both sides should be coalesced. Extend the start overlap to contain the end overlap, and add together their blocks. + + _mappings.Remove(endOverlap); + + startOverlap.ExtendTo(endOverlap.EndAddress); + + startOverlap.AddBlocks(blocks); + startOverlap.AddBlocks(endOverlap.Blocks); + } + else if (startOverlap != null) + { + startOverlap.ExtendTo(endAddress); + + startOverlap.AddBlocks(blocks); + } + else + { + var mapping = new SharedMemoryMapping(address, size, blocks); + + if (endOverlap != null) + { + mapping.ExtendTo(endOverlap.EndAddress); + + mapping.AddBlocks(endOverlap.Blocks); + + _mappings.Remove(endOverlap); + } + + _mappings.Add(mapping); + } + } + } + + private void Decommit(ulong address, ulong size) + { + (ulong granularStart, ulong granularEnd) = GetAlignedRange(address, size); + ulong endAddress = address + size; + + lock (_lock) + { + int mappingCount = _mappings.FindOverlapsNonOverlapping(granularStart, granularEnd - granularStart, ref _foundMappings); + + int first = -1; + int last = -1; + + for (int i = 0; i < mappingCount; i++) + { + SharedMemoryMapping mapping = _foundMappings[i]; + + if (mapping.OverlapsWith(address, size)) + { + if (first == -1) + { + first = i; + } + + last = i; + } + } + + if (first == -1) + { + return; // Could not find any regions to decommit. + } + + int lastReleasedBlock = -1; + + bool releasedFirst = false; + bool releasedLast = false; + + for (int i = last; i >= first; i--) + { + SharedMemoryMapping mapping = _foundMappings[i]; + bool releaseEnd = true; + bool releaseStart = true; + + if (i == last) + { + // If this is the last region, do not release the block if there is a page ahead of us, or the block continues after us. (it is keeping the block alive) + releaseEnd = last == mappingCount - 1; + + // If the end region starts after the decommit end address, split and readd it after modifying its base address. + if (mapping.EndAddress > endAddress) + { + var newMapping = (SharedMemoryMapping)mapping.Split(endAddress); + _mappings.Add(newMapping); + + if ((endAddress & MappingMask) != 0) + { + releaseEnd = false; + } + } + + releasedLast = releaseEnd; + } + + if (i == first) + { + // If this is the first region, do not release the block if there is a region behind us. 
(it is keeping the block alive) + releaseStart = first == 0; + + // If the first region starts before the decommit address, split it by modifying its end address. + if (mapping.Address < address) + { + mapping = (SharedMemoryMapping)mapping.Split(address); + + if ((address & MappingMask) != 0) + { + releaseStart = false; + } + } + + releasedFirst = releaseStart; + } + + _mappings.Remove(mapping); + + ulong releasePointer = (mapping.EndAddress + MappingMask) & (~MappingMask); + for (int j = mapping.Blocks.Count - 1; j >= 0; j--) + { + int blockId = mapping.Blocks[j]; + + releasePointer -= MappingGranularity; + + if (lastReleasedBlock == blockId) + { + // When committed regions are fragmented, multiple will have the same block id for their start/end granular block. + // Avoid releasing these blocks twice. + continue; + } + + if ((j != 0 || releaseStart) && (j != mapping.Blocks.Count - 1 || releaseEnd)) + { + ReleaseBackingBlock(releasePointer, blockId); + } + + lastReleasedBlock = blockId; + } + } + + ulong placeholderStart = (granularStart >> MappingBits) + (releasedFirst ? 0UL : 1UL); + ulong placeholderEnd = (granularEnd >> MappingBits) - (releasedLast ? 0UL : 1UL); + + if (placeholderEnd > placeholderStart) + { + _placeholders.RemovePlaceholders(placeholderStart, placeholderEnd - placeholderStart, CoalescePlaceholder); + } + } + } + + public bool CommitMap(IntPtr address, IntPtr size) + { + lock (_lock) + { + foreach (ulong mapping in _mappedBases) + { + ulong offset = (ulong)address - mapping; + + if (offset < _size) + { + Commit(offset, (ulong)size); + return true; + } + } + } + + return false; + } + + public bool DecommitMap(IntPtr address, IntPtr size) + { + lock (_lock) + { + foreach (ulong mapping in _mappedBases) + { + ulong offset = (ulong)address - mapping; + + if (offset < _size) + { + Decommit(offset, (ulong)size); + return true; + } + } + } + + return false; + } + + private int MapBackingBlock(ulong offset) + { + bool allocate = false; + int backing; + + if (_backingFreeList.Count > 0) + { + backing = _backingFreeList.Dequeue(); + } + else + { + if (_backingAllocated == _backingEnd) + { + // Allocate the backing. 
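                    // (Sketch-level summary of the allocator: backing granules are 64KB
                    // slices of one large SEC_RESERVE section, handed out append-only via
                    // _backingEnd and recycled through _backingFreeList; a granule's pages
                    // are only committed the first time its index is used, which is the
                    // "allocate" path flagged here.)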
+ _backingAllocated++; + allocate = true; + } + + backing = _backingEnd++; + } + + ulong backingOffset = MappingGranularity * (ulong)backing; + + foreach (ulong baseAddress in _mappedBases) + { + CommitToMap(baseAddress, offset, MappingGranularity, backingOffset, allocate); + allocate = false; + } + + return backing; + } + + private void ReleaseBackingBlock(ulong offset, int id) + { + foreach (ulong baseAddress in _mappedBases) + { + DecommitFromMap(baseAddress, offset); + } + + if (_backingEnd - 1 == id) + { + _backingEnd = id; + } + else + { + _backingFreeList.Enqueue(id); + } + } + + public IntPtr Map() + { + IntPtr newMapping = VirtualAlloc2( + CurrentProcessHandle, + IntPtr.Zero, + (IntPtr)_size, + AllocationType.Reserve | AllocationType.ReservePlaceholder, + MemoryProtection.NoAccess, + IntPtr.Zero, + 0); + + if (newMapping == IntPtr.Zero) + { + throw new OutOfMemoryException(); + } + + // Apply all existing mappings to the new mapping + lock (_lock) + { + int lastBlock = -1; + foreach (SharedMemoryMapping mapping in _mappings) + { + ulong blockAddress = mapping.Address & (~MappingMask); + foreach (int block in mapping.Blocks) + { + if (block != lastBlock) + { + ulong backingOffset = MappingGranularity * (ulong)block; + + CommitToMap((ulong)newMapping, blockAddress, MappingGranularity, backingOffset, false); + + lastBlock = block; + } + + blockAddress += MappingGranularity; + } + } + + _mappedBases.Add((ulong)newMapping); + } + + return newMapping; + } + + private void SplitPlaceholder(ulong address, ulong size) + { + ulong byteAddress = address << MappingBits; + IntPtr byteSize = (IntPtr)(size << MappingBits); + + foreach (ulong mapAddress in _mappedBases) + { + bool result = VirtualFree((IntPtr)(mapAddress + byteAddress), byteSize, AllocationType.PreservePlaceholder | AllocationType.Release); + + if (!result) + { + throw new InvalidOperationException("Placeholder could not be split."); + } + } + } + + private void CoalescePlaceholder(ulong address, ulong size) + { + ulong byteAddress = address << MappingBits; + IntPtr byteSize = (IntPtr)(size << MappingBits); + + foreach (ulong mapAddress in _mappedBases) + { + bool result = VirtualFree((IntPtr)(mapAddress + byteAddress), byteSize, AllocationType.CoalescePlaceholders | AllocationType.Release); + + if (!result) + { + throw new InvalidOperationException("Placeholder could not be coalesced."); + } + } + } + + private void CommitToMap(ulong mapAddress, ulong address, ulong size, ulong backingOffset, bool allocate) + { + IntPtr targetAddress = (IntPtr)(mapAddress + address); + + // Assume the placeholder worked (or already exists) + // Map the backing memory into the mapped location. + + IntPtr mapped = MapViewOfFile3( + _backingMemHandle, + CurrentProcessHandle, + targetAddress, + backingOffset, + (IntPtr)MappingGranularity, + 0x4000, // REPLACE_PLACEHOLDER + MemoryProtection.ReadWrite, + IntPtr.Zero, + 0); + + if (mapped == IntPtr.Zero) + { + throw new InvalidOperationException($"Could not map view of backing memory. (va=0x{address:X16} size=0x{size:X16}, error code {Marshal.GetLastWin32Error()})"); + } + + if (allocate) + { + // Commit this part of the shared memory. 
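                // (Note: the MapViewOfFile3 call above replaced a placeholder with a view
                // of the SEC_RESERVE backing section; this VirtualAlloc2 commit charges
                // real pages for that slice of the section, and because the section is
                // shared, the commit becomes visible through every mapped base at once.)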
+ VirtualAlloc2(CurrentProcessHandle, targetAddress, (IntPtr)MappingGranularity, AllocationType.Commit, MemoryProtection.ReadWrite, IntPtr.Zero, 0); + } + } + + private void DecommitFromMap(ulong baseAddress, ulong address) + { + UnmapViewOfFile2(CurrentProcessHandle, (IntPtr)(baseAddress + address), 2); + } + + public bool Unmap(ulong baseAddress) + { + lock (_lock) + { + if (_mappedBases.Remove(baseAddress)) + { + int lastBlock = -1; + + foreach (SharedMemoryMapping mapping in _mappings) + { + ulong blockAddress = mapping.Address & (~MappingMask); + foreach (int block in mapping.Blocks) + { + if (block != lastBlock) + { + DecommitFromMap(baseAddress, blockAddress); + + lastBlock = block; + } + + blockAddress += MappingGranularity; + } + } + + if (!VirtualFree((IntPtr)baseAddress, (IntPtr)0, AllocationType.Release)) + { + throw new InvalidOperationException("Couldn't free mapping placeholder."); + } + + return true; + } + + return false; + } + } + + public void Dispose() + { + // Remove all file mappings + lock (_lock) + { + foreach (ulong baseAddress in _mappedBases.ToArray()) + { + Unmap(baseAddress); + } + } + + // Finally, delete the file mapping. + CloseHandle(_backingMemHandle); + } + } +} diff --git a/Ryujinx.Memory/WindowsShared/PlaceholderList.cs b/Ryujinx.Memory/WindowsShared/PlaceholderList.cs new file mode 100644 index 0000000000..be8cef9c3e --- /dev/null +++ b/Ryujinx.Memory/WindowsShared/PlaceholderList.cs @@ -0,0 +1,291 @@ +using Ryujinx.Memory.Range; +using System; +using System.Diagnostics; + +namespace Ryujinx.Memory.WindowsShared +{ + /// + /// A specialized list used for keeping track of Windows 10's memory placeholders. + /// This is used to make splitting a large placeholder into equally small + /// granular chunks much easier, while avoiding slowdown due to a large number of + /// placeholders by coalescing adjacent granular placeholders after they are unused. + /// + class PlaceholderList + { + private class PlaceholderBlock : IRange + { + public ulong Address { get; } + public ulong Size { get; private set; } + public ulong EndAddress { get; private set; } + public bool IsGranular { get; set; } + + public PlaceholderBlock(ulong id, ulong size, bool isGranular) + { + Address = id; + Size = size; + EndAddress = id + size; + IsGranular = isGranular; + } + + public bool OverlapsWith(ulong address, ulong size) + { + return Address < address + size && address < EndAddress; + } + + public void ExtendTo(ulong end) + { + EndAddress = end; + Size = end - Address; + } + } + + private RangeList _placeholders; + private PlaceholderBlock[] _foundBlocks = new PlaceholderBlock[32]; + + /// + /// Create a new list to manage placeholders. + /// Note that a size is measured in granular placeholders. + /// If the placeholder granularity is 65536 bytes, then a 65536 region will be covered by 1 placeholder granularity. + /// + /// Size measured in granular placeholders + public PlaceholderList(ulong size) + { + _placeholders = new RangeList(); + + _placeholders.Add(new PlaceholderBlock(0, size, false)); + } + + /// + /// Ensure that the given range of placeholders is granular. 
+ /// + /// Start of the range, measured in granular placeholders + /// Size of the range, measured in granular placeholders + /// Callback function to run when splitting placeholders, calls with (start, middle) + public void EnsurePlaceholders(ulong id, ulong size, Action splitPlaceholderCallback) + { + // Search 1 before and after the placeholders, as we may need to expand/join granular regions surrounding the requested area. + + ulong endId = id + size; + ulong searchStartId = id == 0 ? 0 : (id - 1); + int blockCount = _placeholders.FindOverlapsNonOverlapping(searchStartId, (endId - searchStartId) + 1, ref _foundBlocks); + + PlaceholderBlock first = _foundBlocks[0]; + PlaceholderBlock last = _foundBlocks[blockCount - 1]; + bool overlapStart = first.EndAddress >= id && id != 0; + bool overlapEnd = last.Address <= endId; + + for (int i = 0; i < blockCount; i++) + { + // Go through all non-granular blocks in the range and create placeholders. + PlaceholderBlock block = _foundBlocks[i]; + + if (block.Address <= id && block.EndAddress >= endId && block.IsGranular) + { + return; // The region we're searching for is already granular. + } + + if (!block.IsGranular) + { + ulong placeholderStart = Math.Max(block.Address, id); + ulong placeholderEnd = Math.Min(block.EndAddress - 1, endId); + + if (placeholderStart != block.Address && placeholderStart != block.EndAddress) + { + splitPlaceholderCallback(block.Address, placeholderStart - block.Address); + } + + for (ulong j = placeholderStart; j < placeholderEnd; j++) + { + splitPlaceholderCallback(j, 1); + } + } + + if (!((block == first && overlapStart) || (block == last && overlapEnd))) + { + // Remove blocks that will be replaced + _placeholders.Remove(block); + } + } + + if (overlapEnd) + { + if (!(first == last && overlapStart)) + { + _placeholders.Remove(last); + } + + if (last.IsGranular) + { + endId = last.EndAddress; + } + else if (last.EndAddress != endId) + { + _placeholders.Add(new PlaceholderBlock(endId, last.EndAddress - endId, false)); + } + } + + if (overlapStart && first.IsGranular) + { + first.ExtendTo(endId); + } + else + { + if (overlapStart) + { + first.ExtendTo(id); + } + + _placeholders.Add(new PlaceholderBlock(id, endId - id, true)); + } + + ValidateList(); + } + + /// + /// Coalesces placeholders in a given region, as they are not being used. + /// This assumes that the region only contains placeholders - all views and allocations must have been replaced with placeholders. + /// + /// Start of the range, measured in granular placeholders + /// Size of the range, measured in granular placeholders + /// Callback function to run when coalescing two placeholders, calls with (start, end) + public void RemovePlaceholders(ulong id, ulong size, Action coalescePlaceholderCallback) + { + ulong endId = id + size; + int blockCount = _placeholders.FindOverlapsNonOverlapping(id, size, ref _foundBlocks); + + PlaceholderBlock first = _foundBlocks[0]; + PlaceholderBlock last = _foundBlocks[blockCount - 1]; + + // All granular blocks must have non-granular blocks surrounding them, unless they start at 0. + // We must extend the non-granular blocks into the granular ones. This does mean that we need to search twice. + + if (first.IsGranular || last.IsGranular) + { + ulong surroundStart = Math.Max(0, (first.IsGranular && first.Address != 0) ? first.Address - 1 : id); + blockCount = _placeholders.FindOverlapsNonOverlapping( + surroundStart, + (last.IsGranular ? 
last.EndAddress + 1 : endId) - surroundStart, + ref _foundBlocks); + + first = _foundBlocks[0]; + last = _foundBlocks[blockCount - 1]; + } + + if (first == last) + { + return; // Already coalesced. + } + + PlaceholderBlock extendBlock = id == 0 ? null : first; + bool newBlock = false; + for (int i = extendBlock == null ? 0 : 1; i < blockCount; i++) + { + // Go through all granular blocks in the range and extend placeholders. + PlaceholderBlock block = _foundBlocks[i]; + + ulong blockEnd = block.EndAddress; + ulong extendFrom; + ulong extent = Math.Min(blockEnd, endId); + + if (block.Address < id && blockEnd > id) + { + block.ExtendTo(id); + extendBlock = null; + } + else + { + _placeholders.Remove(block); + } + + if (extendBlock == null) + { + extendFrom = id; + extendBlock = new PlaceholderBlock(id, extent - id, false); + _placeholders.Add(extendBlock); + + if (blockEnd > extent) + { + _placeholders.Add(new PlaceholderBlock(extent, blockEnd - extent, true)); + + // Skip the next non-granular block, and extend from that into the granular block afterwards. + // (assuming that one is still in the requested range) + + if (i + 1 < blockCount) + { + extendBlock = _foundBlocks[i + 1]; + } + + i++; + } + + newBlock = true; + } + else + { + extendFrom = extendBlock.Address; + extendBlock.ExtendTo(block.IsGranular ? extent : block.EndAddress); + } + + if (block.IsGranular) + { + ulong placeholderStart = Math.Max(block.Address, id); + ulong placeholderEnd = extent; + + if (newBlock) + { + placeholderStart++; + newBlock = false; + } + + for (ulong j = placeholderStart; j < placeholderEnd; j++) + { + coalescePlaceholderCallback(extendFrom, (j + 1) - extendFrom); + } + + if (extent < block.EndAddress) + { + _placeholders.Add(new PlaceholderBlock(placeholderEnd, block.EndAddress - placeholderEnd, true)); + ValidateList(); + return; + } + } + else + { + coalescePlaceholderCallback(extendFrom, block.EndAddress - extendFrom); + } + } + + ValidateList(); + } + + /// + /// Ensure that the placeholder list is valid. + /// A valid list should not have any gaps between the placeholders, + /// and there may be no placehonders with the same IsGranular value next to each other. 
+        /// <summary>
+        /// Ensure that the placeholder list is valid.
+        /// A valid list should not have any gaps between the placeholders,
+        /// and no two adjacent placeholders may have the same IsGranular value.
+        /// </summary>
+        [Conditional("DEBUG")]
+        private void ValidateList()
+        {
+            bool isGranular = false;
+            bool first = true;
+            ulong lastAddress = 0;
+
+            foreach (var placeholder in _placeholders)
+            {
+                if (placeholder.Address != lastAddress)
+                {
+                    throw new InvalidOperationException("Gap in placeholder list.");
+                }
+
+                if (isGranular == placeholder.IsGranular && !first)
+                {
+                    throw new InvalidOperationException("Placeholder list not alternating.");
+                }
+
+                first = false;
+                isGranular = placeholder.IsGranular;
+                lastAddress = placeholder.EndAddress;
+            }
+        }
+    }
+}
diff --git a/Ryujinx.Memory/WindowsShared/WindowsFlags.cs b/Ryujinx.Memory/WindowsShared/WindowsFlags.cs
new file mode 100644
index 0000000000..ca69cfe935
--- /dev/null
+++ b/Ryujinx.Memory/WindowsShared/WindowsFlags.cs
@@ -0,0 +1,52 @@
+using System;
+
+namespace Ryujinx.Memory.WindowsShared
+{
+    [Flags]
+    enum AllocationType : uint
+    {
+        CoalescePlaceholders = 0x1,
+        PreservePlaceholder = 0x2,
+        Commit = 0x1000,
+        Reserve = 0x2000,
+        Decommit = 0x4000,
+        ReplacePlaceholder = 0x4000,
+        Release = 0x8000,
+        ReservePlaceholder = 0x40000,
+        Reset = 0x80000,
+        Physical = 0x400000,
+        TopDown = 0x100000,
+        WriteWatch = 0x200000,
+        LargePages = 0x20000000
+    }
+
+    [Flags]
+    enum MemoryProtection : uint
+    {
+        NoAccess = 0x01,
+        ReadOnly = 0x02,
+        ReadWrite = 0x04,
+        WriteCopy = 0x08,
+        Execute = 0x10,
+        ExecuteRead = 0x20,
+        ExecuteReadWrite = 0x40,
+        ExecuteWriteCopy = 0x80,
+        GuardModifierflag = 0x100,
+        NoCacheModifierflag = 0x200,
+        WriteCombineModifierflag = 0x400
+    }
+
+    [Flags]
+    enum FileMapProtection : uint
+    {
+        PageReadonly = 0x02,
+        PageReadWrite = 0x04,
+        PageWriteCopy = 0x08,
+        PageExecuteRead = 0x20,
+        PageExecuteReadWrite = 0x40,
+        SectionCommit = 0x8000000,
+        SectionImage = 0x1000000,
+        SectionNoCache = 0x10000000,
+        SectionReserve = 0x4000000
+    }
+}
diff --git a/Ryujinx.Tests/Cpu/CpuTest.cs b/Ryujinx.Tests/Cpu/CpuTest.cs
index dada567d36..e823ea17b0 100644
--- a/Ryujinx.Tests/Cpu/CpuTest.cs
+++ b/Ryujinx.Tests/Cpu/CpuTest.cs
@@ -53,8 +53,9 @@ namespace Ryujinx.Tests.Cpu
             _currAddress = CodeBaseAddress;
 
             _ram = new MemoryBlock(Size * 2);
-            _memory = new MemoryManager(_ram, 1ul << 16);
-            _memory.Map(CodeBaseAddress, 0, Size * 2);
+            _memory = new MemoryManager(1ul << 16);
+            _memory.IncrementReferenceCount();
+            _memory.Map(CodeBaseAddress, _ram.GetPointer(0, Size * 2), Size * 2);
 
             _context = CpuContext.CreateExecutionContext();
             Translator.IsReadyForTranslation.Set();
@@ -73,7 +74,7 @@ namespace Ryujinx.Tests.Cpu
         [TearDown]
         public void Teardown()
         {
-            _memory.Dispose();
+            _memory.DecrementReferenceCount();
             _context.Dispose();
 
             _ram.Dispose();
diff --git a/Ryujinx.Tests/Cpu/CpuTest32.cs b/Ryujinx.Tests/Cpu/CpuTest32.cs
index 910ae2e07c..380c86e85d 100644
--- a/Ryujinx.Tests/Cpu/CpuTest32.cs
+++ b/Ryujinx.Tests/Cpu/CpuTest32.cs
@@ -48,8 +48,9 @@ namespace Ryujinx.Tests.Cpu
             _currAddress = CodeBaseAddress;
 
             _ram = new MemoryBlock(Size * 2);
-            _memory = new MemoryManager(_ram, 1ul << 16);
-            _memory.Map(CodeBaseAddress, 0, Size * 2);
+            _memory = new MemoryManager(1ul << 16);
+            _memory.IncrementReferenceCount();
+            _memory.Map(CodeBaseAddress, _ram.GetPointer(0, Size * 2), Size * 2);
 
             _context = CpuContext.CreateExecutionContext();
             _context.IsAarch32 = true;
@@ -69,7 +70,7 @@ namespace Ryujinx.Tests.Cpu
         [TearDown]
        public void Teardown()
        {
-            _memory.Dispose();
+            _memory.DecrementReferenceCount();
             _context.Dispose();
 
             _ram.Dispose();
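The test changes above capture the new ownership model in one place: the CPU MemoryManager is no longer constructed around a MemoryBlock it owns, but instead maps a host pointer and is kept alive by reference counting (per the "Add reference count for memory manager, fix dispose" change in this patch). A condensed sketch of that lifecycle, with constants standing in for the tests' Size and CodeBaseAddress:

    const ulong Size = 0x1000;            // stand-in for the test's region size
    const ulong CodeBaseAddress = 0x1000; // stand-in for the test's code base address

    var ram = new MemoryBlock(Size * 2);       // host backing memory, owned by the caller
    var memory = new MemoryManager(1ul << 16); // small guest address space, as in the tests

    memory.IncrementReferenceCount();          // hold a reference instead of owning the manager outright
    memory.Map(CodeBaseAddress, ram.GetPointer(0, Size * 2), Size * 2); // back guest VA with a host pointer

    // ... run guest code against `memory` ...

    memory.DecrementReferenceCount();          // the manager tears itself down when the last reference drops
    ram.Dispose();                             // the backing memory is still released explicitly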
diff --git a/Ryujinx/Config.json b/Ryujinx/Config.json
index cf21656a57..0ce0813b4b 100644
--- a/Ryujinx/Config.json
+++ b/Ryujinx/Config.json
@@ -31,6 +31,7 @@
   "enable_fs_integrity_checks": true,
   "fs_global_access_log_mode": 0,
   "audio_backend": "OpenAl",
+  "memory_manager_mode": "HostMappedUnsafe",
   "expand_ram": false,
   "ignore_missing_services": false,
   "gui_columns": {
diff --git a/Ryujinx/Configuration/ConfigurationFileFormat.cs b/Ryujinx/Configuration/ConfigurationFileFormat.cs
index be9c68648b..d988b849d0 100644
--- a/Ryujinx/Configuration/ConfigurationFileFormat.cs
+++ b/Ryujinx/Configuration/ConfigurationFileFormat.cs
@@ -14,7 +14,7 @@ namespace Ryujinx.Configuration
         /// <summary>
         /// The current version of the file format
        /// </summary>
-        public const int CurrentVersion = 25;
+        public const int CurrentVersion = 26;
 
         public int Version { get; set; }
 
@@ -174,6 +174,11 @@
         /// </summary>
         public AudioBackend AudioBackend { get; set; }
 
+        /// <summary>
+        /// The selected memory manager mode
+        /// </summary>
+        public MemoryManagerMode MemoryManagerMode { get; set; }
+
         /// <summary>
         /// Expands the RAM amount on the emulated system from 4GB to 6GB
         /// </summary>
diff --git a/Ryujinx/Configuration/ConfigurationState.cs b/Ryujinx/Configuration/ConfigurationState.cs
index 9ea5c28271..11ec1373d4 100644
--- a/Ryujinx/Configuration/ConfigurationState.cs
+++ b/Ryujinx/Configuration/ConfigurationState.cs
@@ -219,6 +219,11 @@
            /// </summary>
            public ReactiveObject<AudioBackend> AudioBackend { get; private set; }
 
+           /// <summary>
+           /// The selected memory manager mode
+           /// </summary>
+           public ReactiveObject<MemoryManagerMode> MemoryManagerMode { get; private set; }
+
            /// <summary>
            /// Defines the amount of RAM available on the emulated system, and how it is distributed
            /// </summary>
@@ -245,6 +250,8 @@
                FsGlobalAccessLogMode.Event += static (sender, e) => LogValueChange(sender, e, nameof(FsGlobalAccessLogMode));
                AudioBackend = new ReactiveObject<AudioBackend>();
                AudioBackend.Event += static (sender, e) => LogValueChange(sender, e, nameof(AudioBackend));
+               MemoryManagerMode = new ReactiveObject<MemoryManagerMode>();
+               MemoryManagerMode.Event += static (sender, e) => LogValueChange(sender, e, nameof(MemoryManagerMode));
                ExpandRam = new ReactiveObject<bool>();
                ExpandRam.Event += static (sender, e) => LogValueChange(sender, e, nameof(ExpandRam));
                IgnoreMissingServices = new ReactiveObject<bool>();
@@ -438,6 +445,7 @@
                EnableFsIntegrityChecks = System.EnableFsIntegrityChecks,
                FsGlobalAccessLogMode = System.FsGlobalAccessLogMode,
                AudioBackend = System.AudioBackend,
+               MemoryManagerMode = System.MemoryManagerMode,
                ExpandRam = System.ExpandRam,
                IgnoreMissingServices = System.IgnoreMissingServices,
                GuiColumns = new GuiColumns
@@ -504,6 +512,7 @@
            System.EnableFsIntegrityChecks.Value = true;
            System.FsGlobalAccessLogMode.Value = 0;
            System.AudioBackend.Value = AudioBackend.OpenAl;
+           System.MemoryManagerMode.Value = MemoryManagerMode.HostMappedUnsafe;
            System.ExpandRam.Value = false;
            System.IgnoreMissingServices.Value = false;
            Ui.GuiColumns.FavColumn.Value = true;
@@ -810,6 +819,15 @@
                configurationFileUpdated = true;
            }
 
+           if (configurationFileFormat.Version < 26)
+           {
+               Common.Logging.Logger.Warning?.Print(LogClass.Application, $"Outdated configuration version {configurationFileFormat.Version}, migrating to version 26.");
+
+               configurationFileFormat.MemoryManagerMode = MemoryManagerMode.HostMappedUnsafe;
+
+               configurationFileUpdated = true;
+           }
+
            Logger.EnableFileLog.Value = configurationFileFormat.EnableFileLog;
            Graphics.ResScale.Value = configurationFileFormat.ResScale;
            Graphics.ResScaleCustom.Value = configurationFileFormat.ResScaleCustom;
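Each format bump follows the same migration template: bump CurrentVersion, then append one guarded block that fills in a default for the new field and flags the file for rewriting. Because the blocks run in ascending version order, a stale file picks up every missing default in a single load. A hypothetical version-27 block would take this shape (the field and its default are placeholders, not real settings):

    if (configurationFileFormat.Version < 27)
    {
        Common.Logging.Logger.Warning?.Print(LogClass.Application,
            $"Outdated configuration version {configurationFileFormat.Version}, migrating to version 27.");

        // Hypothetical setting introduced by version 27.
        configurationFileFormat.SomeNewSetting = SomeDefaultValue;

        configurationFileUpdated = true;
    }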
@@ -840,6 +858,7 @@ namespace Ryujinx.Configuration
            System.EnableFsIntegrityChecks.Value = configurationFileFormat.EnableFsIntegrityChecks;
            System.FsGlobalAccessLogMode.Value = configurationFileFormat.FsGlobalAccessLogMode;
            System.AudioBackend.Value = configurationFileFormat.AudioBackend;
+           System.MemoryManagerMode.Value = configurationFileFormat.MemoryManagerMode;
            System.ExpandRam.Value = configurationFileFormat.ExpandRam;
            System.IgnoreMissingServices.Value = configurationFileFormat.IgnoreMissingServices;
            Ui.GuiColumns.FavColumn.Value = configurationFileFormat.GuiColumns.FavColumn;
diff --git a/Ryujinx/Ui/MainWindow.cs b/Ryujinx/Ui/MainWindow.cs
index 1eef7554f3..d0848012fb 100644
--- a/Ryujinx/Ui/MainWindow.cs
+++ b/Ryujinx/Ui/MainWindow.cs
@@ -430,6 +430,7 @@ namespace Ryujinx.Ui
                             ConfigurationState.Instance.System.FsGlobalAccessLogMode,
                             ConfigurationState.Instance.System.SystemTimeOffset,
                             ConfigurationState.Instance.System.TimeZone,
+                            ConfigurationState.Instance.System.MemoryManagerMode,
                             ConfigurationState.Instance.System.IgnoreMissingServices,
                             ConfigurationState.Instance.Graphics.AspectRatio);
 
diff --git a/Ryujinx/Ui/Windows/SettingsWindow.cs b/Ryujinx/Ui/Windows/SettingsWindow.cs
index 43fea4e2c1..f4e11fde8c 100644
--- a/Ryujinx/Ui/Windows/SettingsWindow.cs
+++ b/Ryujinx/Ui/Windows/SettingsWindow.cs
@@ -50,6 +50,9 @@ namespace Ryujinx.Ui.Windows
        [GUI] CheckButton _shaderCacheToggle;
        [GUI] CheckButton _ptcToggle;
        [GUI] CheckButton _fsicToggle;
+       [GUI] RadioButton _mmSoftware;
+       [GUI] RadioButton _mmHost;
+       [GUI] RadioButton _mmHostUnsafe;
        [GUI] CheckButton _expandRamToggle;
        [GUI] CheckButton _ignoreToggle;
        [GUI] CheckButton _directKeyboardAccess;
@@ -214,6 +217,19 @@
            {
                _fsicToggle.Click();
            }
 
+           switch (ConfigurationState.Instance.System.MemoryManagerMode.Value)
+           {
+               case MemoryManagerMode.SoftwarePageTable:
+                   _mmSoftware.Click();
+                   break;
+               case MemoryManagerMode.HostMapped:
+                   _mmHost.Click();
+                   break;
+               case MemoryManagerMode.HostMappedUnsafe:
+                   _mmHostUnsafe.Click();
+                   break;
+           }
+
            if (ConfigurationState.Instance.System.ExpandRam)
            {
                _expandRamToggle.Click();
            }
@@ -411,6 +427,18 @@
                ConfigurationState.Instance.System.TimeZone.Value = _systemTimeZoneEntry.Text;
            }
 
+           MemoryManagerMode memoryMode = MemoryManagerMode.SoftwarePageTable;
+
+           if (_mmHost.Active)
+           {
+               memoryMode = MemoryManagerMode.HostMapped;
+           }
+
+           if (_mmHostUnsafe.Active)
+           {
+               memoryMode = MemoryManagerMode.HostMappedUnsafe;
+           }
+
            ConfigurationState.Instance.Logger.EnableError.Value = _errorLogToggle.Active;
            ConfigurationState.Instance.Logger.EnableWarn.Value = _warningLogToggle.Active;
            ConfigurationState.Instance.Logger.EnableInfo.Value = _infoLogToggle.Active;
@@ -429,6 +457,7 @@
            ConfigurationState.Instance.Graphics.EnableShaderCache.Value = _shaderCacheToggle.Active;
            ConfigurationState.Instance.System.EnablePtc.Value = _ptcToggle.Active;
            ConfigurationState.Instance.System.EnableFsIntegrityChecks.Value = _fsicToggle.Active;
+           ConfigurationState.Instance.System.MemoryManagerMode.Value = memoryMode;
            ConfigurationState.Instance.System.ExpandRam.Value = _expandRamToggle.Active;
            ConfigurationState.Instance.System.IgnoreMissingServices.Value = _ignoreToggle.Active;
            ConfigurationState.Instance.Hid.EnableKeyboard.Value = _directKeyboardAccess.Active;
diff --git a/Ryujinx/Ui/Windows/SettingsWindow.glade b/Ryujinx/Ui/Windows/SettingsWindow.glade
index 3dc0bdce8a..daa9ecaaff 100644
--- a/Ryujinx/Ui/Windows/SettingsWindow.glade
+++ 
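The widget-to-enum mapping above relies on the three buttons forming a single radio group, so at most one can be active; SoftwarePageTable is simply the fallback when neither host-mapped button is checked. The same logic can be read as a switch expression (an equivalent sketch, not the code the patch uses):

    MemoryManagerMode memoryMode = (_mmHost.Active, _mmHostUnsafe.Active) switch
    {
        (true, _) => MemoryManagerMode.HostMapped,
        (_, true) => MemoryManagerMode.HostMappedUnsafe,
        _         => MemoryManagerMode.SoftwarePageTable, // _mmSoftware active
    };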
b/Ryujinx/Ui/Windows/SettingsWindow.glade
@@ -1546,6 +1546,93 @@
 [The 87 lines of GtkBuilder XML added by this hunk did not survive extraction; only bare property values remain. The hunk inserts a "Memory Manager Mode:" row into the System settings page: a label carrying the tooltip "Change how guest memory is mapped and accessed. Greatly affects emulated CPU performance.", followed by three radio buttons in one group, bound to the _mmSoftware, _mmHost, and _mmHostUnsafe fields declared in SettingsWindow.cs, with _mmSoftware as the group anchor. Their labels and tooltips are: "Software" ("Use a software page table for address translation. Highest accuracy but slowest performance."), "Host (fast)" ("Directly map memory in the host address space. Much faster JIT compilation and execution."), and "Host unchecked (fastest, unsafe)" ("Directly map memory, but do not mask the address within the guest address space before access. Faster, but at the cost of safety. The guest application can access memory from anywhere in Ryujinx, so only run programs you trust with this mode.").]
diff --git a/Ryujinx/_schema.json b/Ryujinx/_schema.json
index e03e4afcc5..242874d5ef 100644
--- a/Ryujinx/_schema.json
+++ b/Ryujinx/_schema.json
@@ -1210,7 +1210,7 @@
        false
      ]
    },
-    "hide_cursor_on_idle": {
+    "hide_cursor_on_idle": {
      "$id": "#/properties/hide_cursor_on_idle",
      "type": "boolean",
      "title": "Hide Cursor On Idle",
@@ -1280,6 +1280,18 @@
        "OpenAl"
      ]
    },
+    "memory_manager_mode": {
+      "$id": "#/properties/memory_manager_mode",
+      "type": "string",
+      "title": "The selected memory manager mode",
+      "description": "The selected memory manager mode",
+      "default": "HostMappedUnsafe",
+      "enum": [
+        "SoftwarePageTable",
+        "HostMapped",
+        "HostMappedUnsafe"
+      ]
+    },
    "ignore_missing_services": {
      "$id": "#/properties/ignore_missing_services",
      "type": "boolean",