jinx/Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs
gdkchan 95017b8c66
Support memory aliasing (#2954)
* Back to the origins: Make memory manager take guest PA rather than host address once again

* Direct mapping with alias support on Windows

* Fixes and remove more of the emulated shared memory

* Linux support

* Make shared and transfer memory not depend on SharedMemoryStorage

* More efficient view mapping on Windows (no longer restricted to 4KB pages at a time)

* Handle potential access violations caused by partial unmap

* Implement host mapping using shared memory on Linux

* Add new GetPhysicalAddressChecked method, used to ensure the virtual address is mapped before address translation

Also align GetRef behaviour with software memory manager

* We don't need a mirrorable memory block for software memory manager mode

* Disable memory aliasing tests while we don't have shared memory support on Mac

* Shared memory & SIGBUS handler for macOS

* Fix typo + nits + re-enable memory tests

* Set MAP_JIT_DARWIN on x86 Mac too

* Add back the address space mirror

* Only set MAP_JIT_DARWIN if we are mapping as executable

* Disable aliasing tests again (still fails on Mac)

* Fix UnmapView4KB (by not casting size to int)

* Use ref counting on memory blocks to delay closing the shared memory handle until all blocks using it are disposed

* Address PR feedback

* Make RO hold a reference to the guest process memory manager to avoid early disposal

Co-authored-by: nastys <nastys@users.noreply.github.com>
2022-05-02 20:30:02 -03:00
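
The last functional change in the list above (reference counting memory blocks so the shared memory handle is only closed once every block using it has been disposed) amounts to a pattern along the following lines. This is a minimal sketch with hypothetical type names (SharedHandleOwner, MemoryBlockView), not the actual Ryujinx types.

using System;
using System.Threading;

// Hypothetical owner of a native shared memory handle. The handle is released
// only after the owner and every view created from it have been disposed.
class SharedHandleOwner : IDisposable
{
    public IntPtr Handle { get; }

    private int _referenceCount = 1; // The owner itself holds one reference.

    public SharedHandleOwner(IntPtr handle)
    {
        Handle = handle;
    }

    public void IncrementReferenceCount()
    {
        Interlocked.Increment(ref _referenceCount);
    }

    public void DecrementReferenceCount()
    {
        if (Interlocked.Decrement(ref _referenceCount) == 0)
        {
            // Last reference gone: this is where the native shared memory
            // handle would be closed (CloseHandle on Windows, close/shm_unlink
            // on Linux, and so on).
        }
    }

    public void Dispose()
    {
        DecrementReferenceCount();
    }
}

// Hypothetical view over the shared memory. Creating a view takes a reference
// on the owner; disposing the view gives it back.
class MemoryBlockView : IDisposable
{
    private readonly SharedHandleOwner _owner;

    public MemoryBlockView(SharedHandleOwner owner)
    {
        _owner = owner;
        _owner.IncrementReferenceCount();
    }

    public void Dispose()
    {
        _owner.DecrementReferenceCount();
    }
}

With this arrangement, the order of disposal does not matter: whichever reference is released last closes the handle.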

191 lines · 6 KiB · C#

using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.Memory;
using System;
using System.Diagnostics;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KPageTable : KPageTableBase
{
private readonly IVirtualMemoryManager _cpuMemory;
public KPageTable(KernelContext context, IVirtualMemoryManager cpuMemory) : base(context)
{
_cpuMemory = cpuMemory;
}
/// <inheritdoc/>
protected override void GetPhysicalRegions(ulong va, ulong size, KPageList pageList)
{
var ranges = _cpuMemory.GetPhysicalRegions(va, size);
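            // The host memory manager returns offsets into the backing memory;
            // adding the DRAM base converts them to guest physical addresses.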
foreach (var range in ranges)
{
pageList.AddRange(range.Address + DramMemoryMap.DramBase, range.Size / PageSize);
}
}
/// <inheritdoc/>
protected override ReadOnlySpan<byte> GetSpan(ulong va, int size)
{
return _cpuMemory.GetSpan(va, size);
}
/// <inheritdoc/>
protected override KernelResult MapMemory(ulong src, ulong dst, ulong pagesCount, KMemoryPermission oldSrcPermission, KMemoryPermission newDstPermission)
{
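            // Collect the physical pages backing the source range, revoke access
            // to the source, then map the same pages at the destination.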
KPageList pageList = new KPageList();
GetPhysicalRegions(src, pagesCount * PageSize, pageList);
KernelResult result = Reprotect(src, pagesCount, KMemoryPermission.None);
if (result != KernelResult.Success)
{
return result;
}
result = MapPages(dst, pageList, newDstPermission, false, 0);
if (result != KernelResult.Success)
{
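                // Mapping the destination failed; restore the original permissions on the source range.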
KernelResult reprotectResult = Reprotect(src, pagesCount, oldSrcPermission);
Debug.Assert(reprotectResult == KernelResult.Success);
}
return result;
}
/// <inheritdoc/>
protected override KernelResult UnmapMemory(ulong dst, ulong src, ulong pagesCount, KMemoryPermission oldDstPermission, KMemoryPermission newSrcPermission)
{
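            // The destination must be backed by exactly the same physical pages
            // as the source, otherwise the request is rejected.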
ulong size = pagesCount * PageSize;
KPageList srcPageList = new KPageList();
KPageList dstPageList = new KPageList();
GetPhysicalRegions(src, size, srcPageList);
GetPhysicalRegions(dst, size, dstPageList);
if (!dstPageList.IsEqual(srcPageList))
{
return KernelResult.InvalidMemRange;
}
KernelResult result = Unmap(dst, pagesCount);
if (result != KernelResult.Success)
{
return result;
}
result = Reprotect(src, pagesCount, newSrcPermission);
if (result != KernelResult.Success)
{
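                // Changing the source permissions failed; re-map the destination with its
                // old permissions so the previous state is preserved.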
KernelResult mapResult = MapPages(dst, dstPageList, oldDstPermission, false, 0);
Debug.Assert(mapResult == KernelResult.Success);
}
return result;
}
/// <inheritdoc/>
protected override KernelResult MapPages(ulong dstVa, ulong pagesCount, ulong srcPa, KMemoryPermission permission, bool shouldFillPages, byte fillValue)
{
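            // srcPa is a guest physical address; subtracting the DRAM base gives
            // the offset into the backing memory block to commit and map.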
ulong size = pagesCount * PageSize;
Context.Memory.Commit(srcPa - DramMemoryMap.DramBase, size);
_cpuMemory.Map(dstVa, srcPa - DramMemoryMap.DramBase, size);
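            // Only pages inside the DRAM heap region are reference counted.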
if (DramMemoryMap.IsHeapPhysicalAddress(srcPa))
{
Context.MemoryManager.IncrementPagesReferenceCount(srcPa, pagesCount);
}
if (shouldFillPages)
{
_cpuMemory.Fill(dstVa, size, fillValue);
}
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult MapPages(ulong address, KPageList pageList, KMemoryPermission permission, bool shouldFillPages, byte fillValue)
{
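            // The scoped page list takes a reference on each page up front; SignalSuccess
            // at the end keeps those references once every page has been mapped.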
using var scopedPageList = new KScopedPageList(Context.MemoryManager, pageList);
ulong currentVa = address;
foreach (var pageNode in pageList)
{
ulong addr = pageNode.Address - DramMemoryMap.DramBase;
ulong size = pageNode.PagesCount * PageSize;
Context.Memory.Commit(addr, size);
_cpuMemory.Map(currentVa, addr, size);
if (shouldFillPages)
{
_cpuMemory.Fill(currentVa, size, fillValue);
}
currentVa += size;
}
scopedPageList.SignalSuccess();
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult Unmap(ulong address, ulong pagesCount)
{
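            // Gather the heap pages currently mapped in the range so their reference
            // counts can be dropped once the range has been unmapped.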
KPageList pagesToClose = new KPageList();
var regions = _cpuMemory.GetPhysicalRegions(address, pagesCount * PageSize);
foreach (var region in regions)
{
ulong pa = region.Address + DramMemoryMap.DramBase;
if (DramMemoryMap.IsHeapPhysicalAddress(pa))
{
pagesToClose.AddRange(pa, region.Size / PageSize);
}
}
_cpuMemory.Unmap(address, pagesCount * PageSize);
pagesToClose.DecrementPagesReferenceCount(Context.MemoryManager);
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult Reprotect(ulong address, ulong pagesCount, KMemoryPermission permission)
{
// TODO.
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult ReprotectWithAttributes(ulong address, ulong pagesCount, KMemoryPermission permission)
{
// TODO.
return KernelResult.Success;
}
/// <inheritdoc/>
protected override void SignalMemoryTracking(ulong va, ulong size, bool write)
{
_cpuMemory.SignalMemoryTracking(va, size, write);
}
/// <inheritdoc/>
protected override void Write(ulong va, ReadOnlySpan<byte> data)
{
_cpuMemory.Write(va, data);
}
}
}