JinxRyu/Ryujinx.Graphics.GAL/Multithreading/BufferMap.cs
riperiperi ec3e848d79
Add a Multithreading layer for the GAL, multi-thread shader compilation at runtime (#2501)
* Initial Implementation

About as fast as nvidia GL multithreading, can be improved with faster command queuing.

* Struct based command list

Speeds up a bit. Still a lot of time lost to resource copy.

* Do shader init while the render thread is active.

* Introduce circular span pool V1

Ideally should be able to use structs instead of references for storing these spans on commands. Will try that next.

* Refactor SpanRef some more

Use a struct to represent SpanRef, rather than a reference.

* Flush buffers on background thread

* Use a span for UpdateRenderScale.

Much faster than copying the array.

* Calculate command size using reflection

* WIP parallel shaders

* Some minor optimisation

* Only 2 max refs per command now.

The command with 3 refs is gone. 😌

* Don't cast on the GPU side

* Remove redundant casts, force sync on window present

* Fix Shader Cache

* Fix host shader save.

* Fixup to work with new renderer stuff

* Make command Run static, use array of delegates as lookup

Profile says this takes less time than the previous way.

* Bring up to date

* Add settings toggle. Fix Multithreading Off mode.

* Fix warning.

* Release tracking lock for flushes

* Fix Conditional Render fast path with threaded gal

* Make handle iteration safe when releasing the lock

This is mostly temporary.

* Attempt to set backend threading on driver

Only really works on nvidia before launching a game.

* Fix race condition with BufferModifiedRangeList, exceptions in tracking actions

* Update buffer set commands

* Some cleanup

* Only use stutter workaround when using opengl renderer non-threaded

* Add host-conditional reservation of counter events

There has always been the possibility that conditional rendering could use a query object just as it is disposed by the counter queue. This change makes it so that when the host decides to use host conditional rendering, the query object is reserved so that it cannot be deleted. Counter events can optionally start reserved, as the threaded implementation can reserve them before the backend creates them, and there would otherwise be a short amount of time where the counter queue could dispose the event before a call to reserve it could be made.

* Address Feedback

* Make counter flush tracked again.

Hopefully does not cause any issues this time.

* Wait for FlushTo on the main queue thread.

Currently assumes only one thread will want to FlushTo (in this case, the GPU thread)

* Add SDL2 headless integration

* Add HLE macro commands.

Co-authored-by: Mary <mary@mary.zone>
2021-08-27 00:31:29 +02:00

170 lines
4.9 KiB
C#

using System;
using System.Collections.Generic;
using System.Runtime.CompilerServices;
using System.Threading;
namespace Ryujinx.Graphics.GAL.Multithreading
{
/// <summary>
/// Buffer handles given to the client are not the same as those provided by the backend,
/// as their handle is created at a later point on the queue.
/// The handle returned is a unique identifier that will map to the real buffer when it is available.
/// Note that any uses within the queue should be safe, but outside you must use MapBufferBlocking.
/// </summary>
class BufferMap
{
    // Monotonic counter used to mint unique threaded handles. Only ever advanced
    // via Interlocked.Increment, so handle creation is safe from any thread.
    private ulong _bufferHandle = 0;

    // Maps threaded (client-facing) handles to the real backend handles.
    // Guarded by lock (_bufferMap).
    private Dictionary<BufferHandle, BufferHandle> _bufferMap = new Dictionary<BufferHandle, BufferHandle>();

    // Threaded handles whose backend buffer has not been created yet.
    // Guarded by lock (_inFlight). A handle is added on creation and removed
    // once the queue assigns its real backend handle.
    private HashSet<BufferHandle> _inFlight = new HashSet<BufferHandle>();

    // Signalled each time a handle leaves the in-flight set, waking a thread
    // blocked in MapBufferBlocking. AutoResetEvent releases one waiter per Set;
    // see MapBufferBlocking for how the signal is re-propagated to others.
    private AutoResetEvent _inFlightChanged = new AutoResetEvent(false);

    /// <summary>
    /// Creates a new threaded buffer handle and marks it as in-flight.
    /// The real backend buffer does not exist yet; it will be registered
    /// later via <see cref="AssignBuffer"/> when the queue runs the creation command.
    /// </summary>
    /// <returns>A unique threaded handle to hand back to the client</returns>
    internal BufferHandle CreateBufferHandle()
    {
        ulong handle64 = Interlocked.Increment(ref _bufferHandle);

        // Reinterpret the 64-bit counter value as a BufferHandle without a
        // public constructor; the handle is just an opaque unique identifier.
        BufferHandle threadedHandle = Unsafe.As<ulong, BufferHandle>(ref handle64);

        lock (_inFlight)
        {
            _inFlight.Add(threadedHandle);
        }

        return threadedHandle;
    }

    /// <summary>
    /// Associates a threaded handle with its real backend handle, removes it
    /// from the in-flight set, and wakes a thread that may be blocked waiting
    /// for it in <see cref="MapBufferBlocking"/>.
    /// </summary>
    /// <param name="threadedHandle">Threaded handle returned by <see cref="CreateBufferHandle"/></param>
    /// <param name="realHandle">Backend handle produced by the queue</param>
    internal void AssignBuffer(BufferHandle threadedHandle, BufferHandle realHandle)
    {
        lock (_bufferMap)
        {
            _bufferMap[threadedHandle] = realHandle;
        }

        lock (_inFlight)
        {
            _inFlight.Remove(threadedHandle);
        }

        // Set outside the locks: even if a waiter checked _inFlight just
        // before this, AutoResetEvent latches the signal so its WaitOne
        // returns immediately rather than missing the wake-up.
        _inFlightChanged.Set();
    }

    /// <summary>
    /// Removes a threaded handle's mapping, typically when the buffer is destroyed.
    /// </summary>
    /// <param name="threadedHandle">Threaded handle to unmap</param>
    internal void UnassignBuffer(BufferHandle threadedHandle)
    {
        lock (_bufferMap)
        {
            _bufferMap.Remove(threadedHandle);
        }
    }

    /// <summary>
    /// Maps a threaded buffer handle to its backend handle, without waiting.
    /// </summary>
    /// <param name="handle">Threaded handle to map</param>
    /// <returns>The backend handle, or <see cref="BufferHandle.Null"/> if not yet assigned</returns>
    internal BufferHandle MapBuffer(BufferHandle handle)
    {
        // Maps a threaded buffer to a backend one.
        // Threaded buffers are returned on creation as the buffer
        // isn't actually created until the queue runs the command.

        BufferHandle result;
        lock (_bufferMap)
        {
            if (!_bufferMap.TryGetValue(handle, out result))
            {
                result = BufferHandle.Null;
            }

            return result;
        }
    }

    /// <summary>
    /// Maps a threaded buffer handle to its backend handle, blocking until the
    /// backend buffer has been created if it is still in-flight.
    /// Intended for use outside the command queue, where the creation command
    /// may not have executed yet.
    /// </summary>
    /// <param name="handle">Threaded handle to map</param>
    /// <returns>The backend handle, or <see cref="BufferHandle.Null"/> if the handle was never assigned</returns>
    internal BufferHandle MapBufferBlocking(BufferHandle handle)
    {
        // Blocks until the handle is available.

        // Fast path: the backend handle already exists.
        BufferHandle result;
        lock (_bufferMap)
        {
            if (_bufferMap.TryGetValue(handle, out result))
            {
                return result;
            }
        }

        bool signal = false;

        // Wait until our handle leaves the in-flight set. Each Set wakes only
        // one waiter, so every consumed wake-up that wasn't a spurious final
        // check is recorded in `signal` for re-propagation below.
        while (true)
        {
            lock (_inFlight)
            {
                if (!_inFlight.Contains(handle))
                {
                    break;
                }
            }

            _inFlightChanged.WaitOne();
            signal = true;
        }

        if (signal)
        {
            // Signal other threads which might still be waiting.
            // (We consumed an AutoResetEvent wake-up that may have been meant
            // for a different handle's waiter, so pass it along.)
            _inFlightChanged.Set();
        }

        return MapBuffer(handle);
    }

    /// <summary>
    /// Maps the threaded handle inside a buffer range to its backend handle, without waiting.
    /// </summary>
    /// <param name="range">Buffer range referencing a threaded handle</param>
    /// <returns>The same range with its handle replaced by the backend handle (or Null if unassigned)</returns>
    internal BufferRange MapBufferRange(BufferRange range)
    {
        return new BufferRange(MapBuffer(range.Handle), range.Offset, range.Size);
    }

    /// <summary>
    /// Rewrites a span of buffer ranges in place so each references its backend handle.
    /// Unassigned handles become <see cref="BufferHandle.Null"/>.
    /// The map lock is taken once for the whole span rather than per element.
    /// </summary>
    /// <param name="ranges">Buffer ranges to rewrite; modified in place</param>
    /// <returns>The same span, for convenience</returns>
    internal Span<BufferRange> MapBufferRanges(Span<BufferRange> ranges)
    {
        // Rewrite the buffer ranges to point to the mapped handles.

        lock (_bufferMap)
        {
            for (int i = 0; i < ranges.Length; i++)
            {
                ref BufferRange range = ref ranges[i];

                BufferHandle result;
                if (!_bufferMap.TryGetValue(range.Handle, out result))
                {
                    result = BufferHandle.Null;
                }

                range = new BufferRange(result, range.Offset, range.Size);
            }
        }

        return ranges;
    }

    /// <summary>
    /// Rewrites a span of vertex buffer descriptors in place so each buffer
    /// range references its backend handle. Unassigned handles become
    /// <see cref="BufferHandle.Null"/>. Stride and divisor are preserved.
    /// </summary>
    /// <param name="ranges">Vertex buffer descriptors to rewrite; modified in place</param>
    /// <returns>The same span, for convenience</returns>
    internal Span<VertexBufferDescriptor> MapBufferRanges(Span<VertexBufferDescriptor> ranges)
    {
        // Rewrite the buffer ranges to point to the mapped handles.

        lock (_bufferMap)
        {
            for (int i = 0; i < ranges.Length; i++)
            {
                BufferRange range = ranges[i].Buffer;

                BufferHandle result;
                if (!_bufferMap.TryGetValue(range.Handle, out result))
                {
                    result = BufferHandle.Null;
                }

                range = new BufferRange(result, range.Offset, range.Size);

                ranges[i] = new VertexBufferDescriptor(range, ranges[i].Stride, ranges[i].Divisor);
            }
        }

        return ranges;
    }
}
}