diff --git a/Ryujinx.Graphics.Device/DeviceState.cs b/Ryujinx.Graphics.Device/DeviceState.cs index ea6942ec26..740d8589c8 100644 --- a/Ryujinx.Graphics.Device/DeviceState.cs +++ b/Ryujinx.Graphics.Device/DeviceState.cs @@ -90,14 +90,12 @@ namespace Ryujinx.Graphics.Device { int alignedOffset = Align(offset); + GetRef(alignedOffset) = data; + if (_writeCallbacks.TryGetValue(alignedOffset, out Action write)) { write(data); } - else - { - GetRef(alignedOffset) = data; - } } } diff --git a/Ryujinx.Graphics.Gpu/DmaPusher.cs b/Ryujinx.Graphics.Gpu/DmaPusher.cs deleted file mode 100644 index 3b5ac830db..0000000000 --- a/Ryujinx.Graphics.Gpu/DmaPusher.cs +++ /dev/null @@ -1,316 +0,0 @@ -using System; -using System.Collections.Concurrent; -using System.Runtime.InteropServices; -using System.Threading; - -namespace Ryujinx.Graphics.Gpu -{ - /// - /// GPU DMA pusher, used to push commands to the GPU. - /// - public class DmaPusher - { - private ConcurrentQueue _commandBufferQueue; - - private enum CommandBufferType - { - Prefetch, - NoPrefetch, - } - - private struct CommandBuffer - { - /// - /// The type of the command buffer. - /// - public CommandBufferType Type; - - /// - /// Fetched data. - /// - public int[] Words; - - /// - /// The GPFIFO entry address. (used in NoPrefetch mode) - /// - public ulong EntryAddress; - - /// - /// The count of entries inside this GPFIFO entry. - /// - public uint EntryCount; - - /// - /// Fetch the command buffer. - /// - public void Fetch(GpuContext context) - { - if (Words == null) - { - Words = MemoryMarshal.Cast(context.MemoryAccessor.GetSpan(EntryAddress, (int)EntryCount * 4)).ToArray(); - } - } - - /// - /// Read inside the command buffer. - /// - /// The GPU context - /// The index inside the command buffer - /// The value read - public int ReadAt(GpuContext context, int index) - { - return Words[index]; - } - } - - private CommandBuffer _currentCommandBuffer; - private int _wordsPosition; - - /// - /// Internal GPFIFO state. - /// - private struct DmaState - { - public int Method; - public int SubChannel; - public int MethodCount; - public bool NonIncrementing; - public bool IncrementOnce; - public int LengthPending; - } - - private DmaState _state; - - private bool _ibEnable; - - private GpuContext _context; - - private AutoResetEvent _event; - - /// - /// Creates a new instance of the GPU DMA pusher. - /// - /// GPU context that the pusher belongs to - internal DmaPusher(GpuContext context) - { - _context = context; - - _ibEnable = true; - - _commandBufferQueue = new ConcurrentQueue(); - - _event = new AutoResetEvent(false); - } - - /// - /// Signal the pusher that there are new entries to process. - /// - public void SignalNewEntries() - { - _event.Set(); - } - - /// - /// Push a GPFIFO entry in the form of a prefetched command buffer. - /// It is intended to be used by nvservices to handle special cases. - /// - /// The command buffer containing the prefetched commands - public void PushHostCommandBuffer(int[] commandBuffer) - { - _commandBufferQueue.Enqueue(new CommandBuffer - { - Type = CommandBufferType.Prefetch, - Words = commandBuffer, - EntryAddress = ulong.MaxValue, - EntryCount = (uint)commandBuffer.Length - }); - } - - /// - /// Create a CommandBuffer from a GPFIFO entry. 
- /// - /// The GPFIFO entry - /// A new CommandBuffer based on the GPFIFO entry - private CommandBuffer CreateCommandBuffer(ulong entry) - { - ulong length = (entry >> 42) & 0x1fffff; - ulong startAddress = entry & 0xfffffffffc; - - bool noPrefetch = (entry & (1UL << 63)) != 0; - - CommandBufferType type = CommandBufferType.Prefetch; - - if (noPrefetch) - { - type = CommandBufferType.NoPrefetch; - } - - return new CommandBuffer - { - Type = type, - Words = null, - EntryAddress = startAddress, - EntryCount = (uint)length - }; - } - - /// - /// Pushes GPFIFO entries. - /// - /// GPFIFO entries - public void PushEntries(ReadOnlySpan entries) - { - bool beforeBarrier = true; - - foreach (ulong entry in entries) - { - CommandBuffer commandBuffer = CreateCommandBuffer(entry); - - if (beforeBarrier && commandBuffer.Type == CommandBufferType.Prefetch) - { - commandBuffer.Fetch(_context); - } - - if (commandBuffer.Type == CommandBufferType.NoPrefetch) - { - beforeBarrier = false; - } - - _commandBufferQueue.Enqueue(commandBuffer); - } - } - - /// - /// Waits until commands are pushed to the FIFO. - /// - /// True if commands were received, false if wait timed out - public bool WaitForCommands() - { - return _event.WaitOne(8); - } - - /// - /// Processes commands pushed to the FIFO. - /// - public void DispatchCalls() - { - while (Step()); - } - - /// - /// Processes a single command on the FIFO. - /// - /// True if the FIFO still has commands to be processed, false otherwise - private bool Step() - { - if (_wordsPosition != _currentCommandBuffer.EntryCount) - { - int word = _currentCommandBuffer.ReadAt(_context, _wordsPosition++); - - if (_state.LengthPending != 0) - { - _state.LengthPending = 0; - _state.MethodCount = word & 0xffffff; - } - else if (_state.MethodCount != 0) - { - CallMethod(word); - - if (!_state.NonIncrementing) - { - _state.Method++; - } - - if (_state.IncrementOnce) - { - _state.NonIncrementing = true; - } - - _state.MethodCount--; - } - else - { - int submissionMode = (word >> 29) & 7; - - switch (submissionMode) - { - case 1: - // Incrementing. - SetNonImmediateState(word); - - _state.NonIncrementing = false; - _state.IncrementOnce = false; - - break; - - case 3: - // Non-incrementing. - SetNonImmediateState(word); - - _state.NonIncrementing = true; - _state.IncrementOnce = false; - - break; - - case 4: - // Immediate. - _state.Method = (word >> 0) & 0x1fff; - _state.SubChannel = (word >> 13) & 7; - _state.NonIncrementing = true; - _state.IncrementOnce = false; - - CallMethod((word >> 16) & 0x1fff); - - break; - - case 5: - // Increment-once. - SetNonImmediateState(word); - - _state.NonIncrementing = false; - _state.IncrementOnce = true; - - break; - } - } - } - else if (_ibEnable && _commandBufferQueue.TryDequeue(out CommandBuffer entry)) - { - _currentCommandBuffer = entry; - _wordsPosition = 0; - - _currentCommandBuffer.Fetch(_context); - } - else - { - return false; - } - - return true; - } - - /// - /// Sets current non-immediate method call state. - /// - /// Compressed method word - private void SetNonImmediateState(int word) - { - _state.Method = (word >> 0) & 0x1fff; - _state.SubChannel = (word >> 13) & 7; - _state.MethodCount = (word >> 16) & 0x1fff; - } - - /// - /// Forwards the method call to GPU engines. 
- /// - /// Call argument - private void CallMethod(int argument) - { - _context.Fifo.CallMethod(new MethodParams( - _state.Method, - argument, - _state.SubChannel, - _state.MethodCount)); - } - } -} \ No newline at end of file diff --git a/Ryujinx.Graphics.Gpu/Engine/GPFifo/CompressedMethod.cs b/Ryujinx.Graphics.Gpu/Engine/GPFifo/CompressedMethod.cs new file mode 100644 index 0000000000..9a21348945 --- /dev/null +++ b/Ryujinx.Graphics.Gpu/Engine/GPFifo/CompressedMethod.cs @@ -0,0 +1,39 @@ +// This file was auto-generated from NVIDIA official Maxwell definitions. + +namespace Ryujinx.Graphics.Gpu.Engine.GPFifo +{ + enum TertOp + { + Grp0IncMethod = 0, + Grp0SetSubDevMask = 1, + Grp0StoreSubDevMask = 2, + Grp0UseSubDevMask = 3, + Grp2NonIncMethod = 0 + } + + enum SecOp + { + Grp0UseTert = 0, + IncMethod = 1, + Grp2UseTert = 2, + NonIncMethod = 3, + ImmdDataMethod = 4, + OneInc = 5, + Reserved6 = 6, + EndPbSegment = 7 + } + + struct CompressedMethod + { + public uint Method; + public int MethodAddressOld => (int)((Method >> 2) & 0x7FF); + public int MethodAddress => (int)((Method >> 0) & 0xFFF); + public int SubdeviceMask => (int)((Method >> 4) & 0xFFF); + public int MethodSubchannel => (int)((Method >> 13) & 0x7); + public TertOp TertOp => (TertOp)((Method >> 16) & 0x3); + public int MethodCountOld => (int)((Method >> 18) & 0x7FF); + public int MethodCount => (int)((Method >> 16) & 0x1FFF); + public int ImmdData => (int)((Method >> 16) & 0x1FFF); + public SecOp SecOp => (SecOp)((Method >> 29) & 0x7); + } +} diff --git a/Ryujinx.Graphics.Gpu/Engine/GPFifo/GPEntry.cs b/Ryujinx.Graphics.Gpu/Engine/GPFifo/GPEntry.cs new file mode 100644 index 0000000000..9866cd2e7b --- /dev/null +++ b/Ryujinx.Graphics.Gpu/Engine/GPFifo/GPEntry.cs @@ -0,0 +1,51 @@ +// This file was auto-generated from NVIDIA official Maxwell definitions. + +namespace Ryujinx.Graphics.Gpu.Engine.GPFifo +{ + enum Entry0Fetch + { + Unconditional = 0, + Conditional = 1, + } + + enum Entry1Priv + { + User = 0, + Kernel = 1, + } + + enum Entry1Level + { + Main = 0, + Subroutine = 1, + } + + enum Entry1Sync + { + Proceed = 0, + Wait = 1, + } + + enum Entry1Opcode + { + Nop = 0, + Illegal = 1, + Crc = 2, + PbCrc = 3, + } + + struct GPEntry + { + public uint Entry0; + public Entry0Fetch Entry0Fetch => (Entry0Fetch)((Entry0 >> 0) & 0x1); + public int Entry0Get => (int)((Entry0 >> 2) & 0x3FFFFFFF); + public int Entry0Operand => (int)(Entry0); + public uint Entry1; + public int Entry1GetHi => (int)((Entry1 >> 0) & 0xFF); + public Entry1Priv Entry1Priv => (Entry1Priv)((Entry1 >> 8) & 0x1); + public Entry1Level Entry1Level => (Entry1Level)((Entry1 >> 9) & 0x1); + public int Entry1Length => (int)((Entry1 >> 10) & 0x1FFFFF); + public Entry1Sync Entry1Sync => (Entry1Sync)((Entry1 >> 31) & 0x1); + public Entry1Opcode Entry1Opcode => (Entry1Opcode)((Entry1 >> 0) & 0xFF); + } +} diff --git a/Ryujinx.Graphics.Gpu/Engine/GPFifo/GPFifoClass.cs b/Ryujinx.Graphics.Gpu/Engine/GPFifo/GPFifoClass.cs new file mode 100644 index 0000000000..ec2e4bdc16 --- /dev/null +++ b/Ryujinx.Graphics.Gpu/Engine/GPFifo/GPFifoClass.cs @@ -0,0 +1,214 @@ +using Ryujinx.Graphics.Device; +using Ryujinx.Graphics.Gpu.Engine.MME; +using Ryujinx.Graphics.Gpu.State; +using System; +using System.Collections.Generic; +using System.Threading; + +namespace Ryujinx.Graphics.Gpu.Engine.GPFifo +{ + /// + /// Represents a GPU General Purpose FIFO class. 
+ /// + class GPFifoClass : IDeviceState + { + private readonly GpuContext _context; + private readonly DeviceState _state; + + private const int MacrosCount = 0x80; + + // Note: The size of the macro memory is unknown, we just make + // a guess here and use 256kb as the size. Increase if needed. + private const int MacroCodeSize = 256 * 256; + + private readonly Macro[] _macros; + private readonly int[] _macroCode; + + /// + /// MME Shadow RAM Control. + /// + public ShadowRamControl ShadowCtrl { get; private set; } + + /// + /// Creates a new instance of the GPU General Purpose FIFO class. + /// + /// GPU context + public GPFifoClass(GpuContext context) + { + _context = context; + _state = new DeviceState(new Dictionary + { + { nameof(GPFifoClassState.Semaphored), new RwCallback(Semaphored, null) }, + { nameof(GPFifoClassState.Syncpointb), new RwCallback(Syncpointb, null) }, + { nameof(GPFifoClassState.WaitForIdle), new RwCallback(WaitForIdle, null) }, + { nameof(GPFifoClassState.LoadMmeInstructionRam), new RwCallback(LoadMmeInstructionRam, null) }, + { nameof(GPFifoClassState.LoadMmeStartAddressRam), new RwCallback(LoadMmeStartAddressRam, null) }, + { nameof(GPFifoClassState.SetMmeShadowRamControl), new RwCallback(SetMmeShadowRamControl, null) } + }); + + _macros = new Macro[MacrosCount]; + _macroCode = new int[MacroCodeSize]; + } + + /// + /// Reads data from the class registers. + /// + /// Register byte offset + /// Data at the specified offset + public int Read(int offset) => _state.Read(offset); + + /// + /// Writes data to the class registers. + /// + /// Register byte offset + /// Data to be written + public void Write(int offset, int data) => _state.Write(offset, data); + + /// + /// Writes a GPU counter to guest memory. + /// + /// Method call argument + public void Semaphored(int argument) + { + ulong address = ((ulong)_state.State.SemaphorebOffsetLower << 2) | + ((ulong)_state.State.SemaphoreaOffsetUpper << 32); + + int value = _state.State.SemaphorecPayload; + + SemaphoredOperation operation = _state.State.SemaphoredOperation; + + // TODO: Acquire operations (Wait), interrupts for invalid combinations. + if (operation == SemaphoredOperation.Release) + { + _context.MemoryAccessor.Write(address, value); + } + else if (operation == SemaphoredOperation.Reduction) + { + bool signed = _state.State.SemaphoredFormat == SemaphoredFormat.Signed; + + int mem = _context.MemoryAccessor.Read(address); + + switch (_state.State.SemaphoredReduction) + { + case SemaphoredReduction.Min: + value = signed ? Math.Min(mem, value) : (int)Math.Min((uint)mem, (uint)value); + break; + case SemaphoredReduction.Max: + value = signed ? Math.Max(mem, value) : (int)Math.Max((uint)mem, (uint)value); + break; + case SemaphoredReduction.Xor: + value ^= mem; + break; + case SemaphoredReduction.And: + value &= mem; + break; + case SemaphoredReduction.Or: + value |= mem; + break; + case SemaphoredReduction.Add: + value += mem; + break; + case SemaphoredReduction.Inc: + value = (uint)mem < (uint)value ? mem + 1 : 0; + break; + case SemaphoredReduction.Dec: + value = (uint)mem > 0 && (uint)mem <= (uint)value ? mem - 1 : value; + break; + } + + _context.MemoryAccessor.Write(address, value); + } + } + + /// + /// Apply a fence operation on a syncpoint. 
+ /// + /// Method call argument + public void Syncpointb(int argument) + { + SyncpointbOperation operation = _state.State.SyncpointbOperation; + + uint syncpointId = (uint)_state.State.SyncpointbSyncptIndex; + + if (operation == SyncpointbOperation.Wait) + { + uint threshold = (uint)_state.State.SyncpointaPayload; + + _context.Synchronization.WaitOnSyncpoint(syncpointId, threshold, Timeout.InfiniteTimeSpan); + } + else if (operation == SyncpointbOperation.Incr) + { + _context.Synchronization.IncrementSyncpoint(syncpointId); + } + + _context.AdvanceSequence(); + } + + /// + /// Waits for the GPU to be idle. + /// + /// Method call argument + public void WaitForIdle(int argument) + { + _context.Methods.PerformDeferredDraws(); + _context.Renderer.Pipeline.Barrier(); + } + + /// + /// Send macro code/data to the MME + /// + /// Method call argument + public void LoadMmeInstructionRam(int argument) + { + _macroCode[_state.State.LoadMmeInstructionRamPointer++] = argument; + } + + /// + /// Bind a macro index to a position for the MME + /// + /// Method call argument + public void LoadMmeStartAddressRam(int argument) + { + _macros[_state.State.LoadMmeStartAddressRamPointer++] = new Macro(argument); + } + + /// + /// Change the shadow RAM setting + /// + /// Method call argument + public void SetMmeShadowRamControl(int argument) + { + ShadowCtrl = (ShadowRamControl)argument; + } + + /// + /// Pushes an argument to a macro. + /// + /// Index of the macro + /// Argument to be pushed to the macro + public void MmePushArgument(int index, int argument) + { + _macros[index].PushArgument(argument); + } + + /// + /// Prepares a macro for execution. + /// + /// Index of the macro + /// Initial argument passed to the macro + public void MmeStart(int index, int argument) + { + _macros[index].StartExecution(argument); + } + + /// + /// Executes a macro. + /// + /// Index of the macro + /// Current GPU state + public void CallMme(int index, GpuState state) + { + _macros[index].Execute(_macroCode, ShadowCtrl, state); + } + } +} diff --git a/Ryujinx.Graphics.Gpu/Engine/GPFifo/GPFifoClassState.cs b/Ryujinx.Graphics.Gpu/Engine/GPFifo/GPFifoClassState.cs new file mode 100644 index 0000000000..3b28266859 --- /dev/null +++ b/Ryujinx.Graphics.Gpu/Engine/GPFifo/GPFifoClassState.cs @@ -0,0 +1,186 @@ +// This file was auto-generated from NVIDIA official Maxwell definitions. 
+ +using Ryujinx.Common.Memory; + +namespace Ryujinx.Graphics.Gpu.Engine.GPFifo +{ + enum SemaphoredOperation + { + Acquire = 1, + Release = 2, + AcqGeq = 4, + AcqAnd = 8, + Reduction = 16 + } + + enum SemaphoredAcquireSwitch + { + Disabled = 0, + Enabled = 1 + } + + enum SemaphoredReleaseWfi + { + En = 0, + Dis = 1 + } + + enum SemaphoredReleaseSize + { + SixteenBytes = 0, + FourBytes = 1 + } + + enum SemaphoredReduction + { + Min = 0, + Max = 1, + Xor = 2, + And = 3, + Or = 4, + Add = 5, + Inc = 6, + Dec = 7 + } + + enum SemaphoredFormat + { + Signed = 0, + Unsigned = 1 + } + + enum MemOpCTlbInvalidatePdb + { + One = 0, + All = 1 + } + + enum MemOpCTlbInvalidateGpc + { + Enable = 0, + Disable = 1 + } + + enum MemOpCTlbInvalidateTarget + { + VidMem = 0, + SysMemCoherent = 2, + SysMemNoncoherent = 3 + } + + enum MemOpDOperation + { + Membar = 5, + MmuTlbInvalidate = 9, + L2PeermemInvalidate = 13, + L2SysmemInvalidate = 14, + L2CleanComptags = 15, + L2FlushDirty = 16 + } + + enum SyncpointbOperation + { + Wait = 0, + Incr = 1 + } + + enum SyncpointbWaitSwitch + { + Dis = 0, + En = 1 + } + + enum WfiScope + { + CurrentScgType = 0, + All = 1 + } + + enum YieldOp + { + Nop = 0, + PbdmaTimeslice = 1, + RunlistTimeslice = 2, + Tsg = 3 + } + + struct GPFifoClassState + { + public uint SetObject; + public int SetObjectNvclass => (int)((SetObject >> 0) & 0xFFFF); + public int SetObjectEngine => (int)((SetObject >> 16) & 0x1F); + public uint Illegal; + public int IllegalHandle => (int)(Illegal); + public uint Nop; + public int NopHandle => (int)(Nop); + public uint Reserved0C; + public uint Semaphorea; + public int SemaphoreaOffsetUpper => (int)((Semaphorea >> 0) & 0xFF); + public uint Semaphoreb; + public int SemaphorebOffsetLower => (int)((Semaphoreb >> 2) & 0x3FFFFFFF); + public uint Semaphorec; + public int SemaphorecPayload => (int)(Semaphorec); + public uint Semaphored; + public SemaphoredOperation SemaphoredOperation => (SemaphoredOperation)((Semaphored >> 0) & 0x1F); + public SemaphoredAcquireSwitch SemaphoredAcquireSwitch => (SemaphoredAcquireSwitch)((Semaphored >> 12) & 0x1); + public SemaphoredReleaseWfi SemaphoredReleaseWfi => (SemaphoredReleaseWfi)((Semaphored >> 20) & 0x1); + public SemaphoredReleaseSize SemaphoredReleaseSize => (SemaphoredReleaseSize)((Semaphored >> 24) & 0x1); + public SemaphoredReduction SemaphoredReduction => (SemaphoredReduction)((Semaphored >> 27) & 0xF); + public SemaphoredFormat SemaphoredFormat => (SemaphoredFormat)((Semaphored >> 31) & 0x1); + public uint NonStallInterrupt; + public int NonStallInterruptHandle => (int)(NonStallInterrupt); + public uint FbFlush; + public int FbFlushHandle => (int)(FbFlush); + public uint Reserved28; + public uint Reserved2C; + public uint MemOpC; + public int MemOpCOperandLow => (int)((MemOpC >> 2) & 0x3FFFFFFF); + public MemOpCTlbInvalidatePdb MemOpCTlbInvalidatePdb => (MemOpCTlbInvalidatePdb)((MemOpC >> 0) & 0x1); + public MemOpCTlbInvalidateGpc MemOpCTlbInvalidateGpc => (MemOpCTlbInvalidateGpc)((MemOpC >> 1) & 0x1); + public MemOpCTlbInvalidateTarget MemOpCTlbInvalidateTarget => (MemOpCTlbInvalidateTarget)((MemOpC >> 10) & 0x3); + public int MemOpCTlbInvalidateAddrLo => (int)((MemOpC >> 12) & 0xFFFFF); + public uint MemOpD; + public int MemOpDOperandHigh => (int)((MemOpD >> 0) & 0xFF); + public MemOpDOperation MemOpDOperation => (MemOpDOperation)((MemOpD >> 27) & 0x1F); + public int MemOpDTlbInvalidateAddrHi => (int)((MemOpD >> 0) & 0xFF); + public uint Reserved38; + public uint Reserved3C; + public uint Reserved40; + 
public uint Reserved44; + public uint Reserved48; + public uint Reserved4C; + public uint SetReference; + public int SetReferenceCount => (int)(SetReference); + public uint Reserved54; + public uint Reserved58; + public uint Reserved5C; + public uint Reserved60; + public uint Reserved64; + public uint Reserved68; + public uint Reserved6C; + public uint Syncpointa; + public int SyncpointaPayload => (int)(Syncpointa); + public uint Syncpointb; + public SyncpointbOperation SyncpointbOperation => (SyncpointbOperation)((Syncpointb >> 0) & 0x1); + public SyncpointbWaitSwitch SyncpointbWaitSwitch => (SyncpointbWaitSwitch)((Syncpointb >> 4) & 0x1); + public int SyncpointbSyncptIndex => (int)((Syncpointb >> 8) & 0xFFF); + public uint Wfi; + public WfiScope WfiScope => (WfiScope)((Wfi >> 0) & 0x1); + public uint CrcCheck; + public int CrcCheckValue => (int)(CrcCheck); + public uint Yield; + public YieldOp YieldOp => (YieldOp)((Yield >> 0) & 0x3); + // TODO: Eventually move this to per-engine state. + public Array31 Reserved84; + public uint NoOperation; + public uint SetNotifyA; + public uint SetNotifyB; + public uint Notify; + public uint WaitForIdle; + public uint LoadMmeInstructionRamPointer; + public uint LoadMmeInstructionRam; + public uint LoadMmeStartAddressRamPointer; + public uint LoadMmeStartAddressRam; + public uint SetMmeShadowRamControl; + } +} diff --git a/Ryujinx.Graphics.Gpu/Engine/GPFifo/GPFifoDevice.cs b/Ryujinx.Graphics.Gpu/Engine/GPFifo/GPFifoDevice.cs new file mode 100644 index 0000000000..466bff8fd4 --- /dev/null +++ b/Ryujinx.Graphics.Gpu/Engine/GPFifo/GPFifoDevice.cs @@ -0,0 +1,188 @@ +using System; +using System.Collections.Concurrent; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using System.Threading; + +namespace Ryujinx.Graphics.Gpu.Engine.GPFifo +{ + /// + /// Represents a GPU General Purpose FIFO device. + /// + public sealed class GPFifoDevice : IDisposable + { + /// + /// Indicates if the command buffer has pre-fetch enabled. + /// + private enum CommandBufferType + { + Prefetch, + NoPrefetch + } + + /// + /// Command buffer data. + /// + private struct CommandBuffer + { + /// + /// The type of the command buffer. + /// + public CommandBufferType Type; + + /// + /// Fetched data. + /// + public int[] Words; + + /// + /// The GPFIFO entry address (used in mode). + /// + public ulong EntryAddress; + + /// + /// The count of entries inside this GPFIFO entry. + /// + public uint EntryCount; + + /// + /// Fetch the command buffer. + /// + public void Fetch(GpuContext context) + { + if (Words == null) + { + Words = MemoryMarshal.Cast(context.MemoryAccessor.GetSpan(EntryAddress, (int)EntryCount * 4)).ToArray(); + } + } + } + + private readonly ConcurrentQueue _commandBufferQueue; + + private CommandBuffer _currentCommandBuffer; + + private readonly bool _ibEnable; + private readonly GpuContext _context; + private readonly AutoResetEvent _event; + private readonly GPFifoProcessor _processor; + + /// + /// Creates a new instance of the GPU General Purpose FIFO device. + /// + /// GPU context that the GPFIFO belongs to + internal GPFifoDevice(GpuContext context) + { + _commandBufferQueue = new ConcurrentQueue(); + _ibEnable = true; + _context = context; + _event = new AutoResetEvent(false); + + _processor = new GPFifoProcessor(context); + } + + /// + /// Signal the FIFO that there are new entries to process. 
+ /// + public void SignalNewEntries() + { + _event.Set(); + } + + /// + /// Push a GPFIFO entry in the form of a prefetched command buffer. + /// It is intended to be used by nvservices to handle special cases. + /// + /// The command buffer containing the prefetched commands + public void PushHostCommandBuffer(int[] commandBuffer) + { + _commandBufferQueue.Enqueue(new CommandBuffer + { + Type = CommandBufferType.Prefetch, + Words = commandBuffer, + EntryAddress = ulong.MaxValue, + EntryCount = (uint)commandBuffer.Length + }); + } + + /// + /// Create a CommandBuffer from a GPFIFO entry. + /// + /// The GPFIFO entry + /// A new CommandBuffer based on the GPFIFO entry + private CommandBuffer CreateCommandBuffer(GPEntry entry) + { + CommandBufferType type = CommandBufferType.Prefetch; + + if (entry.Entry1Sync == Entry1Sync.Wait) + { + type = CommandBufferType.NoPrefetch; + } + + ulong startAddress = ((ulong)entry.Entry0Get << 2) | ((ulong)entry.Entry1GetHi << 32); + + return new CommandBuffer + { + Type = type, + Words = null, + EntryAddress = startAddress, + EntryCount = (uint)entry.Entry1Length + }; + } + + /// + /// Pushes GPFIFO entries. + /// + /// GPFIFO entries + public void PushEntries(ReadOnlySpan entries) + { + bool beforeBarrier = true; + + for (int index = 0; index < entries.Length; index++) + { + ulong entry = entries[index]; + + CommandBuffer commandBuffer = CreateCommandBuffer(Unsafe.As(ref entry)); + + if (beforeBarrier && commandBuffer.Type == CommandBufferType.Prefetch) + { + commandBuffer.Fetch(_context); + } + + if (commandBuffer.Type == CommandBufferType.NoPrefetch) + { + beforeBarrier = false; + } + + _commandBufferQueue.Enqueue(commandBuffer); + } + } + + /// + /// Waits until commands are pushed to the FIFO. + /// + /// True if commands were received, false if wait timed out + public bool WaitForCommands() + { + return _event.WaitOne(8); + } + + /// + /// Processes commands pushed to the FIFO. + /// + public void DispatchCalls() + { + while (_ibEnable && _commandBufferQueue.TryDequeue(out CommandBuffer entry)) + { + _currentCommandBuffer = entry; + _currentCommandBuffer.Fetch(_context); + + _processor.Process(_currentCommandBuffer.Words); + } + } + + /// + /// Disposes of resources used for GPFifo command processing. + /// + public void Dispose() => _event.Dispose(); + } +} diff --git a/Ryujinx.Graphics.Gpu/Engine/GPFifo/GPFifoProcessor.cs b/Ryujinx.Graphics.Gpu/Engine/GPFifo/GPFifoProcessor.cs new file mode 100644 index 0000000000..115361f3e4 --- /dev/null +++ b/Ryujinx.Graphics.Gpu/Engine/GPFifo/GPFifoProcessor.cs @@ -0,0 +1,179 @@ +using Ryujinx.Graphics.Gpu.State; +using System; +using System.Runtime.CompilerServices; + +namespace Ryujinx.Graphics.Gpu.Engine.GPFifo +{ + /// + /// Represents a GPU General Purpose FIFO command processor. + /// + class GPFifoProcessor + { + private const int MacrosCount = 0x80; + private const int MacroIndexMask = MacrosCount - 1; + + private readonly GpuContext _context; + + /// + /// Internal GPFIFO state. + /// + private struct DmaState + { + public int Method; + public int SubChannel; + public int MethodCount; + public bool NonIncrementing; + public bool IncrementOnce; + } + + private DmaState _state; + + private readonly GpuState[] _subChannels; + private readonly GPFifoClass _fifoClass; + + /// + /// Creates a new instance of the GPU General Purpose FIFO command processor. 
+ /// + /// GPU context + public GPFifoProcessor(GpuContext context) + { + _context = context; + + _fifoClass = new GPFifoClass(context); + + _subChannels = new GpuState[8]; + + for (int index = 0; index < _subChannels.Length; index++) + { + _subChannels[index] = new GpuState(); + + _context.Methods.RegisterCallbacks(_subChannels[index]); + } + } + + /// + /// Processes a command buffer. + /// + /// Command buffer + public void Process(ReadOnlySpan commandBuffer) + { + for (int index = 0; index < commandBuffer.Length; index++) + { + int command = commandBuffer[index]; + + if (_state.MethodCount != 0) + { + Send(new MethodParams(_state.Method, command, _state.SubChannel, _state.MethodCount)); + + if (!_state.NonIncrementing) + { + _state.Method++; + } + + if (_state.IncrementOnce) + { + _state.NonIncrementing = true; + } + + _state.MethodCount--; + } + else + { + CompressedMethod meth = Unsafe.As(ref command); + + if (TryFastUniformBufferUpdate(meth, commandBuffer, index)) + { + index += meth.MethodCount; + continue; + } + + switch (meth.SecOp) + { + case SecOp.IncMethod: + case SecOp.NonIncMethod: + case SecOp.OneInc: + _state.Method = meth.MethodAddress; + _state.SubChannel = meth.MethodSubchannel; + _state.MethodCount = meth.MethodCount; + _state.IncrementOnce = meth.SecOp == SecOp.OneInc; + _state.NonIncrementing = meth.SecOp == SecOp.NonIncMethod; + break; + case SecOp.ImmdDataMethod: + Send(new MethodParams(meth.MethodAddress, meth.ImmdData, meth.MethodSubchannel, 1)); + break; + } + } + } + } + + /// + /// Tries to perform a fast constant buffer data update. + /// If successful, all data will be copied at once, and + 1 + /// command buffer entries will be consumed. + /// + /// Compressed method to be checked + /// Command buffer where is contained + /// Offset at where is located + /// True if the fast copy was successful, false otherwise + private bool TryFastUniformBufferUpdate(CompressedMethod meth, ReadOnlySpan commandBuffer, int offset) + { + int availableCount = commandBuffer.Length - offset; + + if (meth.MethodCount < availableCount && + meth.SecOp == SecOp.NonIncMethod && + meth.MethodAddress == (int)MethodOffset.UniformBufferUpdateData) + { + GpuState state = _subChannels[meth.MethodSubchannel]; + + _context.Methods.UniformBufferUpdate(state, commandBuffer.Slice(offset + 1, meth.MethodCount)); + + return true; + } + + return false; + } + + /// + /// Sends a uncompressed method for processing by the graphics pipeline. + /// + /// Method to be processed + private void Send(MethodParams meth) + { + if ((MethodOffset)meth.Method == MethodOffset.BindChannel) + { + _subChannels[meth.SubChannel] = new GpuState(); + + _context.Methods.RegisterCallbacks(_subChannels[meth.SubChannel]); + } + else if (meth.Method < 0x60) + { + // TODO: check if macros are shared between subchannels or not. For now let's assume they are. 
+ _fifoClass.Write(meth.Method * 4, meth.Argument); + } + else if (meth.Method < 0xe00) + { + _subChannels[meth.SubChannel].CallMethod(meth, _fifoClass.ShadowCtrl); + } + else + { + int macroIndex = (meth.Method >> 1) & MacroIndexMask; + + if ((meth.Method & 1) != 0) + { + _fifoClass.MmePushArgument(macroIndex, meth.Argument); + } + else + { + _fifoClass.MmeStart(macroIndex, meth.Argument); + } + + if (meth.IsLastCall) + { + _fifoClass.CallMme(macroIndex, _subChannels[meth.SubChannel]); + + _context.Methods.PerformDeferredDraws(); + } + } + } + } +} diff --git a/Ryujinx.Graphics.Gpu/Engine/MME/Macro.cs b/Ryujinx.Graphics.Gpu/Engine/MME/Macro.cs new file mode 100644 index 0000000000..10127d11b1 --- /dev/null +++ b/Ryujinx.Graphics.Gpu/Engine/MME/Macro.cs @@ -0,0 +1,69 @@ +using Ryujinx.Graphics.Gpu.State; + +namespace Ryujinx.Graphics.Gpu.Engine.MME +{ + /// + /// GPU macro program. + /// + struct Macro + { + /// + /// Word offset of the code on the code memory. + /// + public int Position { get; } + + private bool _executionPending; + private int _argument; + + private readonly MacroInterpreter _interpreter; + + /// + /// Creates a new instance of the GPU cached macro program. + /// + /// Macro code start position + public Macro(int position) + { + Position = position; + + _executionPending = false; + _argument = 0; + + _interpreter = new MacroInterpreter(); + } + + /// + /// Sets the first argument for the macro call. + /// + /// First argument + public void StartExecution(int argument) + { + _argument = argument; + + _executionPending = true; + } + + /// + /// Starts executing the macro program code. + /// + /// Program code + /// Current GPU state + public void Execute(int[] mme, ShadowRamControl shadowCtrl, GpuState state) + { + if (_executionPending) + { + _executionPending = false; + + _interpreter?.Execute(mme, Position, _argument, shadowCtrl, state); + } + } + + /// + /// Pushes an argument to the macro call argument FIFO. + /// + /// Argument to be pushed + public void PushArgument(int argument) + { + _interpreter?.Fifo.Enqueue(argument); + } + } +} diff --git a/Ryujinx.Graphics.Gpu/Engine/MethodFifo.cs b/Ryujinx.Graphics.Gpu/Engine/MethodFifo.cs deleted file mode 100644 index c1f45941c5..0000000000 --- a/Ryujinx.Graphics.Gpu/Engine/MethodFifo.cs +++ /dev/null @@ -1,103 +0,0 @@ -using Ryujinx.Graphics.Gpu.State; -using System.Threading; - -namespace Ryujinx.Graphics.Gpu.Engine -{ - partial class Methods - { - /// - /// Writes a GPU counter to guest memory. - /// - /// Current GPU state - /// Method call argument - public void Semaphore(GpuState state, int argument) - { - FifoSemaphoreOperation op = (FifoSemaphoreOperation)(argument & 3); - - var semaphore = state.Get(MethodOffset.Semaphore); - - int value = semaphore.Payload; - - if (op == FifoSemaphoreOperation.Counter) - { - // TODO: There's much more that should be done here. - // NVN only supports the "Accumulate" mode, so we - // can't currently guess which bits specify the - // reduction operation. - value += _context.MemoryAccessor.Read(semaphore.Address.Pack()); - } - - _context.MemoryAccessor.Write(semaphore.Address.Pack(), value); - - _context.AdvanceSequence(); - } - - /// - /// Waits for the GPU to be idle. - /// - /// Current GPU state - /// Method call argument - public void WaitForIdle(GpuState state, int argument) - { - PerformDeferredDraws(); - - _context.Renderer.Pipeline.Barrier(); - } - - /// - /// Send macro code/data to the MME. 
- /// - /// Current GPU state - /// Method call argument - public void SendMacroCodeData(GpuState state, int argument) - { - int macroUploadAddress = state.Get(MethodOffset.MacroUploadAddress); - - _context.Fifo.SendMacroCodeData(macroUploadAddress++, argument); - - state.Write((int)MethodOffset.MacroUploadAddress, macroUploadAddress); - } - - /// - /// Bind a macro index to a position for the MME. - /// - /// Current GPU state - /// Method call argument - public void BindMacro(GpuState state, int argument) - { - int macroBindingIndex = state.Get(MethodOffset.MacroBindingIndex); - - _context.Fifo.BindMacro(macroBindingIndex++, argument); - - state.Write((int)MethodOffset.MacroBindingIndex, macroBindingIndex); - } - - public void SetMmeShadowRamControl(GpuState state, int argument) - { - _context.Fifo.SetMmeShadowRamControl((ShadowRamControl)argument); - } - - /// - /// Apply a fence operation on a syncpoint. - /// - /// Current GPU state - /// Method call argument - public void FenceAction(GpuState state, int argument) - { - uint threshold = state.Get(MethodOffset.FenceValue); - - FenceActionOperation operation = (FenceActionOperation)(argument & 1); - - uint syncpointId = (uint)(argument >> 8) & 0xFF; - - if (operation == FenceActionOperation.Acquire) - { - _context.Synchronization.WaitOnSyncpoint(syncpointId, threshold, Timeout.InfiniteTimeSpan); - } - else if (operation == FenceActionOperation.Increment) - { - _context.Synchronization.IncrementSyncpoint(syncpointId); - } - } - } -} diff --git a/Ryujinx.Graphics.Gpu/Engine/MethodUniformBufferUpdate.cs b/Ryujinx.Graphics.Gpu/Engine/MethodUniformBufferUpdate.cs index 524f5e0399..032a58683a 100644 --- a/Ryujinx.Graphics.Gpu/Engine/MethodUniformBufferUpdate.cs +++ b/Ryujinx.Graphics.Gpu/Engine/MethodUniformBufferUpdate.cs @@ -1,4 +1,6 @@ using Ryujinx.Graphics.Gpu.State; +using System; +using System.Runtime.InteropServices; namespace Ryujinx.Graphics.Gpu.Engine { @@ -19,5 +21,21 @@ namespace Ryujinx.Graphics.Gpu.Engine _context.AdvanceSequence(); } + + /// + /// Updates the uniform buffer data with inline data. + /// + /// Current GPU state + /// Data to be written to the uniform buffer + public void UniformBufferUpdate(GpuState state, ReadOnlySpan data) + { + var uniformBuffer = state.Get(MethodOffset.UniformBufferState); + + _context.MemoryAccessor.Write(uniformBuffer.Address.Pack() + (uint)uniformBuffer.Offset, MemoryMarshal.Cast(data)); + + state.SetUniformBufferOffset(uniformBuffer.Offset + data.Length * 4); + + _context.AdvanceSequence(); + } } } \ No newline at end of file diff --git a/Ryujinx.Graphics.Gpu/Engine/Methods.cs b/Ryujinx.Graphics.Gpu/Engine/Methods.cs index df0e713dff..e84687effd 100644 --- a/Ryujinx.Graphics.Gpu/Engine/Methods.cs +++ b/Ryujinx.Graphics.Gpu/Engine/Methods.cs @@ -106,20 +106,6 @@ namespace Ryujinx.Graphics.Gpu.Engine state.RegisterCallback(MethodOffset.UniformBufferBindFragment, UniformBufferBindFragment); } - /// - /// Register callback for Fifo method calls that triggers an action on the GPFIFO. 
- /// - /// GPU state where the triggers will be registered - public void RegisterCallbacksForFifo(GpuState state) - { - state.RegisterCallback(MethodOffset.Semaphore, Semaphore); - state.RegisterCallback(MethodOffset.FenceAction, FenceAction); - state.RegisterCallback(MethodOffset.WaitForIdle, WaitForIdle); - state.RegisterCallback(MethodOffset.SendMacroCodeData, SendMacroCodeData); - state.RegisterCallback(MethodOffset.BindMacro, BindMacro); - state.RegisterCallback(MethodOffset.SetMmeShadowRamControl, SetMmeShadowRamControl); - } - /// /// Updates host state based on the current guest GPU state. /// diff --git a/Ryujinx.Graphics.Gpu/GpuContext.cs b/Ryujinx.Graphics.Gpu/GpuContext.cs index b07694b95d..8e9f27329b 100644 --- a/Ryujinx.Graphics.Gpu/GpuContext.cs +++ b/Ryujinx.Graphics.Gpu/GpuContext.cs @@ -1,5 +1,6 @@ using Ryujinx.Graphics.GAL; using Ryujinx.Graphics.Gpu.Engine; +using Ryujinx.Graphics.Gpu.Engine.GPFifo; using Ryujinx.Graphics.Gpu.Memory; using Ryujinx.Graphics.Gpu.Synchronization; using System; @@ -37,14 +38,9 @@ namespace Ryujinx.Graphics.Gpu internal Methods Methods { get; } /// - /// GPU commands FIFO. + /// GPU General Purpose FIFO queue. /// - internal NvGpuFifo Fifo { get; } - - /// - /// DMA pusher. - /// - public DmaPusher DmaPusher { get; } + public GPFifoDevice GPFifo { get; } /// /// GPU synchronization manager. @@ -83,9 +79,7 @@ namespace Ryujinx.Graphics.Gpu Methods = new Methods(this); - Fifo = new NvGpuFifo(this); - - DmaPusher = new DmaPusher(this); + GPFifo = new GPFifoDevice(this); Synchronization = new SynchronizationManager(); @@ -125,6 +119,7 @@ namespace Ryujinx.Graphics.Gpu Methods.BufferManager.Dispose(); Methods.TextureManager.Dispose(); Renderer.Dispose(); + GPFifo.Dispose(); } } } \ No newline at end of file diff --git a/Ryujinx.Graphics.Gpu/NvGpuFifo.cs b/Ryujinx.Graphics.Gpu/NvGpuFifo.cs deleted file mode 100644 index 36a275e291..0000000000 --- a/Ryujinx.Graphics.Gpu/NvGpuFifo.cs +++ /dev/null @@ -1,220 +0,0 @@ -using Ryujinx.Graphics.Gpu.State; -using System.IO; - -namespace Ryujinx.Graphics.Gpu -{ - /// - /// GPU commands FIFO. - /// - class NvGpuFifo - { - private const int MacrosCount = 0x80; - private const int MacroIndexMask = MacrosCount - 1; - - // Note: The size of the macro memory is unknown, we just make - // a guess here and use 256kb as the size. Increase if needed. - private const int MmeWords = 256 * 256; - - private GpuContext _context; - - /// - /// Cached GPU macro program. - /// - private struct CachedMacro - { - /// - /// Word offset of the code on the code memory. - /// - public int Position { get; } - - private bool _executionPending; - private int _argument; - - private MacroInterpreter _interpreter; - - /// - /// Creates a new instance of the GPU cached macro program. - /// - /// Macro code start position - public CachedMacro(int position) - { - Position = position; - - _executionPending = false; - _argument = 0; - - _interpreter = new MacroInterpreter(); - } - - /// - /// Sets the first argument for the macro call. - /// - /// First argument - public void StartExecution(int argument) - { - _argument = argument; - - _executionPending = true; - } - - /// - /// Starts executing the macro program code. 
- /// - /// Program code - /// Current GPU state - public void Execute(int[] mme, ShadowRamControl shadowCtrl, GpuState state) - { - if (_executionPending) - { - _executionPending = false; - - _interpreter?.Execute(mme, Position, _argument, shadowCtrl, state); - } - } - - /// - /// Pushes an argument to the macro call argument FIFO. - /// - /// Argument to be pushed - public void PushArgument(int argument) - { - _interpreter?.Fifo.Enqueue(argument); - } - } - - private ShadowRamControl _shadowCtrl; - - private CachedMacro[] _macros; - - private int[] _mme; - - /// - /// GPU sub-channel information. - /// - private class SubChannel - { - /// - /// Sub-channel GPU state. - /// - public GpuState State { get; } - - /// - /// Engine bound to the sub-channel. - /// - public ClassId Class { get; set; } - - /// - /// Creates a new instance of the GPU sub-channel. - /// - public SubChannel() - { - State = new GpuState(); - } - } - - private SubChannel[] _subChannels; - - private SubChannel _fifoChannel; - - /// - /// Creates a new instance of the GPU commands FIFO. - /// - /// GPU emulation context - public NvGpuFifo(GpuContext context) - { - _context = context; - - _macros = new CachedMacro[MacrosCount]; - - _mme = new int[MmeWords]; - - _fifoChannel = new SubChannel(); - - _context.Methods.RegisterCallbacksForFifo(_fifoChannel.State); - - _subChannels = new SubChannel[8]; - - for (int index = 0; index < _subChannels.Length; index++) - { - _subChannels[index] = new SubChannel(); - - _context.Methods.RegisterCallbacks(_subChannels[index].State); - } - } - - /// - /// Send macro code/data to the MME - /// - /// The index in the MME - /// The data to use - public void SendMacroCodeData(int index, int data) - { - _mme[index] = data; - } - - /// - /// Bind a macro index to a position for the MME - /// - /// The macro index - /// The position of the macro - public void BindMacro(int index, int position) - { - _macros[index] = new CachedMacro(position); - } - - /// - /// Change the shadow RAM setting - /// - /// The new Shadow RAM setting - public void SetMmeShadowRamControl(ShadowRamControl shadowCtrl) - { - _shadowCtrl = shadowCtrl; - } - - /// - /// Calls a GPU method. - /// - /// GPU method call parameters - public void CallMethod(MethodParams meth) - { - if ((MethodOffset)meth.Method == MethodOffset.BindChannel) - { - _subChannels[meth.SubChannel] = new SubChannel - { - Class = (ClassId)meth.Argument - }; - - _context.Methods.RegisterCallbacks(_subChannels[meth.SubChannel].State); - } - else if (meth.Method < 0x60) - { - // TODO: check if macros are shared between subchannels or not. For now let's assume they are. 
- _fifoChannel.State.CallMethod(meth, _shadowCtrl); - } - else if (meth.Method < 0xe00) - { - _subChannels[meth.SubChannel].State.CallMethod(meth, _shadowCtrl); - } - else - { - int macroIndex = (meth.Method >> 1) & MacroIndexMask; - - if ((meth.Method & 1) != 0) - { - _macros[macroIndex].PushArgument(meth.Argument); - } - else - { - _macros[macroIndex].StartExecution(meth.Argument); - } - - if (meth.IsLastCall) - { - _macros[macroIndex].Execute(_mme, _shadowCtrl, _subChannels[meth.SubChannel].State); - - _context.Methods.PerformDeferredDraws(); - } - } - } - } -} \ No newline at end of file diff --git a/Ryujinx.Graphics.Gpu/Ryujinx.Graphics.Gpu.csproj b/Ryujinx.Graphics.Gpu/Ryujinx.Graphics.Gpu.csproj index 9348d04bb5..a9e81be357 100644 --- a/Ryujinx.Graphics.Gpu/Ryujinx.Graphics.Gpu.csproj +++ b/Ryujinx.Graphics.Gpu/Ryujinx.Graphics.Gpu.csproj @@ -2,6 +2,7 @@ + diff --git a/Ryujinx.Graphics.Gpu/State/FenceActionOperation.cs b/Ryujinx.Graphics.Gpu/State/FenceActionOperation.cs deleted file mode 100644 index c03443a8d5..0000000000 --- a/Ryujinx.Graphics.Gpu/State/FenceActionOperation.cs +++ /dev/null @@ -1,11 +0,0 @@ -namespace Ryujinx.Graphics.Gpu.State -{ - /// - /// Fence action operations. - /// - enum FenceActionOperation - { - Acquire = 0, - Increment = 1 - } -} diff --git a/Ryujinx.Graphics.Gpu/State/FifoSemaphoreOperation.cs b/Ryujinx.Graphics.Gpu/State/FifoSemaphoreOperation.cs deleted file mode 100644 index a6ccdcfe4d..0000000000 --- a/Ryujinx.Graphics.Gpu/State/FifoSemaphoreOperation.cs +++ /dev/null @@ -1,9 +0,0 @@ -namespace Ryujinx.Graphics.Gpu.State -{ - enum FifoSemaphoreOperation - { - Counter = 0, - Acquire = 1, - Release = 2 - } -} diff --git a/Ryujinx.Graphics.Gpu/State/MethodOffset.cs b/Ryujinx.Graphics.Gpu/State/MethodOffset.cs index d9e2ce9309..505e3d89ee 100644 --- a/Ryujinx.Graphics.Gpu/State/MethodOffset.cs +++ b/Ryujinx.Graphics.Gpu/State/MethodOffset.cs @@ -9,15 +9,6 @@ namespace Ryujinx.Graphics.Gpu.State enum MethodOffset { BindChannel = 0x0, - Semaphore = 0x4, - FenceValue = 0x1c, - FenceAction = 0x1d, - WaitForIdle = 0x44, - MacroUploadAddress = 0x45, - SendMacroCodeData = 0x46, - MacroBindingIndex = 0x47, - BindMacro = 0x48, - SetMmeShadowRamControl = 0x49, I2mParams = 0x60, LaunchDma = 0x6c, LoadInlineData = 0x6d, diff --git a/Ryujinx.HLE/HOS/Services/Nv/NvDrvServices/NvHostChannel/NvHostChannelDeviceFile.cs b/Ryujinx.HLE/HOS/Services/Nv/NvDrvServices/NvHostChannel/NvHostChannelDeviceFile.cs index 70c9a47bf0..b45d84018a 100644 --- a/Ryujinx.HLE/HOS/Services/Nv/NvDrvServices/NvHostChannel/NvHostChannelDeviceFile.cs +++ b/Ryujinx.HLE/HOS/Services/Nv/NvDrvServices/NvHostChannel/NvHostChannelDeviceFile.cs @@ -414,10 +414,10 @@ namespace Ryujinx.HLE.HOS.Services.Nv.NvDrvServices.NvHostChannel if (header.Flags.HasFlag(SubmitGpfifoFlags.FenceWait) && !_device.System.HostSyncpoint.IsSyncpointExpired(header.Fence.Id, header.Fence.Value)) { - _device.Gpu.DmaPusher.PushHostCommandBuffer(CreateWaitCommandBuffer(header.Fence)); + _device.Gpu.GPFifo.PushHostCommandBuffer(CreateWaitCommandBuffer(header.Fence)); } - _device.Gpu.DmaPusher.PushEntries(entries); + _device.Gpu.GPFifo.PushEntries(entries); header.Fence.Id = _channelSyncpoint.Id; @@ -439,12 +439,12 @@ namespace Ryujinx.HLE.HOS.Services.Nv.NvDrvServices.NvHostChannel if (header.Flags.HasFlag(SubmitGpfifoFlags.FenceIncrement)) { - _device.Gpu.DmaPusher.PushHostCommandBuffer(CreateIncrementCommandBuffer(ref header.Fence, header.Flags)); + _device.Gpu.GPFifo.PushHostCommandBuffer(CreateIncrementCommandBuffer(ref 
header.Fence, header.Flags)); } header.Flags = SubmitGpfifoFlags.None; - _device.Gpu.DmaPusher.SignalNewEntries(); + _device.Gpu.GPFifo.SignalNewEntries(); return NvInternalResult.Success; } diff --git a/Ryujinx.HLE/Switch.cs b/Ryujinx.HLE/Switch.cs index 9defe25d5b..2e1a4b66a9 100644 --- a/Ryujinx.HLE/Switch.cs +++ b/Ryujinx.HLE/Switch.cs @@ -148,12 +148,12 @@ namespace Ryujinx.HLE public bool WaitFifo() { - return Gpu.DmaPusher.WaitForCommands(); + return Gpu.GPFifo.WaitForCommands(); } public void ProcessFrame() { - Gpu.DmaPusher.DispatchCalls(); + Gpu.GPFifo.DispatchCalls(); } public void PresentFrame(Action swapBuffersCallback)
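
For reference, here is a minimal usage sketch (not part of the diff) of how the reworked GPFIFO path is driven end to end, based only on the call sites above: nvservices packs 64-bit GPFIFO entries and pushes them through GPFifoDevice.PushEntries/SignalNewEntries, while the GPU thread polls WaitForCommands and runs DispatchCalls, which fetches each command buffer and hands its words to GPFifoProcessor.Process. The entry packing mirrors the GPEntry bit layout decoded by CreateCommandBuffer; the helper name MakeGpEntry, the Submit/Pump wrappers, and their parameters are illustrative placeholders, not APIs added by this change.

using System;
using Ryujinx.Graphics.Gpu;

static class GpFifoUsageSketch
{
    // Hypothetical helper: packs a 64-bit GPFIFO entry in the layout that
    // GPFifoDevice.CreateCommandBuffer decodes via GPEntry (Entry0Get/Entry1GetHi
    // form the 40-bit address, Entry1Length is the word count, and Entry1Sync = Wait
    // selects the NoPrefetch path).
    public static ulong MakeGpEntry(ulong gpuVa, int wordCount, bool noPrefetch = false)
    {
        ulong entry0 = gpuVa & 0xFFFFFFFCUL;                 // Address bits [31:2]; entries are 4-byte aligned.
        ulong entry1 = ((gpuVa >> 32) & 0xFF)                // GetHi: address bits [39:32].
                     | ((ulong)(wordCount & 0x1FFFFF) << 10) // Length: command buffer size in words.
                     | (noPrefetch ? 1UL << 31 : 0UL);       // Sync bit: buffer is fetched lazily, acting as a prefetch barrier.
        return entry0 | (entry1 << 32);
    }

    // Producer side (what NvHostChannelDeviceFile does in this diff):
    // queue GPFIFO entries, then wake the GPU thread.
    public static void Submit(GpuContext gpu, ulong commandBufferGpuVa, int wordCount)
    {
        Span<ulong> entries = stackalloc ulong[1];
        entries[0] = MakeGpEntry(commandBufferGpuVa, wordCount);

        gpu.GPFifo.PushEntries(entries);
        gpu.GPFifo.SignalNewEntries();
    }

    // Consumer side (what Switch.WaitFifo/ProcessFrame do): wait briefly for work, then let
    // GPFifoDevice fetch each queued command buffer and pass its words to GPFifoProcessor.Process.
    public static void Pump(GpuContext gpu, Func<bool> running)
    {
        while (running())
        {
            if (gpu.GPFifo.WaitForCommands())
            {
                gpu.GPFifo.DispatchCalls();
            }
        }
    }
}

The producer/consumer surface keeps the old DmaPusher names (PushEntries, SignalNewEntries, WaitForCommands, DispatchCalls); what changed is that per-word method decoding moved from DmaPusher.Step into GPFifoProcessor.Process, with GPFifoDevice reduced to queueing and fetching command buffers.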