JinxRyu/Ryujinx.Graphics.Nvdec/H264Decoder.cs
gdkchan f4f496cb48
NVDEC (H264): Use separate contexts per channel and decode frames in DTS order (#2671)
* Use separate NVDEC contexts per channel (for FFMPEG)

* Remove NVDEC -> VIC frame override hack

* Add missing bottom_field_pic_order_in_frame_present_flag

* Make FFMPEG logging static

* nit: Remove empty lines

* New FFMPEG decoding approach -- call h264_decode_frame directly, trim surface cache to reduce memory usage

* Fix case

* Silence warnings

* PR feedback

* Per-decoder rather than per-codec ownership of surfaces on the cache
2021-09-29 00:43:40 +02:00
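The first bullet above, separate NVDEC contexts per channel, essentially means keeping one FFmpeg decoder state per channel instead of sharing a single global one, so that reference frames and DTS reordering from different streams never mix. A minimal, hypothetical sketch of that idea follows (the class and member names below are illustrative only, not the actual Ryujinx implementation):

// Hypothetical sketch: lazily create one decoder context per channel.
using System.Collections.Generic;

class PerChannelContexts<TContext> where TContext : class, new()
{
    private readonly Dictionary<long, TContext> _contexts = new();

    // Returns the context owned by the given channel, creating it on first use,
    // so decoder state is never shared across channels.
    public TContext GetOrCreate(long channelId)
    {
        if (!_contexts.TryGetValue(channelId, out TContext context))
        {
            _contexts[channelId] = context = new TContext();
        }

        return context;
    }

    // Called when the channel is destroyed, dropping its decoder state.
    public void Remove(long channelId)
    {
        _contexts.Remove(channelId);
    }
}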

using Ryujinx.Graphics.Nvdec.H264;
using Ryujinx.Graphics.Nvdec.Image;
using Ryujinx.Graphics.Nvdec.Types.H264;
using Ryujinx.Graphics.Video;
using System;

namespace Ryujinx.Graphics.Nvdec
{
    static class H264Decoder
    {
        private const int MbSizeInPixels = 16;

        public unsafe static void Decode(NvdecDecoderContext context, ResourceManager rm, ref NvdecRegisters state)
        {
            // Read the picture parameters and the bitstream the guest submitted to NVDEC.
            PictureInfo pictureInfo = rm.Gmm.DeviceRead<PictureInfo>(state.SetPictureInfoOffset);
            H264PictureInfo info = pictureInfo.Convert();

            ReadOnlySpan<byte> bitstream = rm.Gmm.DeviceGetSpan(state.SetBitstreamOffset, (int)pictureInfo.BitstreamSize);

            // Frame dimensions are given in 16x16 macroblocks.
            int width = (int)pictureInfo.PicWidthInMbs * MbSizeInPixels;
            int height = (int)pictureInfo.PicHeightInMbs * MbSizeInPixels;

            int surfaceIndex = (int)pictureInfo.OutputSurfaceIndex;

            uint lumaOffset = state.SetSurfaceLumaOffset[surfaceIndex];
            uint chromaOffset = state.SetSurfaceChromaOffset[surfaceIndex];

            // The decoder belongs to the per-channel context; cached surfaces are owned per decoder.
            Decoder decoder = context.GetDecoder();

            ISurface outputSurface = rm.Cache.Get(decoder, 0, 0, width, height);

            if (decoder.Decode(ref info, outputSurface, bitstream))
            {
                // Write the decoded luma and chroma planes back to guest memory.
                SurfaceWriter.Write(rm.Gmm, outputSurface, lumaOffset, chromaOffset);
            }

            rm.Cache.Put(outputSurface);
        }
    }
}
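
The rm.Cache.Get(decoder, ...) / rm.Cache.Put(...) pair reflects the last commit bullet: cached surfaces are owned by the decoder instance rather than by the codec. A rough, hypothetical sketch of that kind of pooling contract is shown below (this is not the actual Ryujinx SurfaceCache API; the type, method signatures, and names are made up for illustration):

// Hypothetical sketch: surfaces are pooled per owning decoder object and size.
using System;
using System.Collections.Generic;

class OwnedSurfacePool<TSurface>
{
    private readonly Func<int, int, TSurface> _create;
    private readonly Dictionary<(object Owner, int Width, int Height), Queue<TSurface>> _free = new();

    public OwnedSurfacePool(Func<int, int, TSurface> create) => _create = create;

    // Reuses a surface of the right size previously returned by the same owner,
    // or allocates a new one. Keying by owner keeps decoders from sharing surfaces.
    public TSurface Get(object owner, int width, int height)
    {
        if (_free.TryGetValue((owner, width, height), out Queue<TSurface> queue) && queue.Count > 0)
        {
            return queue.Dequeue();
        }

        return _create(width, height);
    }

    // Returns a surface to its owner's pool so a later Get can reuse it.
    public void Put(object owner, int width, int height, TSurface surface)
    {
        if (!_free.TryGetValue((owner, width, height), out Queue<TSurface> queue))
        {
            _free[(owner, width, height)] = queue = new Queue<TSurface>();
        }

        queue.Enqueue(surface);
    }
}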