Merge pull request #4228 from NarcolepticK/lle-mapped-buffer
LLE Mapped Buffer: Add unmapping of write buffer, zero-size, and multiple page handling
This commit is contained in: commit a753b9c6cc

3 changed files with 156 additions and 54 deletions
@@ -14,6 +14,68 @@
 
 namespace Kernel {
 
+void ScanForAndUnmapBuffer(std::array<u32, IPC::COMMAND_BUFFER_LENGTH>& dst_cmd_buf,
+                           const std::size_t dst_command_size, std::size_t& target_index,
+                           SharedPtr<Process> src_process, SharedPtr<Process> dst_process,
+                           const VAddr source_address, const VAddr page_start, const u32 num_pages,
+                           const u32 size, const IPC::MappedBufferPermissions permissions) {
+    while (target_index < dst_command_size) {
+        u32 desc = dst_cmd_buf[target_index++];
+
+        if (IPC::GetDescriptorType(desc) == IPC::DescriptorType::CopyHandle ||
+            IPC::GetDescriptorType(desc) == IPC::DescriptorType::MoveHandle) {
+            u32 num_handles = IPC::HandleNumberFromDesc(desc);
+            for (u32 j = 0; j < num_handles; ++j) {
+                target_index += 1;
+            }
+            continue;
+        }
+
+        if (IPC::GetDescriptorType(desc) == IPC::DescriptorType::CallingPid ||
+            IPC::GetDescriptorType(desc) == IPC::DescriptorType::StaticBuffer) {
+            target_index += 1;
+            continue;
+        }
+
+        if (IPC::GetDescriptorType(desc) == IPC::DescriptorType::MappedBuffer) {
+            VAddr dest_address = dst_cmd_buf[target_index];
+            IPC::MappedBufferDescInfo dest_descInfo{desc};
+            u32 dest_size = static_cast<u32>(dest_descInfo.size);
+            IPC::MappedBufferPermissions dest_permissions = dest_descInfo.perms;
+
+            if (dest_size == 0) {
+                target_index += 1;
+                continue;
+            }
+
+            ASSERT(permissions == dest_permissions && size == dest_size);
+            // Readonly buffers do not need to be copied over to the target
+            // process again because they were (presumably) not modified. This
+            // behavior is consistent with the real kernel.
+            if (permissions != IPC::MappedBufferPermissions::R) {
+                // Copy the modified buffer back into the target process
+                Memory::CopyBlock(*src_process, *dst_process, source_address, dest_address, size);
+            }
+
+            VAddr prev_reserve = page_start - Memory::PAGE_SIZE;
+            VAddr next_reserve = page_start + num_pages * Memory::PAGE_SIZE;
+
+            auto& prev_vma = src_process->vm_manager.FindVMA(prev_reserve)->second;
+            auto& next_vma = src_process->vm_manager.FindVMA(next_reserve)->second;
+            ASSERT(prev_vma.meminfo_state == MemoryState::Reserved &&
+                   next_vma.meminfo_state == MemoryState::Reserved);
+
+            // Unmap the buffer and guard pages from the source process
+            ResultCode result = src_process->vm_manager.UnmapRange(
+                page_start - Memory::PAGE_SIZE, (num_pages + 2) * Memory::PAGE_SIZE);
+            ASSERT(result == RESULT_SUCCESS);
+
+            target_index += 1;
+            break;
+        }
+    }
+}
+
 ResultCode TranslateCommandBuffer(SharedPtr<Thread> src_thread, SharedPtr<Thread> dst_thread,
                                   VAddr src_address, VAddr dst_address, bool reply) {
 
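A note on the unmap range above: the buffer itself spans num_pages pages at page_start, but the translation code (third hunk below) also places one Reserved guard page on each side of it, so the region removed from the source process is num_pages + 2 pages starting one page early. A minimal standalone sketch of that arithmetic, with illustrative names (this is not citra code):

#include <cassert>
#include <cstdint>

constexpr std::uint32_t PAGE_SIZE = 0x1000; // 4 KiB pages, as on the 3DS

struct Range {
    std::uint32_t start;
    std::uint32_t size;
};

// One Reserved guard page sits immediately before the buffer and one
// immediately after it, so the whole region to unmap is num_pages + 2
// pages beginning one page below page_start.
Range GuardedUnmapRange(std::uint32_t page_start, std::uint32_t num_pages) {
    assert((page_start & (PAGE_SIZE - 1)) == 0); // page_start is page-aligned
    return {page_start - PAGE_SIZE, (num_pages + 2) * PAGE_SIZE};
}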
@@ -33,6 +95,18 @@ ResultCode TranslateCommandBuffer(SharedPtr<Thread> src_thread, SharedPtr<Thread
     std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
     Memory::ReadBlock(*src_process, src_address, cmd_buf.data(), command_size * sizeof(u32));
 
+    // Create a copy of the target's command buffer
+    IPC::Header dst_header;
+    Memory::ReadBlock(*dst_process, dst_address, &dst_header.raw, sizeof(dst_header.raw));
+
+    std::size_t dst_untranslated_size = 1u + dst_header.normal_params_size;
+    std::size_t dst_command_size = dst_untranslated_size + dst_header.translate_params_size;
+    std::size_t target_index = dst_untranslated_size;
+
+    std::array<u32, IPC::COMMAND_BUFFER_LENGTH> dst_cmd_buf;
+    Memory::ReadBlock(*dst_process, dst_address, dst_cmd_buf.data(),
+                      dst_command_size * sizeof(u32));
+
     std::size_t i = untranslated_size;
     while (i < command_size) {
         u32 descriptor = cmd_buf[i];
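The dst_command_size computation follows the 3DS IPC command header layout as documented on 3dbrew: the low 6 bits of the header word hold the translate-parameter count, the next 6 bits the normal-parameter count, and the total command length is one word for the header plus both counts. A hedged sketch of that decoding (field names are illustrative; citra's actual IPC::Header is a BitField union):

#include <cstdint>

// Assumed 3DS IPC command header layout (per 3dbrew):
//   bits 16-31: command id
//   bits  6-11: normal (untranslated) parameter count
//   bits  0-5 : translate parameter count
struct IpcHeaderView {
    std::uint32_t raw;
    std::uint32_t command_id() const { return raw >> 16; }
    std::uint32_t normal_params_size() const { return (raw >> 6) & 0x3F; }
    std::uint32_t translate_params_size() const { return raw & 0x3F; }
};

// Total command length in words: the header word itself plus both counts,
// matching dst_command_size above.
std::uint32_t CommandSize(IpcHeaderView h) {
    return 1u + h.normal_params_size() + h.translate_params_size();
}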
@@ -128,76 +202,50 @@ ResultCode TranslateCommandBuffer(SharedPtr<Thread> src_thread, SharedPtr<Thread
             u32 num_pages =
                 Common::AlignUp(page_offset + size, Memory::PAGE_SIZE) >> Memory::PAGE_BITS;
 
+            // Skip when the size is zero and num_pages == 0
+            if (size == 0) {
+                cmd_buf[i++] = 0;
+                break;
+            }
             ASSERT(num_pages >= 1);
 
             if (reply) {
-                // TODO(Subv): Scan the target's command buffer to make sure that there was a
-                // MappedBuffer descriptor in the original request. The real kernel panics if you
-                // try to reply with an unsolicited MappedBuffer.
-
-                // Unmap the buffers. Readonly buffers do not need to be copied over to the target
-                // process again because they were (presumably) not modified. This behavior is
-                // consistent with the real kernel.
-                if (permissions == IPC::MappedBufferPermissions::R) {
-                    ResultCode result = src_process->vm_manager.UnmapRange(
-                        page_start, num_pages * Memory::PAGE_SIZE);
-                    ASSERT(result == RESULT_SUCCESS);
-                }
-
-                ASSERT_MSG(permissions == IPC::MappedBufferPermissions::R,
-                           "Unmapping Write MappedBuffers is unimplemented");
+                // Scan the target's command buffer for the matching mapped buffer.
+                // The real kernel panics if you try to reply with an unsolicited MappedBuffer.
+                ScanForAndUnmapBuffer(dst_cmd_buf, dst_command_size, target_index, src_process,
+                                      dst_process, source_address, page_start, num_pages, size,
+                                      permissions);
+
                 i += 1;
                 break;
             }
 
             VAddr target_address = 0;
 
-            auto IsPageAligned = [](VAddr address) -> bool {
-                return (address & Memory::PAGE_MASK) == 0;
-            };
-
-            // TODO(Subv): Support more than 1 page and aligned page mappings
-            ASSERT_MSG(
-                num_pages == 1 &&
-                    (!IsPageAligned(source_address) || !IsPageAligned(source_address + size)),
-                "MappedBuffers of more than one page or aligned transfers are not implemented");
-
             // TODO(Subv): Perform permission checks.
 
-            // TODO(Subv): Leave a page of Reserved memory before the first page and after the last
-            // page.
-
-            if (!IsPageAligned(source_address) ||
-                (num_pages == 1 && !IsPageAligned(source_address + size))) {
-                // If the address of the source buffer is not page-aligned or if the buffer doesn't
-                // fill an entire page, then we have to allocate a page of memory in the target
-                // process and copy over the data from the input buffer. This allocated buffer will
-                // be copied back to the source process and deallocated when the server replies to
-                // the request via ReplyAndReceive.
-
-                auto buffer = std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE);
-
-                // Number of bytes until the next page.
-                std::size_t difference_to_page =
-                    Common::AlignUp(source_address, Memory::PAGE_SIZE) - source_address;
-                // If the data fits in one page we can just copy the required size instead of the
-                // entire page.
-                std::size_t read_size =
-                    num_pages == 1 ? static_cast<std::size_t>(size) : difference_to_page;
-
-                Memory::ReadBlock(*src_process, source_address, buffer->data() + page_offset,
-                                  read_size);
-
-                // Map the page into the target process' address space.
-                target_address =
-                    dst_process->vm_manager
-                        .MapMemoryBlockToBase(Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE,
-                                              buffer, 0, static_cast<u32>(buffer->size()),
-                                              Kernel::MemoryState::Shared)
-                        .Unwrap();
-            }
+            // Reserve a page of memory before the mapped buffer
+            auto reserve_buffer = std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE);
+            dst_process->vm_manager.MapMemoryBlockToBase(
+                Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer, 0,
+                static_cast<u32>(reserve_buffer->size()), Kernel::MemoryState::Reserved);
+
+            auto buffer = std::make_shared<std::vector<u8>>(num_pages * Memory::PAGE_SIZE);
+            Memory::ReadBlock(*src_process, source_address, buffer->data() + page_offset, size);
+
+            // Map the page(s) into the target process' address space.
+            target_address = dst_process->vm_manager
+                                 .MapMemoryBlockToBase(
+                                     Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, buffer, 0,
+                                     static_cast<u32>(buffer->size()), Kernel::MemoryState::Shared)
+                                 .Unwrap();
 
             cmd_buf[i++] = target_address + page_offset;
 
+            // Reserve a page of memory after the mapped buffer
+            dst_process->vm_manager.MapMemoryBlockToBase(
+                Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer, 0,
+                static_cast<u32>(reserve_buffer->size()), Kernel::MemoryState::Reserved);
             break;
         }
         default:
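A note on the page math in this hunk: because the source buffer need not start page-aligned, the page count depends on the offset within the first page as well as the size, which is also why the zero-size case must be skipped explicitly. A minimal standalone sketch with worked examples (not citra code):

#include <cstdint>

constexpr std::uint32_t PAGE_BITS = 12;
constexpr std::uint32_t PAGE_SIZE = 1u << PAGE_BITS;

// Equivalent to Common::AlignUp(page_offset + size, PAGE_SIZE) >> PAGE_BITS.
constexpr std::uint32_t NumPages(std::uint32_t source_address, std::uint32_t size) {
    return ((source_address & (PAGE_SIZE - 1)) + size + PAGE_SIZE - 1) >> PAGE_BITS;
}

// A 16-byte buffer near the end of a page straddles a page boundary and
// needs two pages, while the same buffer at the start of a page needs one,
// and a zero-size buffer maps nothing (hence the size == 0 skip above).
static_assert(NumPages(0x08000FF8, 16) == 2, "straddles a page boundary");
static_assert(NumPages(0x08001000, 16) == 1, "fits in a single page");
static_assert(NumPages(0x08001000, 0) == 0, "zero-size maps nothing");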
@@ -705,6 +705,58 @@ void CopyBlock(VAddr dest_addr, VAddr src_addr, const std::size_t size) {
     CopyBlock(*Core::System::GetInstance().Kernel().GetCurrentProcess(), dest_addr, src_addr, size);
 }
 
+void CopyBlock(const Kernel::Process& src_process, const Kernel::Process& dest_process,
+               VAddr src_addr, VAddr dest_addr, std::size_t size) {
+    auto& page_table = src_process.vm_manager.page_table;
+    std::size_t remaining_size = size;
+    std::size_t page_index = src_addr >> PAGE_BITS;
+    std::size_t page_offset = src_addr & PAGE_MASK;
+
+    while (remaining_size > 0) {
+        const std::size_t copy_amount = std::min(PAGE_SIZE - page_offset, remaining_size);
+        const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+
+        switch (page_table.attributes[page_index]) {
+        case PageType::Unmapped: {
+            LOG_ERROR(HW_Memory,
+                      "unmapped CopyBlock @ 0x{:08X} (start address = 0x{:08X}, size = {})",
+                      current_vaddr, src_addr, size);
+            ZeroBlock(dest_process, dest_addr, copy_amount);
+            break;
+        }
+        case PageType::Memory: {
+            DEBUG_ASSERT(page_table.pointers[page_index]);
+            const u8* src_ptr = page_table.pointers[page_index] + page_offset;
+            WriteBlock(dest_process, dest_addr, src_ptr, copy_amount);
+            break;
+        }
+        case PageType::Special: {
+            MMIORegionPointer handler = GetMMIOHandler(page_table, current_vaddr);
+            DEBUG_ASSERT(handler);
+            std::vector<u8> buffer(copy_amount);
+            handler->ReadBlock(current_vaddr, buffer.data(), buffer.size());
+            WriteBlock(dest_process, dest_addr, buffer.data(), buffer.size());
+            break;
+        }
+        case PageType::RasterizerCachedMemory: {
+            RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount),
+                                         FlushMode::Flush);
+            WriteBlock(dest_process, dest_addr, GetPointerFromVMA(src_process, current_vaddr),
+                       copy_amount);
+            break;
+        }
+        default:
+            UNREACHABLE();
+        }
+
+        page_index++;
+        page_offset = 0;
+        dest_addr += static_cast<VAddr>(copy_amount);
+        src_addr += static_cast<VAddr>(copy_amount);
+        remaining_size -= copy_amount;
+    }
+}
+
 template <>
 u8 ReadMMIO<u8>(MMIORegionPointer mmio_handler, VAddr addr) {
     return mmio_handler->Read8(addr);
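The new overload uses the same per-page walk as the surrounding block helpers: each iteration copies at most up to the end of the current page, then advances and resets the intra-page offset. The shape of that loop, reduced to a standalone sketch (the callback stands in for the per-PageType dispatch above; names are illustrative):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>

constexpr std::uint32_t PAGE_BITS = 12;
constexpr std::size_t PAGE_SIZE = std::size_t{1} << PAGE_BITS;

// Visit [addr, addr + size) one page-bounded chunk at a time.
void ForEachPageChunk(std::uint32_t addr, std::size_t size,
                      const std::function<void(std::uint32_t vaddr, std::size_t len)>& visit) {
    std::size_t page_index = addr >> PAGE_BITS;
    std::size_t page_offset = addr & (PAGE_SIZE - 1);
    while (size > 0) {
        // Clamp each chunk to the end of the current page, as CopyBlock does.
        const std::size_t chunk = std::min(PAGE_SIZE - page_offset, size);
        visit(static_cast<std::uint32_t>((page_index << PAGE_BITS) + page_offset), chunk);
        ++page_index;
        page_offset = 0; // only the first chunk can start mid-page
        size -= chunk;
    }
}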
@@ -205,6 +205,8 @@ void ZeroBlock(const Kernel::Process& process, VAddr dest_addr, const std::size_
 void ZeroBlock(VAddr dest_addr, const std::size_t size);
 void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr, std::size_t size);
 void CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size);
+void CopyBlock(const Kernel::Process& src_process, const Kernel::Process& dest_process,
+               VAddr src_addr, VAddr dest_addr, std::size_t size);
 
 u8* GetPointer(VAddr vaddr);
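The two added lines declare the cross-process overload that ScanForAndUnmapBuffer calls in the first hunk. Note the parameter order: unlike the single-process overloads above it, which take dest_addr first, this one takes the source process and source address first. The call site from the first hunk:

// Copy the modified buffer back into the target process
Memory::CopyBlock(*src_process, *dst_process, source_address, dest_address, size);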