Merge pull request #1025 from yuriks/heap-management
Kernel: Correct(er) handling of Heap and Linear Heap allocations
Commit: 3efb205a68
29 changed files with 729 additions and 316 deletions
@@ -91,17 +91,16 @@ void LogMessage(Class log_class, Level log_level,
} // namespace Log

#define LOG_GENERIC(log_class, log_level, ...) \
    ::Log::LogMessage(::Log::Class::log_class, ::Log::Level::log_level, \
        __FILE__, __LINE__, __func__, __VA_ARGS__)
    ::Log::LogMessage(log_class, log_level, __FILE__, __LINE__, __func__, __VA_ARGS__)

#ifdef _DEBUG
#define LOG_TRACE( log_class, ...) LOG_GENERIC(log_class, Trace, __VA_ARGS__)
#define LOG_TRACE( log_class, ...) LOG_GENERIC(::Log::Class::log_class, ::Log::Level::Trace, __VA_ARGS__)
#else
#define LOG_TRACE( log_class, ...) (void(0))
#endif

#define LOG_DEBUG( log_class, ...) LOG_GENERIC(log_class, Debug, __VA_ARGS__)
#define LOG_INFO( log_class, ...) LOG_GENERIC(log_class, Info, __VA_ARGS__)
#define LOG_WARNING( log_class, ...) LOG_GENERIC(log_class, Warning, __VA_ARGS__)
#define LOG_ERROR( log_class, ...) LOG_GENERIC(log_class, Error, __VA_ARGS__)
#define LOG_CRITICAL(log_class, ...) LOG_GENERIC(log_class, Critical, __VA_ARGS__)
#define LOG_DEBUG( log_class, ...) LOG_GENERIC(::Log::Class::log_class, ::Log::Level::Debug, __VA_ARGS__)
#define LOG_INFO( log_class, ...) LOG_GENERIC(::Log::Class::log_class, ::Log::Level::Info, __VA_ARGS__)
#define LOG_WARNING( log_class, ...) LOG_GENERIC(::Log::Class::log_class, ::Log::Level::Warning, __VA_ARGS__)
#define LOG_ERROR( log_class, ...) LOG_GENERIC(::Log::Class::log_class, ::Log::Level::Error, __VA_ARGS__)
#define LOG_CRITICAL(log_class, ...) LOG_GENERIC(::Log::Class::log_class, ::Log::Level::Critical, __VA_ARGS__)
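For orientation, a minimal sketch (not part of the diff) of what a call site expands to under the reworked macros: callers still pass bare class and level names, and the per-level macros now qualify them before forwarding to LOG_GENERIC, which passes already-qualified values through unchanged.

    // Hypothetical call site; Kernel_SVC is one of the existing Log::Class values used elsewhere in this diff.
    LOG_WARNING(Kernel_SVC, "ControlMemory with specified region not supported, region=%X", region);
    // expands roughly to:
    // ::Log::LogMessage(::Log::Class::Kernel_SVC, ::Log::Level::Warning,
    //                   __FILE__, __LINE__, __func__,
    //                   "ControlMemory with specified region not supported, region=%X", region);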
@@ -29,6 +29,7 @@ set(SRCS
    hle/kernel/address_arbiter.cpp
    hle/kernel/event.cpp
    hle/kernel/kernel.cpp
    hle/kernel/memory.cpp
    hle/kernel/mutex.cpp
    hle/kernel/process.cpp
    hle/kernel/resource_limit.cpp

@@ -115,7 +116,6 @@ set(SRCS
    loader/loader.cpp
    loader/ncch.cpp
    tracer/recorder.cpp
    mem_map.cpp
    memory.cpp
    settings.cpp
    system.cpp

@@ -157,6 +157,7 @@ set(HEADERS
    hle/kernel/address_arbiter.h
    hle/kernel/event.h
    hle/kernel/kernel.h
    hle/kernel/memory.h
    hle/kernel/mutex.h
    hle/kernel/process.h
    hle/kernel/resource_limit.h

@@ -245,7 +246,6 @@ set(HEADERS
    loader/ncch.h
    tracer/recorder.h
    tracer/citrace.h
    mem_map.h
    memory.h
    memory_setup.h
    settings.h
@@ -4,7 +4,6 @@

#include "common/swap.h"
#include "common/logging/log.h"

#include "core/mem_map.h"
#include "core/memory.h"
#include "core/arm/skyeye_common/armstate.h"
#include "core/arm/skyeye_common/vfp/vfp.h"

@@ -17,7 +17,6 @@

#include "common/logging/log.h"

#include "core/mem_map.h"
#include "core/arm/skyeye_common/arm_regformat.h"
#include "core/arm/skyeye_common/armstate.h"
#include "core/arm/skyeye_common/armsupp.h"
@@ -25,10 +25,6 @@ void Init() {
    config_mem.sys_core_ver = 0x2;
    config_mem.unit_info = 0x1; // Bit 0 set for Retail
    config_mem.prev_firm = 0;
    config_mem.app_mem_type = 0x2; // Default app mem type is 0
    config_mem.app_mem_alloc = 0x06000000; // Set to 96MB, since some games use more than the default (64MB)
    config_mem.base_mem_alloc = 0x01400000; // Default base memory is 20MB
    config_mem.sys_mem_alloc = Memory::FCRAM_SIZE - (config_mem.app_mem_alloc + config_mem.base_mem_alloc);
    config_mem.firm_unk = 0;
    config_mem.firm_version_rev = 0;
    config_mem.firm_version_min = 0x40;

@@ -36,7 +32,4 @@ void Init() {
    config_mem.firm_sys_core_ver = 0x2;
}

void Shutdown() {
}

} // namespace

@@ -52,6 +52,5 @@ static_assert(sizeof(ConfigMemDef) == Memory::CONFIG_MEMORY_SIZE, "Config Memory
extern ConfigMemDef config_mem;

void Init();
void Shutdown();

} // namespace
@@ -172,6 +172,14 @@ template<ResultCode func(u32, s64, s64)> void Wrap() {
    FuncReturn(func(PARAM(0), param1, param2).raw);
}

template<ResultCode func(s64*, Handle, u32)> void Wrap() {
    s64 param_1 = 0;
    u32 retval = func(&param_1, PARAM(1), PARAM(2)).raw;
    Core::g_app_core->SetReg(1, (u32)param_1);
    Core::g_app_core->SetReg(2, (u32)(param_1 >> 32));
    FuncReturn(retval);
}

////////////////////////////////////////////////////////////////////////////////////////////////////
// Function wrappers that return type u32
@@ -34,8 +34,6 @@ void Reschedule(const char *reason) {

void Init() {
    Service::Init();
    ConfigMem::Init();
    SharedPage::Init();

    g_reschedule = false;

@@ -43,8 +41,6 @@ void Init() {
}

void Shutdown() {
    ConfigMem::Shutdown();
    SharedPage::Shutdown();
    Service::Shutdown();

    LOG_DEBUG(Kernel, "shutdown OK");
@@ -7,11 +7,14 @@

#include "common/assert.h"
#include "common/logging/log.h"

#include "core/hle/config_mem.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/timer.h"
#include "core/hle/shared_page.h"

namespace Kernel {

@@ -119,6 +122,13 @@ void HandleTable::Clear() {

/// Initialize the kernel
void Init() {
    ConfigMem::Init();
    SharedPage::Init();

    // TODO(yuriks): The memory type parameter needs to be determined by the ExHeader field instead
    // For now it defaults to the one with a largest allocation to the app
    Kernel::MemoryInit(2); // Allocates 96MB to the application

    Kernel::ResourceLimitsInit();
    Kernel::ThreadingInit();
    Kernel::TimersInit();

@@ -131,11 +141,14 @@ void Init() {

/// Shutdown the kernel
void Shutdown() {
    g_handle_table.Clear(); // Free all kernel objects

    Kernel::ThreadingShutdown();
    g_current_process = nullptr;

    Kernel::TimersShutdown();
    Kernel::ResourceLimitsShutdown();
    g_handle_table.Clear(); // Free all kernel objects
    g_current_process = nullptr;
    Kernel::MemoryShutdown();
}

} // namespace
src/core/hle/kernel/memory.cpp (new file, 136 lines)

@@ -0,0 +1,136 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <map>
#include <memory>
#include <utility>
#include <vector>

#include "common/common_types.h"
#include "common/logging/log.h"

#include "core/hle/config_mem.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/hle/result.h"
#include "core/hle/shared_page.h"
#include "core/memory.h"
#include "core/memory_setup.h"

////////////////////////////////////////////////////////////////////////////////////////////////////

namespace Kernel {

static MemoryRegionInfo memory_regions[3];

/// Size of the APPLICATION, SYSTEM and BASE memory regions (respectively) for each sytem
/// memory configuration type.
static const u32 memory_region_sizes[8][3] = {
    // Old 3DS layouts
    {0x04000000, 0x02C00000, 0x01400000}, // 0
    { /* This appears to be unused. */ }, // 1
    {0x06000000, 0x00C00000, 0x01400000}, // 2
    {0x05000000, 0x01C00000, 0x01400000}, // 3
    {0x04800000, 0x02400000, 0x01400000}, // 4
    {0x02000000, 0x04C00000, 0x01400000}, // 5

    // New 3DS layouts
    {0x07C00000, 0x06400000, 0x02000000}, // 6
    {0x0B200000, 0x02E00000, 0x02000000}, // 7
};

void MemoryInit(u32 mem_type) {
    // TODO(yuriks): On the n3DS, all o3DS configurations (<=5) are forced to 6 instead.
    ASSERT_MSG(mem_type <= 5, "New 3DS memory configuration aren't supported yet!");
    ASSERT(mem_type != 1);

    // The kernel allocation regions (APPLICATION, SYSTEM and BASE) are laid out in sequence, with
    // the sizes specified in the memory_region_sizes table.
    VAddr base = 0;
    for (int i = 0; i < 3; ++i) {
        memory_regions[i].base = base;
        memory_regions[i].size = memory_region_sizes[mem_type][i];
        memory_regions[i].linear_heap_memory = std::make_shared<std::vector<u8>>();

        base += memory_regions[i].size;
    }

    // We must've allocated the entire FCRAM by the end
    ASSERT(base == Memory::FCRAM_SIZE);

    using ConfigMem::config_mem;
    config_mem.app_mem_type = mem_type;
    // app_mem_malloc does not always match the configured size for memory_region[0]: in case the
    // n3DS type override is in effect it reports the size the game expects, not the real one.
    config_mem.app_mem_alloc = memory_region_sizes[mem_type][0];
    config_mem.sys_mem_alloc = memory_regions[1].size;
    config_mem.base_mem_alloc = memory_regions[2].size;
}

void MemoryShutdown() {
    for (auto& region : memory_regions) {
        region.base = 0;
        region.size = 0;
        region.linear_heap_memory = nullptr;
    }
}

MemoryRegionInfo* GetMemoryRegion(MemoryRegion region) {
    switch (region) {
    case MemoryRegion::APPLICATION:
        return &memory_regions[0];
    case MemoryRegion::SYSTEM:
        return &memory_regions[1];
    case MemoryRegion::BASE:
        return &memory_regions[2];
    default:
        UNREACHABLE();
    }
}

}

namespace Memory {

namespace {

struct MemoryArea {
    u32 base;
    u32 size;
    const char* name;
};

// We don't declare the IO regions in here since its handled by other means.
static MemoryArea memory_areas[] = {
    {SHARED_MEMORY_VADDR, SHARED_MEMORY_SIZE, "Shared Memory"}, // Shared memory
    {VRAM_VADDR,          VRAM_SIZE,          "VRAM"},          // Video memory (VRAM)
    {DSP_RAM_VADDR,       DSP_RAM_SIZE,       "DSP RAM"},       // DSP memory
    {TLS_AREA_VADDR,      TLS_AREA_SIZE,      "TLS Area"},      // TLS memory
};

}

void Init() {
    InitMemoryMap();
    LOG_DEBUG(HW_Memory, "initialized OK");
}

void InitLegacyAddressSpace(Kernel::VMManager& address_space) {
    using namespace Kernel;

    for (MemoryArea& area : memory_areas) {
        auto block = std::make_shared<std::vector<u8>>(area.size);
        address_space.MapMemoryBlock(area.base, std::move(block), 0, area.size, MemoryState::Private).Unwrap();
    }

    auto cfg_mem_vma = address_space.MapBackingMemory(CONFIG_MEMORY_VADDR,
            (u8*)&ConfigMem::config_mem, CONFIG_MEMORY_SIZE, MemoryState::Shared).MoveFrom();
    address_space.Reprotect(cfg_mem_vma, VMAPermission::Read);

    auto shared_page_vma = address_space.MapBackingMemory(SHARED_PAGE_VADDR,
            (u8*)&SharedPage::shared_page, SHARED_PAGE_SIZE, MemoryState::Shared).MoveFrom();
    address_space.Reprotect(shared_page_vma, VMAPermission::Read);
}

} // namespace
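A quick arithmetic check (not part of the commit) of the type-2 layout that Kernel::Init requests, assuming Memory::FCRAM_SIZE is 0x08000000 (128 MB): the three region sizes are laid out back to back and must exactly fill FCRAM, as MemoryInit asserts.

    // Minimal standalone sketch of the type-2 region layout from memory_region_sizes.
    #include <cassert>
    #include <cstdint>

    int main() {
        const uint32_t sizes[3] = {0x06000000, 0x00C00000, 0x01400000}; // APPLICATION, SYSTEM, BASE
        uint32_t base = 0;
        for (uint32_t size : sizes)
            base += size; // regions start at 0x00000000, 0x06000000, 0x06C00000
        assert(base == 0x08000000); // assumed FCRAM_SIZE (128 MB), matching the ASSERT in MemoryInit
    }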
src/core/hle/kernel/memory.h (new file, 35 lines)

@@ -0,0 +1,35 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <memory>

#include "common/common_types.h"

#include "core/hle/kernel/process.h"

namespace Kernel {

class VMManager;

struct MemoryRegionInfo {
    u32 base; // Not an address, but offset from start of FCRAM
    u32 size;

    std::shared_ptr<std::vector<u8>> linear_heap_memory;
};

void MemoryInit(u32 mem_type);
void MemoryShutdown();
MemoryRegionInfo* GetMemoryRegion(MemoryRegion region);

}

namespace Memory {

void Init();
void InitLegacyAddressSpace(Kernel::VMManager& address_space);

} // namespace
@@ -7,11 +7,11 @@

#include "common/logging/log.h"
#include "common/make_unique.h"

#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/mem_map.h"
#include "core/memory.h"

namespace Kernel {

@@ -36,8 +36,7 @@ SharedPtr<Process> Process::Create(SharedPtr<CodeSet> code_set) {
    process->codeset = std::move(code_set);
    process->flags.raw = 0;
    process->flags.memory_region = MemoryRegion::APPLICATION;
    process->address_space = Common::make_unique<VMManager>();
    Memory::InitLegacyAddressSpace(*process->address_space);
    Memory::InitLegacyAddressSpace(process->vm_manager);

    return process;
}

@@ -93,9 +92,11 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
            mapping.unk_flag = false;
        } else if ((type & 0xFE0) == 0xFC0) { // 0x01FF
            // Kernel version
            int minor = descriptor & 0xFF;
            int major = (descriptor >> 8) & 0xFF;
            LOG_INFO(Loader, "ExHeader kernel version ignored: %d.%d", major, minor);
            kernel_version = descriptor & 0xFFFF;

            int minor = kernel_version & 0xFF;
            int major = (kernel_version >> 8) & 0xFF;
            LOG_INFO(Loader, "ExHeader kernel version: %d.%d", major, minor);
        } else {
            LOG_ERROR(Loader, "Unhandled kernel caps descriptor: 0x%08X", descriptor);
        }

@@ -103,20 +104,153 @@ void Process::ParseKernelCaps(const u32* kernel_caps, size_t len) {
}

void Process::Run(s32 main_thread_priority, u32 stack_size) {
    memory_region = GetMemoryRegion(flags.memory_region);

    auto MapSegment = [&](CodeSet::Segment& segment, VMAPermission permissions, MemoryState memory_state) {
        auto vma = address_space->MapMemoryBlock(segment.addr, codeset->memory,
        auto vma = vm_manager.MapMemoryBlock(segment.addr, codeset->memory,
            segment.offset, segment.size, memory_state).Unwrap();
        address_space->Reprotect(vma, permissions);
        vm_manager.Reprotect(vma, permissions);
        misc_memory_used += segment.size;
    };

    // Map CodeSet segments
    MapSegment(codeset->code, VMAPermission::ReadExecute, MemoryState::Code);
    MapSegment(codeset->rodata, VMAPermission::Read, MemoryState::Code);
    MapSegment(codeset->data, VMAPermission::ReadWrite, MemoryState::Private);

    address_space->LogLayout();
    // Allocate and map stack
    vm_manager.MapMemoryBlock(Memory::HEAP_VADDR_END - stack_size,
        std::make_shared<std::vector<u8>>(stack_size, 0), 0, stack_size, MemoryState::Locked
        ).Unwrap();
    misc_memory_used += stack_size;

    vm_manager.LogLayout(Log::Level::Debug);
    Kernel::SetupMainThread(codeset->entrypoint, main_thread_priority);
}

VAddr Process::GetLinearHeapBase() const {
    return (kernel_version < 0x22C ? Memory::LINEAR_HEAP_VADDR : Memory::NEW_LINEAR_HEAP_SIZE)
        + memory_region->base;
}

VAddr Process::GetLinearHeapLimit() const {
    return GetLinearHeapBase() + memory_region->size;
}

ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission perms) {
    if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END || target + size < target) {
        return ERR_INVALID_ADDRESS;
    }

    if (heap_memory == nullptr) {
        // Initialize heap
        heap_memory = std::make_shared<std::vector<u8>>();
        heap_start = heap_end = target;
    }

    // If necessary, expand backing vector to cover new heap extents.
    if (target < heap_start) {
        heap_memory->insert(begin(*heap_memory), heap_start - target, 0);
        heap_start = target;
        vm_manager.RefreshMemoryBlockMappings(heap_memory.get());
    }
    if (target + size > heap_end) {
        heap_memory->insert(end(*heap_memory), (target + size) - heap_end, 0);
        heap_end = target + size;
        vm_manager.RefreshMemoryBlockMappings(heap_memory.get());
    }
    ASSERT(heap_end - heap_start == heap_memory->size());

    CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, heap_memory, target - heap_start, size, MemoryState::Private));
    vm_manager.Reprotect(vma, perms);

    heap_used += size;

    return MakeResult<VAddr>(heap_end - size);
}

ResultCode Process::HeapFree(VAddr target, u32 size) {
    if (target < Memory::HEAP_VADDR || target + size > Memory::HEAP_VADDR_END || target + size < target) {
        return ERR_INVALID_ADDRESS;
    }

    ResultCode result = vm_manager.UnmapRange(target, size);
    if (result.IsError()) return result;

    heap_used -= size;

    return RESULT_SUCCESS;
}

ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission perms) {
    auto& linheap_memory = memory_region->linear_heap_memory;

    VAddr heap_end = GetLinearHeapBase() + (u32)linheap_memory->size();
    // Games and homebrew only ever seem to pass 0 here (which lets the kernel decide the address),
    // but explicit addresses are also accepted and respected.
    if (target == 0) {
        target = heap_end;
    }

    if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() ||
        target > heap_end || target + size < target) {

        return ERR_INVALID_ADDRESS;
    }

    // Expansion of the linear heap is only allowed if you do an allocation immediatelly at its
    // end. It's possible to free gaps in the middle of the heap and then reallocate them later,
    // but expansions are only allowed at the end.
    if (target == heap_end) {
        linheap_memory->insert(linheap_memory->end(), size, 0);
        vm_manager.RefreshMemoryBlockMappings(linheap_memory.get());
    }

    // TODO(yuriks): As is, this lets processes map memory allocated by other processes from the
    // same region. It is unknown if or how the 3DS kernel checks against this.
    size_t offset = target - GetLinearHeapBase();
    CASCADE_RESULT(auto vma, vm_manager.MapMemoryBlock(target, linheap_memory, offset, size, MemoryState::Continuous));
    vm_manager.Reprotect(vma, perms);

    linear_heap_used += size;

    return MakeResult<VAddr>(target);
}

ResultCode Process::LinearFree(VAddr target, u32 size) {
    auto& linheap_memory = memory_region->linear_heap_memory;

    if (target < GetLinearHeapBase() || target + size > GetLinearHeapLimit() ||
        target + size < target) {

        return ERR_INVALID_ADDRESS;
    }

    VAddr heap_end = GetLinearHeapBase() + (u32)linheap_memory->size();
    if (target + size > heap_end) {
        return ERR_INVALID_ADDRESS_STATE;
    }

    ResultCode result = vm_manager.UnmapRange(target, size);
    if (result.IsError()) return result;

    linear_heap_used -= size;

    if (target + size == heap_end) {
        // End of linear heap has been freed, so check what's the last allocated block in it and
        // reduce the size.
        auto vma = vm_manager.FindVMA(target);
        ASSERT(vma != vm_manager.vma_map.end());
        ASSERT(vma->second.type == VMAType::Free);
        VAddr new_end = vma->second.base;
        if (new_end >= GetLinearHeapBase()) {
            linheap_memory->resize(new_end - GetLinearHeapBase());
        }
    }

    return RESULT_SUCCESS;
}

Kernel::Process::Process() {}
Kernel::Process::~Process() {}
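The heap bookkeeping above relies on a single backing vector that always spans [heap_start, heap_end), growing at either edge while holes stay backed. A minimal standalone sketch of that growth rule (hypothetical addresses, not Citra code):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
        std::vector<uint8_t> heap;
        uint32_t heap_start = 0x08100000, heap_end = 0x08100000; // hypothetical initial target

        auto allocate = [&](uint32_t target, uint32_t size) {
            if (target < heap_start) { heap.insert(heap.begin(), heap_start - target, 0); heap_start = target; }
            if (target + size > heap_end) { heap.insert(heap.end(), target + size - heap_end, 0); heap_end = target + size; }
            assert(heap_end - heap_start == heap.size()); // same invariant as the ASSERT in HeapAllocate
        };

        allocate(0x08100000, 0x1000); // grows at the back
        allocate(0x08000000, 0x1000); // grows at the front, leaving a hole that stays backed
        assert(heap.size() == 0x101000);
    }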
@@ -15,6 +15,7 @@

#include "common/common_types.h"

#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/vm_manager.h"

namespace Kernel {

@@ -48,7 +49,7 @@ union ProcessFlags {
};

class ResourceLimit;
class VMManager;
struct MemoryRegionInfo;

struct CodeSet final : public Object {
    static SharedPtr<CodeSet> Create(std::string name, u64 program_id);

@@ -104,14 +105,12 @@ public:
    /// processes access to specific I/O regions and device memory.
    boost::container::static_vector<AddressMapping, 8> address_mappings;
    ProcessFlags flags;
    /// Kernel compatibility version for this process
    u16 kernel_version = 0;

    /// The id of this process
    u32 process_id = next_process_id++;

    /// Bitmask of the used TLS slots
    std::bitset<300> used_tls_slots;
    std::unique_ptr<VMManager> address_space;

    /**
     * Parses a list of kernel capability descriptors (as found in the ExHeader) and applies them
     * to this process.

@@ -123,6 +122,36 @@ public:
     */
    void Run(s32 main_thread_priority, u32 stack_size);


    ///////////////////////////////////////////////////////////////////////////////////////////////
    // Memory Management

    VMManager vm_manager;

    // Memory used to back the allocations in the regular heap. A single vector is used to cover
    // the entire virtual address space extents that bound the allocations, including any holes.
    // This makes deallocation and reallocation of holes fast and keeps process memory contiguous
    // in the emulator address space, allowing Memory::GetPointer to be reasonably safe.
    std::shared_ptr<std::vector<u8>> heap_memory;
    // The left/right bounds of the address space covered by heap_memory.
    VAddr heap_start = 0, heap_end = 0;

    u32 heap_used = 0, linear_heap_used = 0, misc_memory_used = 0;

    MemoryRegionInfo* memory_region = nullptr;

    /// Bitmask of the used TLS slots
    std::bitset<300> used_tls_slots;

    VAddr GetLinearHeapBase() const;
    VAddr GetLinearHeapLimit() const;

    ResultVal<VAddr> HeapAllocate(VAddr target, u32 size, VMAPermission perms);
    ResultCode HeapFree(VAddr target, u32 size);

    ResultVal<VAddr> LinearAllocate(VAddr target, u32 size, VMAPermission perms);
    ResultCode LinearFree(VAddr target, u32 size);

private:
    Process();
    ~Process() override;
@@ -6,7 +6,6 @@

#include "common/logging/log.h"

#include "core/mem_map.h"
#include "core/hle/kernel/resource_limit.h"

namespace Kernel {
@@ -117,6 +117,7 @@ void Thread::Stop() {
    wait_objects.clear();

    Kernel::g_current_process->used_tls_slots[tls_index] = false;
    g_current_process->misc_memory_used -= Memory::TLS_ENTRY_SIZE;

    HLE::Reschedule(__func__);
}

@@ -414,6 +415,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
    }

    ASSERT_MSG(thread->tls_index != -1, "Out of TLS space");
    g_current_process->misc_memory_used += Memory::TLS_ENTRY_SIZE;

    // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
    // to initialize the context

@@ -504,7 +506,7 @@ void Thread::SetWaitSynchronizationOutput(s32 output) {
}

VAddr Thread::GetTLSAddress() const {
    return Memory::TLS_AREA_VADDR + tls_index * 0x200;
    return Memory::TLS_AREA_VADDR + tls_index * Memory::TLS_ENTRY_SIZE;
}

////////////////////////////////////////////////////////////////////////////////////////////////////
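A quick check (not part of the commit) of the new GetTLSAddress formula, using the constants introduced later in this diff in memory.h (TLS_AREA_VADDR = 0x1FF82000, TLS_ENTRY_SIZE = 0x200):

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint32_t TLS_AREA_VADDR = 0x1FF82000, TLS_ENTRY_SIZE = 0x200;
        uint32_t tls_index = 5; // hypothetical slot
        assert(TLS_AREA_VADDR + tls_index * TLS_ENTRY_SIZE == 0x1FF82A00);
    }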
@@ -11,6 +11,15 @@

namespace Kernel {

static const char* GetMemoryStateName(MemoryState state) {
    static const char* names[] = {
        "Free", "Reserved", "IO", "Static", "Code", "Private", "Shared", "Continuous", "Aliased",
        "Alias", "AliasCode", "Locked",
    };

    return names[(int)state];
}

bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
    ASSERT(base + size == next.base);
    if (permissions != next.permissions ||

@@ -51,11 +60,15 @@ void VMManager::Reset() {
}

VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
    return std::prev(vma_map.upper_bound(target));
    if (target >= MAX_ADDRESS) {
        return vma_map.end();
    } else {
        return std::prev(vma_map.upper_bound(target));
    }
}

ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
        std::shared_ptr<std::vector<u8>> block, u32 offset, u32 size, MemoryState state) {
        std::shared_ptr<std::vector<u8>> block, size_t offset, u32 size, MemoryState state) {
    ASSERT(block != nullptr);
    ASSERT(offset + size <= block->size());

@@ -106,10 +119,8 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u3
    return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
}

void VMManager::Unmap(VMAHandle vma_handle) {
    VMAIter iter = StripIterConstness(vma_handle);

    VirtualMemoryArea& vma = iter->second;
VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) {
    VirtualMemoryArea& vma = vma_handle->second;
    vma.type = VMAType::Free;
    vma.permissions = VMAPermission::None;
    vma.meminfo_state = MemoryState::Free;

@@ -121,26 +132,67 @@ void VMManager::Unmap(VMAHandle vma_handle) {

    UpdatePageTableForVMA(vma);

    MergeAdjacent(iter);
    return MergeAdjacent(vma_handle);
}

void VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) {
ResultCode VMManager::UnmapRange(VAddr target, u32 size) {
    CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
    VAddr target_end = target + size;

    VMAIter end = vma_map.end();
    // The comparison against the end of the range must be done using addresses since VMAs can be
    // merged during this process, causing invalidation of the iterators.
    while (vma != end && vma->second.base < target_end) {
        vma = std::next(Unmap(vma));
    }

    ASSERT(FindVMA(target)->second.size >= size);
    return RESULT_SUCCESS;
}

VMManager::VMAHandle VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) {
    VMAIter iter = StripIterConstness(vma_handle);

    VirtualMemoryArea& vma = iter->second;
    vma.permissions = new_perms;
    UpdatePageTableForVMA(vma);

    MergeAdjacent(iter);
    return MergeAdjacent(iter);
}

void VMManager::LogLayout() const {
ResultCode VMManager::ReprotectRange(VAddr target, u32 size, VMAPermission new_perms) {
    CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
    VAddr target_end = target + size;

    VMAIter end = vma_map.end();
    // The comparison against the end of the range must be done using addresses since VMAs can be
    // merged during this process, causing invalidation of the iterators.
    while (vma != end && vma->second.base < target_end) {
        vma = std::next(StripIterConstness(Reprotect(vma, new_perms)));
    }

    return RESULT_SUCCESS;
}

void VMManager::RefreshMemoryBlockMappings(const std::vector<u8>* block) {
    // If this ever proves to have a noticeable performance impact, allow users of the function to
    // specify a specific range of addresses to limit the scan to.
    for (const auto& p : vma_map) {
        const VirtualMemoryArea& vma = p.second;
        LOG_DEBUG(Kernel, "%08X - %08X size: %8X %c%c%c", vma.base, vma.base + vma.size, vma.size,
        if (block == vma.backing_block.get()) {
            UpdatePageTableForVMA(vma);
        }
    }
}

void VMManager::LogLayout(Log::Level log_level) const {
    for (const auto& p : vma_map) {
        const VirtualMemoryArea& vma = p.second;
        LOG_GENERIC(Log::Class::Kernel, log_level, "%08X - %08X size: %8X %c%c%c %s",
            vma.base, vma.base + vma.size, vma.size,
            (u8)vma.permissions & (u8)VMAPermission::Read ? 'R' : '-',
            (u8)vma.permissions & (u8)VMAPermission::Write ? 'W' : '-',
            (u8)vma.permissions & (u8)VMAPermission::Execute ? 'X' : '-');
            (u8)vma.permissions & (u8)VMAPermission::Execute ? 'X' : '-', GetMemoryStateName(vma.meminfo_state));
    }
}

@@ -151,21 +203,19 @@ VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle & iter) {
}

ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: %8X", size);
    ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: %08X", base);
    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x%8X", size);
    ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x%08X", base);

    VMAIter vma_handle = StripIterConstness(FindVMA(base));
    if (vma_handle == vma_map.end()) {
        // Target address is outside the range managed by the kernel
        return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS,
            ErrorSummary::InvalidArgument, ErrorLevel::Usage); // 0xE0E01BF5
        return ERR_INVALID_ADDRESS;
    }

    VirtualMemoryArea& vma = vma_handle->second;
    if (vma.type != VMAType::Free) {
        // Region is already allocated
        return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS,
            ErrorSummary::InvalidState, ErrorLevel::Usage); // 0xE0A01BF5
        return ERR_INVALID_ADDRESS_STATE;
    }

    u32 start_in_vma = base - vma.base;

@@ -173,8 +223,7 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {

    if (end_in_vma > vma.size) {
        // Requested allocation doesn't fit inside VMA
        return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS,
            ErrorSummary::InvalidState, ErrorLevel::Usage); // 0xE0A01BF5
        return ERR_INVALID_ADDRESS_STATE;
    }

    if (end_in_vma != vma.size) {

@@ -189,6 +238,35 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
    return MakeResult<VMAIter>(vma_handle);
}

ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u32 size) {
    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x%8X", size);
    ASSERT_MSG((target & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x%08X", target);

    VAddr target_end = target + size;
    ASSERT(target_end >= target);
    ASSERT(target_end <= MAX_ADDRESS);
    ASSERT(size > 0);

    VMAIter begin_vma = StripIterConstness(FindVMA(target));
    VMAIter i_end = vma_map.lower_bound(target_end);
    for (auto i = begin_vma; i != i_end; ++i) {
        if (i->second.type == VMAType::Free) {
            return ERR_INVALID_ADDRESS_STATE;
        }
    }

    if (target != begin_vma->second.base) {
        begin_vma = SplitVMA(begin_vma, target - begin_vma->second.base);
    }

    VMAIter end_vma = StripIterConstness(FindVMA(target_end));
    if (end_vma != vma_map.end() && target_end != end_vma->second.base) {
        end_vma = SplitVMA(end_vma, target_end - end_vma->second.base);
    }

    return MakeResult<VMAIter>(begin_vma);
}

VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u32 offset_in_vma) {
    VirtualMemoryArea& old_vma = vma_handle->second;
    VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA
@@ -14,6 +14,14 @@

namespace Kernel {

const ResultCode ERR_INVALID_ADDRESS{ // 0xE0E01BF5
    ErrorDescription::InvalidAddress, ErrorModule::OS,
    ErrorSummary::InvalidArgument, ErrorLevel::Usage};

const ResultCode ERR_INVALID_ADDRESS_STATE{ // 0xE0A01BF5
    ErrorDescription::InvalidAddress, ErrorModule::OS,
    ErrorSummary::InvalidState, ErrorLevel::Usage};

enum class VMAType : u8 {
    /// VMA represents an unmapped region of the address space.
    Free,

@@ -75,7 +83,7 @@ struct VirtualMemoryArea {
    /// Memory block backing this VMA.
    std::shared_ptr<std::vector<u8>> backing_block = nullptr;
    /// Offset into the backing_memory the mapping starts from.
    u32 offset = 0;
    size_t offset = 0;

    // Settings for type = BackingMemory
    /// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed.

@@ -141,7 +149,7 @@ public:
     * @param state MemoryState tag to attach to the VMA.
     */
    ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block,
        u32 offset, u32 size, MemoryState state);
        size_t offset, u32 size, MemoryState state);

    /**
     * Maps an unmanaged host memory pointer at a given address.

@@ -163,14 +171,23 @@ public:
     */
    ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u32 size, MemoryState state);

    /// Unmaps the given VMA.
    void Unmap(VMAHandle vma);
    /// Unmaps a range of addresses, splitting VMAs as necessary.
    ResultCode UnmapRange(VAddr target, u32 size);

    /// Changes the permissions of the given VMA.
    void Reprotect(VMAHandle vma, VMAPermission new_perms);
    VMAHandle Reprotect(VMAHandle vma, VMAPermission new_perms);

    /// Changes the permissions of a range of addresses, splitting VMAs as necessary.
    ResultCode ReprotectRange(VAddr target, u32 size, VMAPermission new_perms);

    /**
     * Scans all VMAs and updates the page table range of any that use the given vector as backing
     * memory. This should be called after any operation that causes reallocation of the vector.
     */
    void RefreshMemoryBlockMappings(const std::vector<u8>* block);

    /// Dumps the address space layout to the log, for debugging
    void LogLayout() const;
    void LogLayout(Log::Level log_level) const;

private:
    using VMAIter = decltype(vma_map)::iterator;

@@ -178,12 +195,21 @@ private:
    /// Converts a VMAHandle to a mutable VMAIter.
    VMAIter StripIterConstness(const VMAHandle& iter);

    /// Unmaps the given VMA.
    VMAIter Unmap(VMAIter vma);

    /**
     * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing
     * the appropriate error checking.
     */
    ResultVal<VMAIter> CarveVMA(VAddr base, u32 size);

    /**
     * Splits the edges of the given range of non-Free VMAs so that there is a VMA split at each
     * end of the range.
     */
    ResultVal<VMAIter> CarveVMARange(VAddr base, u32 size);

    /**
     * Splits a VMA in two, at the specified offset.
     * @returns the right side of the split, with the original iterator becoming the left side.
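A hypothetical usage sketch (not from the commit) of the new range-based API: a single call can now cover several previously distinct mappings, splitting VMAs at the edges as needed. The function name and include set are assumptions for illustration.

    #include "core/hle/kernel/vm_manager.h"
    #include "core/hle/result.h"

    // Makes [addr, addr + size) read-only in one shot; `vm` is assumed to already have the pages mapped.
    ResultCode MakeRegionReadOnly(Kernel::VMManager& vm, VAddr addr, u32 size) {
        ResultCode result = vm.ReprotectRange(addr, size, Kernel::VMAPermission::Read);
        if (result.IsError())
            return result;
        vm.LogLayout(Log::Level::Debug); // the reworked overload takes the level explicitly
        return RESULT_SUCCESS;
    }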
@@ -16,6 +16,7 @@

#include "core/hle/hle.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/kernel/thread.h"

@@ -37,7 +38,7 @@ static Kernel::SharedPtr<Kernel::Mutex> lock;
static Kernel::SharedPtr<Kernel::Event> notification_event; ///< APT notification event
static Kernel::SharedPtr<Kernel::Event> parameter_event; ///< APT parameter event

static std::vector<u8> shared_font;
static std::shared_ptr<std::vector<u8>> shared_font;

static u32 cpu_percent; ///< CPU time available to the running application

@@ -74,11 +75,12 @@ void Initialize(Service::Interface* self) {
void GetSharedFont(Service::Interface* self) {
    u32* cmd_buff = Kernel::GetCommandBuffer();

    if (!shared_font.empty()) {
        // TODO(bunnei): This function shouldn't copy the shared font every time it's called.
        // Instead, it should probably map the shared font as RO memory. We don't currently have
        // an easy way to do this, but the copy should be sufficient for now.
        memcpy(Memory::GetPointer(SHARED_FONT_VADDR), shared_font.data(), shared_font.size());
    if (shared_font != nullptr) {
        // TODO(yuriks): This is a hack to keep this working right now even with our completely
        // broken shared memory system.
        shared_font_mem->base_address = SHARED_FONT_VADDR;
        Kernel::g_current_process->vm_manager.MapMemoryBlock(shared_font_mem->base_address,
            shared_font, 0, shared_font_mem->size, Kernel::MemoryState::Shared);

        cmd_buff[0] = IPC::MakeHeader(0x44, 2, 2);
        cmd_buff[1] = RESULT_SUCCESS.raw; // No error

@@ -391,7 +393,6 @@ void Init() {
    // a homebrew app to do this: https://github.com/citra-emu/3dsutils. Put the resulting file
    // "shared_font.bin" in the Citra "sysdata" directory.

    shared_font.clear();
    std::string filepath = FileUtil::GetUserPath(D_SYSDATA_IDX) + SHARED_FONT;

    FileUtil::CreateFullPath(filepath); // Create path if not already created

@@ -399,8 +400,8 @@ void Init() {

    if (file.IsOpen()) {
        // Read shared font data
        shared_font.resize((size_t)file.GetSize());
        file.ReadBytes(shared_font.data(), (size_t)file.GetSize());
        shared_font = std::make_shared<std::vector<u8>>((size_t)file.GetSize());
        file.ReadBytes(shared_font->data(), shared_font->size());

        // Create shared font memory object
        using Kernel::MemoryPermission;

@@ -424,7 +425,7 @@ void Init() {
}

void Shutdown() {
    shared_font.clear();
    shared_font = nullptr;
    shared_font_mem = nullptr;
    lock = nullptr;
    notification_event = nullptr;
@@ -4,7 +4,6 @@

#include "common/bit_field.h"

#include "core/mem_map.h"
#include "core/memory.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/shared_memory.h"

@@ -10,7 +10,6 @@

#include "core/hle/kernel/event.h"
#include "core/hle/service/y2r_u.h"
#include "core/hw/y2r.h"
#include "core/mem_map.h"

#include "video_core/renderer_base.h"
#include "video_core/utils.h"

@@ -18,7 +18,4 @@ void Init() {
    shared_page.running_hw = 0x1; // product
}

void Shutdown() {
}

} // namespace

@@ -54,6 +54,5 @@ static_assert(sizeof(SharedPageDef) == Memory::SHARED_PAGE_SIZE, "Shared page st
extern SharedPageDef shared_page;

void Init();
void Shutdown();

} // namespace
@@ -10,11 +10,11 @@

#include "common/symbols.h"

#include "core/core_timing.h"
#include "core/mem_map.h"
#include "core/arm/arm_interface.h"

#include "core/hle/kernel/address_arbiter.h"
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/memory.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/process.h"
#include "core/hle/kernel/resource_limit.h"

@@ -41,32 +41,114 @@ const ResultCode ERR_NOT_FOUND(ErrorDescription::NotFound, ErrorModule::Kernel,
const ResultCode ERR_PORT_NAME_TOO_LONG(ErrorDescription(30), ErrorModule::OS,
    ErrorSummary::InvalidArgument, ErrorLevel::Usage); // 0xE0E0181E

const ResultCode ERR_MISALIGNED_ADDRESS{ // 0xE0E01BF1
    ErrorDescription::MisalignedAddress, ErrorModule::OS,
    ErrorSummary::InvalidArgument, ErrorLevel::Usage};
const ResultCode ERR_MISALIGNED_SIZE{ // 0xE0E01BF2
    ErrorDescription::MisalignedSize, ErrorModule::OS,
    ErrorSummary::InvalidArgument, ErrorLevel::Usage};
const ResultCode ERR_INVALID_COMBINATION{ // 0xE0E01BEE
    ErrorDescription::InvalidCombination, ErrorModule::OS,
    ErrorSummary::InvalidArgument, ErrorLevel::Usage};

enum ControlMemoryOperation {
    MEMORY_OPERATION_HEAP = 0x00000003,
    MEMORY_OPERATION_GSP_HEAP = 0x00010003,
    MEMOP_FREE = 1,
    MEMOP_RESERVE = 2, // This operation seems to be unsupported in the kernel
    MEMOP_COMMIT = 3,
    MEMOP_MAP = 4,
    MEMOP_UNMAP = 5,
    MEMOP_PROTECT = 6,
    MEMOP_OPERATION_MASK = 0xFF,

    MEMOP_REGION_APP = 0x100,
    MEMOP_REGION_SYSTEM = 0x200,
    MEMOP_REGION_BASE = 0x300,
    MEMOP_REGION_MASK = 0xF00,

    MEMOP_LINEAR = 0x10000,
};

/// Map application or GSP heap memory
static ResultCode ControlMemory(u32* out_addr, u32 operation, u32 addr0, u32 addr1, u32 size, u32 permissions) {
    LOG_TRACE(Kernel_SVC,"called operation=0x%08X, addr0=0x%08X, addr1=0x%08X, size=%08X, permissions=0x%08X",
    using namespace Kernel;

    LOG_DEBUG(Kernel_SVC,"called operation=0x%08X, addr0=0x%08X, addr1=0x%08X, size=0x%X, permissions=0x%08X",
        operation, addr0, addr1, size, permissions);

    switch (operation) {
    if ((addr0 & Memory::PAGE_MASK) != 0 || (addr1 & Memory::PAGE_MASK) != 0) {
        return ERR_MISALIGNED_ADDRESS;
    }
    if ((size & Memory::PAGE_MASK) != 0) {
        return ERR_MISALIGNED_SIZE;
    }

    // Map normal heap memory
    case MEMORY_OPERATION_HEAP:
        *out_addr = Memory::MapBlock_Heap(size, operation, permissions);
    u32 region = operation & MEMOP_REGION_MASK;
    operation &= ~MEMOP_REGION_MASK;

    if (region != 0) {
        LOG_WARNING(Kernel_SVC, "ControlMemory with specified region not supported, region=%X", region);
    }

    if ((permissions & (u32)MemoryPermission::ReadWrite) != permissions) {
        return ERR_INVALID_COMBINATION;
    }
    VMAPermission vma_permissions = (VMAPermission)permissions;

    auto& process = *g_current_process;

    switch (operation & MEMOP_OPERATION_MASK) {
    case MEMOP_FREE:
    {
        if (addr0 >= Memory::HEAP_VADDR && addr0 < Memory::HEAP_VADDR_END) {
            ResultCode result = process.HeapFree(addr0, size);
            if (result.IsError()) return result;
        } else if (addr0 >= process.GetLinearHeapBase() && addr0 < process.GetLinearHeapLimit()) {
            ResultCode result = process.LinearFree(addr0, size);
            if (result.IsError()) return result;
        } else {
            return ERR_INVALID_ADDRESS;
        }
        *out_addr = addr0;
        break;
    }

    // Map GSP heap memory
    case MEMORY_OPERATION_GSP_HEAP:
        *out_addr = Memory::MapBlock_HeapLinear(size, operation, permissions);
    case MEMOP_COMMIT:
    {
        if (operation & MEMOP_LINEAR) {
            CASCADE_RESULT(*out_addr, process.LinearAllocate(addr0, size, vma_permissions));
        } else {
            CASCADE_RESULT(*out_addr, process.HeapAllocate(addr0, size, vma_permissions));
        }
        break;
    }

    case MEMOP_MAP: // TODO: This is just a hack to avoid regressions until memory aliasing is implemented
    {
        CASCADE_RESULT(*out_addr, process.HeapAllocate(addr0, size, vma_permissions));
        break;
    }

    case MEMOP_UNMAP: // TODO: This is just a hack to avoid regressions until memory aliasing is implemented
    {
        ResultCode result = process.HeapFree(addr0, size);
        if (result.IsError()) return result;
        break;
    }

    case MEMOP_PROTECT:
    {
        ResultCode result = process.vm_manager.ReprotectRange(addr0, size, vma_permissions);
        if (result.IsError()) return result;
        break;
    }

    // Unknown ControlMemory operation
    default:
        LOG_ERROR(Kernel_SVC, "unknown operation=0x%08X", operation);
        return ERR_INVALID_COMBINATION;
    }

    process.vm_manager.LogLayout(Log::Level::Trace);

    return RESULT_SUCCESS;
}

@@ -537,9 +619,9 @@ static ResultCode QueryProcessMemory(MemoryInfo* memory_info, PageInfo* page_inf
    if (process == nullptr)
        return ERR_INVALID_HANDLE;

    auto vma = process->address_space->FindVMA(addr);
    auto vma = process->vm_manager.FindVMA(addr);

    if (vma == process->address_space->vma_map.end())
    if (vma == Kernel::g_current_process->vm_manager.vma_map.end())
        return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS, ErrorSummary::InvalidArgument, ErrorLevel::Usage);

    memory_info->base_address = vma->second.base;

@@ -692,6 +774,52 @@ static ResultCode CreateMemoryBlock(Handle* out_handle, u32 addr, u32 size, u32
    return RESULT_SUCCESS;
}

static ResultCode GetProcessInfo(s64* out, Handle process_handle, u32 type) {
    LOG_TRACE(Kernel_SVC, "called process=0x%08X type=%u", process_handle, type);

    using Kernel::Process;
    Kernel::SharedPtr<Process> process = Kernel::g_handle_table.Get<Process>(process_handle);
    if (process == nullptr)
        return ERR_INVALID_HANDLE;

    switch (type) {
    case 0:
    case 2:
        // TODO(yuriks): Type 0 returns a slightly higher number than type 2, but I'm not sure
        // what's the difference between them.
        *out = process->heap_used + process->linear_heap_used + process->misc_memory_used;
        break;
    case 1:
    case 3:
    case 4:
    case 5:
    case 6:
    case 7:
    case 8:
        // These are valid, but not implemented yet
        LOG_ERROR(Kernel_SVC, "unimplemented GetProcessInfo type=%u", type);
        break;
    case 20:
        *out = Memory::FCRAM_PADDR - process->GetLinearHeapBase();
        break;
    default:
        LOG_ERROR(Kernel_SVC, "unknown GetProcessInfo type=%u", type);

        if (type >= 21 && type <= 23) {
            return ResultCode( // 0xE0E01BF4
                ErrorDescription::NotImplemented, ErrorModule::OS,
                ErrorSummary::InvalidArgument, ErrorLevel::Usage);
        } else {
            return ResultCode( // 0xD8E007ED
                ErrorDescription::InvalidEnumValue, ErrorModule::Kernel,
                ErrorSummary::InvalidArgument, ErrorLevel::Permanent);
        }
        break;
    }

    return RESULT_SUCCESS;
}

namespace {
struct FunctionDef {
    using Func = void();

@@ -746,7 +874,7 @@ static const FunctionDef SVC_Table[] = {
    {0x28, HLE::Wrap<GetSystemTick>, "GetSystemTick"},
    {0x29, nullptr, "GetHandleInfo"},
    {0x2A, nullptr, "GetSystemInfo"},
    {0x2B, nullptr, "GetProcessInfo"},
    {0x2B, HLE::Wrap<GetProcessInfo>, "GetProcessInfo"},
    {0x2C, nullptr, "GetThreadInfo"},
    {0x2D, HLE::Wrap<ConnectToPort>, "ConnectToPort"},
    {0x2E, nullptr, "SendSyncRequest1"},
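A worked decode (not part of the commit) of the value the old code special-cased as MEMORY_OPERATION_GSP_HEAP, showing how the new bitmask handling routes it:

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint32_t MEMOP_COMMIT = 3, MEMOP_OPERATION_MASK = 0xFF,
                       MEMOP_REGION_MASK = 0xF00, MEMOP_LINEAR = 0x10000;
        uint32_t operation = 0x00010003;                  // old MEMORY_OPERATION_GSP_HEAP
        assert((operation & MEMOP_REGION_MASK) == 0);     // no explicit region requested
        assert((operation & MEMOP_OPERATION_MASK) == MEMOP_COMMIT);
        assert((operation & MEMOP_LINEAR) != 0);          // so it is handled as a linear-heap commit
    }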
Deleted file (163 lines):

@@ -1,163 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <map>
#include <memory>
#include <utility>
#include <vector>

#include "common/common_types.h"
#include "common/logging/log.h"

#include "core/hle/config_mem.h"
#include "core/hle/kernel/vm_manager.h"
#include "core/hle/result.h"
#include "core/hle/shared_page.h"
#include "core/mem_map.h"
#include "core/memory.h"
#include "core/memory_setup.h"

////////////////////////////////////////////////////////////////////////////////////////////////////

namespace Memory {

namespace {

struct MemoryArea {
    u32 base;
    u32 size;
    const char* name;
};

// We don't declare the IO regions in here since its handled by other means.
static MemoryArea memory_areas[] = {
    {HEAP_VADDR,          HEAP_SIZE,          "Heap"},          // Application heap (main memory)
    {SHARED_MEMORY_VADDR, SHARED_MEMORY_SIZE, "Shared Memory"}, // Shared memory
    {LINEAR_HEAP_VADDR,   LINEAR_HEAP_SIZE,   "Linear Heap"},   // Linear heap (main memory)
    {VRAM_VADDR,          VRAM_SIZE,          "VRAM"},          // Video memory (VRAM)
    {DSP_RAM_VADDR,       DSP_RAM_SIZE,       "DSP RAM"},       // DSP memory
    {TLS_AREA_VADDR,      TLS_AREA_SIZE,      "TLS Area"},      // TLS memory
};

/// Represents a block of memory mapped by ControlMemory/MapMemoryBlock
struct MemoryBlock {
    MemoryBlock() : handle(0), base_address(0), address(0), size(0), operation(0), permissions(0) {
    }
    u32 handle;
    u32 base_address;
    u32 address;
    u32 size;
    u32 operation;
    u32 permissions;

    const u32 GetVirtualAddress() const{
        return base_address + address;
    }
};

static std::map<u32, MemoryBlock> heap_map;
static std::map<u32, MemoryBlock> heap_linear_map;

}

u32 MapBlock_Heap(u32 size, u32 operation, u32 permissions) {
    MemoryBlock block;

    block.base_address = HEAP_VADDR;
    block.size = size;
    block.operation = operation;
    block.permissions = permissions;

    if (heap_map.size() > 0) {
        const MemoryBlock last_block = heap_map.rbegin()->second;
        block.address = last_block.address + last_block.size;
    }
    heap_map[block.GetVirtualAddress()] = block;

    return block.GetVirtualAddress();
}

u32 MapBlock_HeapLinear(u32 size, u32 operation, u32 permissions) {
    MemoryBlock block;

    block.base_address = LINEAR_HEAP_VADDR;
    block.size = size;
    block.operation = operation;
    block.permissions = permissions;

    if (heap_linear_map.size() > 0) {
        const MemoryBlock last_block = heap_linear_map.rbegin()->second;
        block.address = last_block.address + last_block.size;
    }
    heap_linear_map[block.GetVirtualAddress()] = block;

    return block.GetVirtualAddress();
}

PAddr VirtualToPhysicalAddress(const VAddr addr) {
    if (addr == 0) {
        return 0;
    } else if (addr >= VRAM_VADDR && addr < VRAM_VADDR_END) {
        return addr - VRAM_VADDR + VRAM_PADDR;
    } else if (addr >= LINEAR_HEAP_VADDR && addr < LINEAR_HEAP_VADDR_END) {
        return addr - LINEAR_HEAP_VADDR + FCRAM_PADDR;
    } else if (addr >= DSP_RAM_VADDR && addr < DSP_RAM_VADDR_END) {
        return addr - DSP_RAM_VADDR + DSP_RAM_PADDR;
    } else if (addr >= IO_AREA_VADDR && addr < IO_AREA_VADDR_END) {
        return addr - IO_AREA_VADDR + IO_AREA_PADDR;
    }

    LOG_ERROR(HW_Memory, "Unknown virtual address @ 0x%08x", addr);
    // To help with debugging, set bit on address so that it's obviously invalid.
    return addr | 0x80000000;
}

VAddr PhysicalToVirtualAddress(const PAddr addr) {
    if (addr == 0) {
        return 0;
    } else if (addr >= VRAM_PADDR && addr < VRAM_PADDR_END) {
        return addr - VRAM_PADDR + VRAM_VADDR;
    } else if (addr >= FCRAM_PADDR && addr < FCRAM_PADDR_END) {
        return addr - FCRAM_PADDR + LINEAR_HEAP_VADDR;
    } else if (addr >= DSP_RAM_PADDR && addr < DSP_RAM_PADDR_END) {
        return addr - DSP_RAM_PADDR + DSP_RAM_VADDR;
    } else if (addr >= IO_AREA_PADDR && addr < IO_AREA_PADDR_END) {
        return addr - IO_AREA_PADDR + IO_AREA_VADDR;
    }

    LOG_ERROR(HW_Memory, "Unknown physical address @ 0x%08x", addr);
    // To help with debugging, set bit on address so that it's obviously invalid.
    return addr | 0x80000000;
}

void Init() {
    InitMemoryMap();
    LOG_DEBUG(HW_Memory, "initialized OK");
}

void InitLegacyAddressSpace(Kernel::VMManager& address_space) {
    using namespace Kernel;

    for (MemoryArea& area : memory_areas) {
        auto block = std::make_shared<std::vector<u8>>(area.size);
        address_space.MapMemoryBlock(area.base, std::move(block), 0, area.size, MemoryState::Private).Unwrap();
    }

    auto cfg_mem_vma = address_space.MapBackingMemory(CONFIG_MEMORY_VADDR,
            (u8*)&ConfigMem::config_mem, CONFIG_MEMORY_SIZE, MemoryState::Shared).MoveFrom();
    address_space.Reprotect(cfg_mem_vma, VMAPermission::Read);

    auto shared_page_vma = address_space.MapBackingMemory(SHARED_PAGE_VADDR,
            (u8*)&SharedPage::shared_page, SHARED_PAGE_SIZE, MemoryState::Shared).MoveFrom();
    address_space.Reprotect(shared_page_vma, VMAPermission::Read);
}

void Shutdown() {
    heap_map.clear();
    heap_linear_map.clear();

    LOG_DEBUG(HW_Memory, "shutdown OK");
}

} // namespace
Deleted file (46 lines):

@@ -1,46 +0,0 @@
// Copyright 2014 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include "common/common_types.h"

namespace Kernel {
class VMManager;
}

namespace Memory {

void Init();
void InitLegacyAddressSpace(Kernel::VMManager& address_space);
void Shutdown();

/**
 * Maps a block of memory on the heap
 * @param size Size of block in bytes
 * @param operation Memory map operation type
 * @param permissions Memory allocation permissions
 */
u32 MapBlock_Heap(u32 size, u32 operation, u32 permissions);

/**
 * Maps a block of memory on the GSP heap
 * @param size Size of block in bytes
 * @param operation Memory map operation type
 * @param permissions Control memory permissions
 */
u32 MapBlock_HeapLinear(u32 size, u32 operation, u32 permissions);

/**
 * Converts a virtual address inside a region with 1:1 mapping to physical memory to a physical
 * address. This should be used by services to translate addresses for use by the hardware.
 */
PAddr VirtualToPhysicalAddress(VAddr addr);

/**
 * Undoes a mapping performed by VirtualToPhysicalAddress().
 */
VAddr PhysicalToVirtualAddress(PAddr addr);

} // namespace
@@ -9,7 +9,7 @@

#include "common/logging/log.h"
#include "common/swap.h"

#include "core/mem_map.h"
#include "core/hle/kernel/process.h"
#include "core/memory.h"
#include "core/memory_setup.h"

@@ -198,4 +198,42 @@ void WriteBlock(const VAddr addr, const u8* data, const size_t size) {
        Write8(addr + offset, data[offset]);
}

PAddr VirtualToPhysicalAddress(const VAddr addr) {
    if (addr == 0) {
        return 0;
    } else if (addr >= VRAM_VADDR && addr < VRAM_VADDR_END) {
        return addr - VRAM_VADDR + VRAM_PADDR;
    } else if (addr >= LINEAR_HEAP_VADDR && addr < LINEAR_HEAP_VADDR_END) {
        return addr - LINEAR_HEAP_VADDR + FCRAM_PADDR;
    } else if (addr >= DSP_RAM_VADDR && addr < DSP_RAM_VADDR_END) {
        return addr - DSP_RAM_VADDR + DSP_RAM_PADDR;
    } else if (addr >= IO_AREA_VADDR && addr < IO_AREA_VADDR_END) {
        return addr - IO_AREA_VADDR + IO_AREA_PADDR;
    } else if (addr >= NEW_LINEAR_HEAP_VADDR && addr < NEW_LINEAR_HEAP_VADDR_END) {
        return addr - NEW_LINEAR_HEAP_VADDR + FCRAM_PADDR;
    }

    LOG_ERROR(HW_Memory, "Unknown virtual address @ 0x%08X", addr);
    // To help with debugging, set bit on address so that it's obviously invalid.
    return addr | 0x80000000;
}

VAddr PhysicalToVirtualAddress(const PAddr addr) {
    if (addr == 0) {
        return 0;
    } else if (addr >= VRAM_PADDR && addr < VRAM_PADDR_END) {
        return addr - VRAM_PADDR + VRAM_VADDR;
    } else if (addr >= FCRAM_PADDR && addr < FCRAM_PADDR_END) {
        return addr - FCRAM_PADDR + Kernel::g_current_process->GetLinearHeapBase();
    } else if (addr >= DSP_RAM_PADDR && addr < DSP_RAM_PADDR_END) {
        return addr - DSP_RAM_PADDR + DSP_RAM_VADDR;
    } else if (addr >= IO_AREA_PADDR && addr < IO_AREA_PADDR_END) {
        return addr - IO_AREA_PADDR + IO_AREA_VADDR;
    }

    LOG_ERROR(HW_Memory, "Unknown physical address @ 0x%08X", addr);
    // To help with debugging, set bit on address so that it's obviously invalid.
    return addr | 0x80000000;
}

} // namespace
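A rough check (not part of the commit) of the new NEW_LINEAR_HEAP translation branch, assuming FCRAM_PADDR is 0x20000000 as on hardware:

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint32_t NEW_LINEAR_HEAP_VADDR = 0x30000000, FCRAM_PADDR = 0x20000000; // assumed values
        uint32_t vaddr = 0x30001000;                       // hypothetical linear-heap address
        uint32_t paddr = vaddr - NEW_LINEAR_HEAP_VADDR + FCRAM_PADDR;
        assert(paddr == 0x20001000);                       // mirrors the branch added above
    }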
@@ -15,6 +15,8 @@ namespace Memory {
 * be mapped.
 */
const u32 PAGE_SIZE = 0x1000;
const u32 PAGE_MASK = PAGE_SIZE - 1;
const int PAGE_BITS = 12;

/// Physical memory regions as seen from the ARM11
enum : PAddr {

@@ -103,8 +105,15 @@ enum : VAddr {
    // hardcoded value.
    /// Area where TLS (Thread-Local Storage) buffers are allocated.
    TLS_AREA_VADDR = 0x1FF82000,
    TLS_AREA_SIZE = 0x00030000, // Each TLS buffer is 0x200 bytes, allows for 300 threads
    TLS_ENTRY_SIZE = 0x200,
    TLS_AREA_SIZE = 300 * TLS_ENTRY_SIZE, // Allows for up to 300 threads
    TLS_AREA_VADDR_END = TLS_AREA_VADDR + TLS_AREA_SIZE,

    /// Equivalent to LINEAR_HEAP_VADDR, but expanded to cover the extra memory in the New 3DS.
    NEW_LINEAR_HEAP_VADDR = 0x30000000,
    NEW_LINEAR_HEAP_SIZE = 0x10000000,
    NEW_LINEAR_HEAP_VADDR_END = NEW_LINEAR_HEAP_VADDR + NEW_LINEAR_HEAP_SIZE,
};

u8 Read8(VAddr addr);

@@ -121,6 +130,17 @@ void WriteBlock(VAddr addr, const u8* data, size_t size);

u8* GetPointer(VAddr virtual_address);

/**
 * Converts a virtual address inside a region with 1:1 mapping to physical memory to a physical
 * address. This should be used by services to translate addresses for use by the hardware.
 */
PAddr VirtualToPhysicalAddress(VAddr addr);

/**
 * Undoes a mapping performed by VirtualToPhysicalAddress().
 */
VAddr PhysicalToVirtualAddress(PAddr addr);

/**
 * Gets a pointer to the memory region beginning at the specified physical address.
 *
@@ -10,9 +10,6 @@

namespace Memory {

const u32 PAGE_MASK = PAGE_SIZE - 1;
const int PAGE_BITS = 12;

void InitMemoryMap();

/**
@@ -4,11 +4,11 @@

#include "core/core.h"
#include "core/core_timing.h"
#include "core/mem_map.h"
#include "core/system.h"
#include "core/hw/hw.h"
#include "core/hle/hle.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/kernel/memory.h"

#include "video_core/video_core.h"

@@ -29,7 +29,6 @@ void Shutdown() {
    HLE::Shutdown();
    Kernel::Shutdown();
    HW::Shutdown();
    Memory::Shutdown();
    CoreTiming::Shutdown();
    Core::Shutdown();
}