Mirror of https://github.com/PabloMK7/citra.git, synced 2024-11-19 22:16:36 +00:00

This reverts commit 05cccb585d.
parent 05cccb585d
commit 2e9d7bd208
2 changed files with 11 additions and 56 deletions
@@ -88,16 +88,8 @@ public:
         }
     }
 
-    void invalidate(const key_type& key) {
-        auto i = find(key);
-        if (i != m_list.cend()) {
-            m_list.erase(i);
-        }
-    }
-
     void clear() {
         m_list.clear();
-        m_array.fill(value_type{});
     }
 
 private:
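The hunk above drops the invalidate() member and the m_array.fill() reset from a small cache helper class. For orientation only, here is a minimal, self-contained sketch of an LRU-style cache with the same invalidate()/clear() shape; the class name, request(), and the list-only storage are assumptions for illustration, not the repository's actual implementation (which, per the removed m_array.fill(value_type{}) call, also keeps a fixed std::array of values).

// Minimal sketch of an LRU-style cache with invalidate()/clear() members.
// Illustrative only; names and storage layout are assumptions.
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <list>
#include <string>
#include <utility>

template <typename Key, typename Value, std::size_t Size>
class TinyLruCache {
public:
    using key_type = Key;
    using value_type = std::pair<Key, Value>;

    // Fetch the entry for `key`; on a miss, evict the least recently used
    // entry (if full) and insert a default-constructed value.
    Value& request(const key_type& key) {
        auto i = find(key);
        if (i != m_list.end()) {
            m_list.splice(m_list.begin(), m_list, i); // mark as most recently used
            return m_list.front().second;
        }
        if (m_list.size() >= Size) {
            m_list.pop_back(); // evict least recently used
        }
        m_list.emplace_front(key, Value{});
        return m_list.front().second;
    }

    // Drop a single entry if present (the member this revert removes).
    void invalidate(const key_type& key) {
        auto i = find(key);
        if (i != m_list.cend()) {
            m_list.erase(i);
        }
    }

    // Drop every entry.
    void clear() {
        m_list.clear();
    }

private:
    typename std::list<value_type>::iterator find(const key_type& key) {
        return std::find_if(m_list.begin(), m_list.end(),
                            [&](const value_type& v) { return v.first == key; });
    }

    std::list<value_type> m_list; // front = most recently used
};

int main() {
    TinyLruCache<int, std::string, 2> cache;
    cache.request(1) = "one";
    cache.request(2) = "two";
    cache.invalidate(1);
    cache.request(3) = "three"; // fits without evicting, since key 1 was invalidated
    std::cout << cache.request(2) << ' ' << cache.request(3) << '\n'; // prints "two three"
}
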
@@ -25,20 +25,13 @@ ResultVal<std::size_t> ArticCache::Read(s32 file_handle, std::size_t offset, std
                 auto res =
                     ReadFromArtic(file_handle, reinterpret_cast<u8*>(big_cache_entry.second.data()),
                                   length, offset);
-                if (res.Failed()) {
-                    big_cache.invalidate(std::make_pair(offset, length));
+                if (res.Failed())
                     return res;
-                }
-                read_progress = res.Unwrap();
+                length = res.Unwrap();
             } else {
-                LOG_TRACE(Service_FS, "ArticCache BHIT: offset={}, length={}", offset,
-                          read_progress);
-            }
-            memcpy(buffer, big_cache_entry.second.data(), read_progress);
-            if (read_progress < length) {
-                // Invalidate the entry as it is not fully read
-                big_cache.invalidate(std::make_pair(offset, length));
+                LOG_TRACE(Service_FS, "ArticCache BHIT: offset={}, length={}", offset, length);
             }
+            memcpy(buffer, big_cache_entry.second.data(), length);
         } else {
             if (segments[0].second < very_big_cache_skip) {
                 std::unique_lock very_big_read_guard(very_big_cache_mutex);
@@ -51,39 +44,28 @@ ResultVal<std::size_t> ArticCache::Read(s32 file_handle, std::size_t offset, std
                     auto res = ReadFromArtic(
                         file_handle, reinterpret_cast<u8*>(very_big_cache_entry.second.data()),
                         length, offset);
-                    if (res.Failed()) {
-                        very_big_cache.invalidate(std::make_pair(offset, length));
+                    if (res.Failed())
                         return res;
-                    }
-                    read_progress = res.Unwrap();
+                    length = res.Unwrap();
                 } else {
-                    LOG_TRACE(Service_FS, "ArticCache VBHIT: offset={}, length={}", offset,
-                              read_progress);
-                }
-                memcpy(buffer, very_big_cache_entry.second.data(), read_progress);
-                if (read_progress < length) {
-                    // Invalidate the entry as it is not fully read
-                    very_big_cache.invalidate(std::make_pair(offset, length));
+                    LOG_TRACE(Service_FS, "ArticCache VBHIT: offset={}, length={}", offset, length);
                 }
+                memcpy(buffer, very_big_cache_entry.second.data(), length);
             } else {
                 LOG_TRACE(Service_FS, "ArticCache SKIP: offset={}, length={}", offset, length);
 
                 auto res = ReadFromArtic(file_handle, buffer, length, offset);
                 if (res.Failed())
                     return res;
-                read_progress = res.Unwrap();
+                length = res.Unwrap();
             }
         }
-        return read_progress;
+        return length;
     }
 
     // TODO(PabloMK7): Make cache thread safe, read the comment in CacheReady function.
     std::unique_lock read_guard(cache_mutex);
-    bool read_past_end = false;
     for (const auto& seg : segments) {
-        if (read_past_end) {
-            break;
-        }
         std::size_t read_size = cache_line_size;
         std::size_t page = OffsetToPage(seg.first);
         // Check if segment is in cache
@@ -91,28 +73,9 @@ ResultVal<std::size_t> ArticCache::Read(s32 file_handle, std::size_t offset, std
         if (!cache_entry.first) {
             // If not found, read from artic and cache the data
             auto res = ReadFromArtic(file_handle, cache_entry.second.data(), read_size, page);
-            if (res.Failed()) {
-                // Invalidate the requested entry as it is not populated
-                cache.invalidate(page);
-
-                // In the very unlikely case the file size is a multiple of the cache size,
-                // and the game request more data than the file size, this will save us from
-                // returning an incorrect out of bounds error caused by reading at just the very end
-                // of the file.
-                constexpr u32 out_of_bounds_read = 714;
-                if (res.Code().description == out_of_bounds_read) {
-                    return read_progress;
-                }
+            if (res.Failed())
                 return res;
-            }
-            size_t expected_read_size = read_size;
             read_size = res.Unwrap();
-            //
-            if (read_size < expected_read_size) {
-                // Invalidate the requested entry as it is not fully read
-                cache.invalidate(page);
-                read_past_end = true;
-            }
             LOG_TRACE(Service_FS, "ArticCache MISS: page={}, length={}, into={}", page, seg.second,
                       (seg.first - page));
         } else {
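Taken together, the hunks above restore the pre-05cccb585d read path: the separate read_progress counter, the read_past_end early exit, the out_of_bounds_read special case, and the cache-entry invalidation on failed or short reads are all removed, and res.Unwrap() is assigned straight back to length. The sketch below is a stand-alone illustration of the short-read pattern the reverted commit had introduced (copy only the bytes actually read, then drop the cache entry so a later request retries); backend_read(), the std::map cache, and the 10-byte fake file are assumptions for the example and not the project's ResultVal/ReadFromArtic API.

// Stand-alone illustration of the "invalidate on short read" pattern removed by
// this revert; every name here is a stand-in, not the repository's API.
#include <algorithm>
#include <cstddef>
#include <cstring>
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

using Key = std::pair<std::size_t, std::size_t>; // (offset, length)
std::map<Key, std::vector<char>> cache;          // stand-in for the big-read cache

// Fake backend: a 10-byte "file"; returns how many bytes were actually read.
std::size_t backend_read(char* dst, std::size_t offset, std::size_t length) {
    static const char file[] = "0123456789";
    constexpr std::size_t file_size = sizeof(file) - 1; // exclude the terminator
    if (offset >= file_size)
        return 0;
    std::size_t avail = std::min(length, file_size - offset);
    std::memcpy(dst, file + offset, avail);
    return avail;
}

std::size_t cached_read(char* buffer, std::size_t offset, std::size_t length) {
    auto& entry = cache[Key{offset, length}];
    if (entry.empty()) { // miss: populate the entry from the backend
        entry.resize(length);
        std::size_t got = backend_read(entry.data(), offset, length);
        if (got < length) {
            // Short read: hand back only what arrived and invalidate the entry,
            // so a later request retries instead of reusing a half-filled buffer.
            std::memcpy(buffer, entry.data(), got);
            cache.erase(Key{offset, length});
            return got;
        }
    }
    std::memcpy(buffer, entry.data(), length);
    return length;
}

int main() {
    char buf[16] = {};
    std::size_t got = cached_read(buf, 6, 8); // request runs past the end of the file
    std::cout << "read " << got << " bytes: " << std::string(buf, got) << '\n'; // read 4 bytes: 6789
}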