core: Remove usage of MemoryRef

GPUCode 2024-04-02 22:39:06 +03:00
parent ac792f7b98
commit e00a49e1e5
23 changed files with 151 additions and 293 deletions
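As background for the diffs that follow: the commit replaces the ref-counted MemoryRef/BackingMem wrappers with plain u8* pointers and std::span views into buffers that MemorySystem owns for the lifetime of the emulated system. A minimal standalone sketch of that ownership model (illustrative only; the GetDspMemory() signature mirrors the diff, everything else here is simplified, and the 512 KiB DSP_RAM_SIZE is an assumption about the 3DS DSP RAM size):

```cpp
#include <cstddef>
#include <cstdint>
#include <memory>
#include <span>

using u8 = std::uint8_t;

// Assumed value: the 3DS DSP RAM is 512 KiB; Citra's Memory::DSP_RAM_SIZE plays this role.
constexpr std::size_t DSP_RAM_SIZE = 0x80000;

class MemorySystem {
public:
    // Before this commit, callers received a ref-counted MemoryRef that kept the backing
    // block alive. Now they receive a fixed-extent, non-owning view into memory that
    // MemorySystem itself owns, so no reference counting is needed.
    std::span<u8, DSP_RAM_SIZE> GetDspMemory() {
        return std::span<u8, DSP_RAM_SIZE>{dsp_mem.get(), DSP_RAM_SIZE};
    }

private:
    std::unique_ptr<u8[]> dsp_mem = std::make_unique<u8[]>(DSP_RAM_SIZE);
};

int main() {
    MemorySystem memory;
    auto dsp = memory.GetDspMemory(); // non-owning view, valid as long as `memory` lives
    dsp[0] = 0x42;                    // direct byte access, same cost as a raw u8*
    return 0;
}
```

The DSP HLE/LLE backends in the diff below follow the same pattern: they cache the span (or a pointer derived from it) instead of owning their own copy of DSP RAM.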

externals/teakra (vendored)

@@ -1 +1 @@
-Subproject commit 01db7cdd00aabcce559a8dddce8798dabb71949b
+Subproject commit ad825418ef8b91e31bea678a9f470988bac6b568


@@ -87,7 +87,7 @@ public:
virtual void PipeWrite(DspPipe pipe_number, std::span<const u8> buffer) = 0;
/// Returns a reference to the array backing DSP memory
-virtual std::array<u8, Memory::DSP_RAM_SIZE>& GetDspMemory() = 0;
+virtual std::span<u8, Memory::DSP_RAM_SIZE> GetDspMemory() = 0;
/// Sets the handler for the interrupts we trigger
virtual void SetInterruptHandler(


@@ -44,12 +44,9 @@ public:
std::size_t GetPipeReadableSize(DspPipe pipe_number) const;
void PipeWrite(DspPipe pipe_number, std::span<const u8> buffer);
-std::array<u8, Memory::DSP_RAM_SIZE>& GetDspMemory();
void SetInterruptHandler(
std::function<void(Service::DSP::InterruptType type, DspPipe pipe)> handler);
-private:
void ResetPipes();
void WriteU16(DspPipe pipe_number, u16 value);
void AudioPipeWriteStructAddresses();
@@ -65,7 +62,7 @@ private:
DspState dsp_state = DspState::Off;
std::array<std::vector<u8>, num_dsp_pipe> pipe_data{};
-HLE::DspMemory dsp_memory;
+HLE::DspMemory* dsp_memory;
std::array<HLE::Source, HLE::num_sources> sources{{
HLE::Source(0), HLE::Source(1), HLE::Source(2), HLE::Source(3), HLE::Source(4),
HLE::Source(5), HLE::Source(6), HLE::Source(7), HLE::Source(8), HLE::Source(9),
@@ -86,7 +83,8 @@ private:
DspHle::Impl::Impl(DspHle& parent_, Memory::MemorySystem& memory, Core::Timing& timing)
: parent(parent_), core_timing(timing) {
-dsp_memory.raw_memory.fill(0);
+dsp_memory = reinterpret_cast<HLE::DspMemory*>(memory.GetDspMemory().data());
+dsp_memory->raw_memory.fill(0);
for (auto& source : sources) {
source.SetMemory(memory);
@@ -257,10 +255,6 @@ void DspHle::Impl::PipeWrite(DspPipe pipe_number, std::span<const u8> buffer) {
}
}
-std::array<u8, Memory::DSP_RAM_SIZE>& DspHle::Impl::GetDspMemory() {
-return dsp_memory.raw_memory;
-}
void DspHle::Impl::SetInterruptHandler(
std::function<void(Service::DSP::InterruptType type, DspPipe pipe)> handler) {
interrupt_handler = handler;
@@ -316,8 +310,8 @@ void DspHle::Impl::AudioPipeWriteStructAddresses() {
size_t DspHle::Impl::CurrentRegionIndex() const {
// The region with the higher frame counter is chosen unless there is wraparound.
// This function only returns a 0 or 1.
-const u16 frame_counter_0 = dsp_memory.region_0.frame_counter;
-const u16 frame_counter_1 = dsp_memory.region_1.frame_counter;
+const u16 frame_counter_0 = dsp_memory->region_0.frame_counter;
+const u16 frame_counter_1 = dsp_memory->region_1.frame_counter;
if (frame_counter_0 == 0xFFFFu && frame_counter_1 != 0xFFFEu) {
// Wraparound has occurred.
@@ -333,11 +327,11 @@ size_t DspHle::Impl::CurrentRegionIndex() const {
}
HLE::SharedMemory& DspHle::Impl::ReadRegion() {
-return CurrentRegionIndex() == 0 ? dsp_memory.region_0 : dsp_memory.region_1;
+return CurrentRegionIndex() == 0 ? dsp_memory->region_0 : dsp_memory->region_1;
}
HLE::SharedMemory& DspHle::Impl::WriteRegion() {
-return CurrentRegionIndex() != 0 ? dsp_memory.region_0 : dsp_memory.region_1;
+return CurrentRegionIndex() != 0 ? dsp_memory->region_0 : dsp_memory->region_1;
}
StereoFrame16 DspHle::Impl::GenerateCurrentFrame() {
@@ -421,8 +415,8 @@ void DspHle::PipeWrite(DspPipe pipe_number, std::span<const u8> buffer) {
impl->PipeWrite(pipe_number, buffer);
}
-std::array<u8, Memory::DSP_RAM_SIZE>& DspHle::GetDspMemory() {
-return impl->GetDspMemory();
+std::span<u8, Memory::DSP_RAM_SIZE> DspHle::GetDspMemory() {
+return impl->dsp_memory->raw_memory;
}
void DspHle::SetInterruptHandler(


@@ -4,14 +4,11 @@
#pragma once
-#include <array>
#include <memory>
#include <vector>
#include "audio_core/audio_types.h"
#include "audio_core/dsp_interface.h"
-#include "common/common_types.h"
#include "core/hle/service/dsp/dsp_dsp.h"
-#include "core/memory.h"
namespace Core {
class Timing;
@@ -36,7 +33,7 @@ public:
std::size_t GetPipeReadableSize(DspPipe pipe_number) const override;
void PipeWrite(DspPipe pipe_number, std::span<const u8> buffer) override;
-std::array<u8, Memory::DSP_RAM_SIZE>& GetDspMemory() override;
+std::span<u8, Memory::DSP_RAM_SIZE> GetDspMemory() override;
void SetInterruptHandler(
std::function<void(Service::DSP::InterruptType type, DspPipe pipe)> handler) override;


@@ -121,7 +121,9 @@ static u8 PipeIndexToSlotIndex(u8 pipe_index, PipeDirection direction) {
}
struct DspLle::Impl final {
-Impl(Core::Timing& timing, bool multithread) : core_timing(timing), multithread(multithread) {
+Impl(Memory::MemorySystem& memory, Core::Timing& timing, bool multithread_)
+: dsp_memory{memory.GetDspMemory()}, config{dsp_memory.data()}, teakra{config},
+core_timing{timing}, multithread{multithread_} {
teakra_slice_event = core_timing.RegisterEvent(
"DSP slice", [this](u64, int late) { TeakraSliceEvent(static_cast<u64>(late)); });
}
@@ -130,6 +132,8 @@ struct DspLle::Impl final {
StopTeakraThread();
}
+std::span<u8, Memory::DSP_RAM_SIZE> dsp_memory;
+Teakra::UserConfig config;
Teakra::Teakra teakra;
u16 pipe_base_waddr = 0;
@@ -189,13 +193,11 @@ struct DspLle::Impl final {
}
u8* GetDspDataPointer(u32 baddr) {
-auto& memory = teakra.GetDspMemory();
-return &memory[DspDataOffset + baddr];
+return &dsp_memory[DspDataOffset + baddr];
}
const u8* GetDspDataPointer(u32 baddr) const {
-auto& memory = teakra.GetDspMemory();
-return &memory[DspDataOffset + baddr];
+return &dsp_memory[DspDataOffset + baddr];
}
PipeStatus GetPipeStatus(u8 pipe_index, PipeDirection direction) const {
@@ -312,7 +314,6 @@ struct DspLle::Impl final {
teakra.Reset();
Dsp1 dsp(buffer);
-auto& dsp_memory = teakra.GetDspMemory();
u8* program = dsp_memory.data();
u8* data = dsp_memory.data() + DspDataOffset;
for (const auto& segment : dsp.segments) {
@@ -403,8 +404,8 @@ void DspLle::PipeWrite(DspPipe pipe_number, std::span<const u8> buffer) {
impl->WritePipe(static_cast<u8>(pipe_number), buffer);
}
-std::array<u8, Memory::DSP_RAM_SIZE>& DspLle::GetDspMemory() {
-return impl->teakra.GetDspMemory();
+std::span<u8, Memory::DSP_RAM_SIZE> DspLle::GetDspMemory() {
+return impl->dsp_memory;
}
void DspLle::SetInterruptHandler(
@@ -469,7 +470,7 @@ DspLle::DspLle(Core::System& system, bool multithread)
DspLle::DspLle(Core::System& system, Memory::MemorySystem& memory, Core::Timing& timing,
bool multithread)
-: DspInterface(system), impl(std::make_unique<Impl>(timing, multithread)) {
+: DspInterface(system), impl(std::make_unique<Impl>(memory, timing, multithread)) {
Teakra::AHBMCallback ahbm;
ahbm.read8 = [&memory](u32 address) -> u8 {
return *memory.GetFCRAMPointer(address - Memory::FCRAM_PADDR);


@@ -31,7 +31,7 @@ public:
std::size_t GetPipeReadableSize(DspPipe pipe_number) const override;
void PipeWrite(DspPipe pipe_number, std::span<const u8> buffer) override;
-std::array<u8, Memory::DSP_RAM_SIZE>& GetDspMemory() override;
+std::span<u8, Memory::DSP_RAM_SIZE> GetDspMemory() override;
void SetInterruptHandler(
std::function<void(Service::DSP::InterruptType type, DspPipe pipe)> handler) override;


@@ -298,7 +298,7 @@ std::unique_ptr<Dynarmic::A32::Jit> ARM_Dynarmic::MakeJit() {
Dynarmic::A32::UserConfig config;
config.callbacks = cb.get();
if (current_page_table) {
-config.page_table = &current_page_table->GetPointerArray();
+config.page_table = &current_page_table->pointers;
}
config.coprocessors[15] = std::make_shared<DynarmicCP15>(cp15_state);
config.define_unpredictable_behaviour = true;


@@ -429,8 +429,6 @@ System::ResultStatus System::Init(Frontend::EmuWindow& emu_window,
dsp_core = std::make_unique<AudioCore::DspLle>(*this, multithread);
}
-memory->SetDSP(*dsp_core);
dsp_core->SetSink(Settings::values.output_type.GetValue(),
Settings::values.output_device.GetValue());
dsp_core->EnableStretching(Settings::values.enable_audio_stretching.GetValue());


@@ -194,9 +194,9 @@ Loader::ResultStatus FileSys::Plugin3GXLoader::Map(
plg_context.plugin_path);
return Loader::ResultStatus::ErrorMemoryAllocationFailed;
}
-auto backing_memory_fb = kernel.memory.GetFCRAMRef(*offset_fb);
+auto backing_memory_fb = kernel.memory.GetFCRAMPointer(*offset_fb);
plg_ldr.SetPluginFBAddr(Memory::FCRAM_PADDR + *offset_fb);
-std::fill(backing_memory_fb.GetPtr(), backing_memory_fb.GetPtr() + _3GX_fb_size, 0);
+std::memset(backing_memory_fb, 0, _3GX_fb_size);
auto vma_heap_fb = process.vm_manager.MapBackingMemory(
_3GX_heap_load_addr, backing_memory_fb, _3GX_fb_size, Kernel::MemoryState::Continuous);
@@ -212,8 +212,8 @@ Loader::ResultStatus FileSys::Plugin3GXLoader::Map(
plg_context.plugin_path);
return Loader::ResultStatus::ErrorMemoryAllocationFailed;
}
-auto backing_memory = kernel.memory.GetFCRAMRef(*offset);
-std::fill(backing_memory.GetPtr(), backing_memory.GetPtr() + block_size - _3GX_fb_size, 0);
+auto backing_memory = kernel.memory.GetFCRAMPointer(*offset);
+std::memset(backing_memory, 0, block_size - _3GX_fb_size);
// Then we map part of the memory, which contains the executable
auto vma = process.vm_manager.MapBackingMemory(_3GX_exe_load_addr, backing_memory, exe_size,
@@ -251,7 +251,7 @@ Loader::ResultStatus FileSys::Plugin3GXLoader::Map(
kernel.memory.WriteBlock(process, _3GX_exe_load_addr, &plugin_header, sizeof(PluginHeader));
// Map plugin heap
-auto backing_memory_heap = kernel.memory.GetFCRAMRef(*offset + exe_size);
+auto backing_memory_heap = kernel.memory.GetFCRAMPointer(*offset + exe_size);
// Map the rest of the memory at the heap location
auto vma_heap = process.vm_manager.MapBackingMemory(
@@ -346,8 +346,8 @@ void FileSys::Plugin3GXLoader::MapBootloader(Kernel::Process& process, Kernel::K
}
// Map bootloader to the offset provided
-auto backing_memory = kernel.memory.GetFCRAMRef(memory_offset);
-std::fill(backing_memory.GetPtr(), backing_memory.GetPtr() + bootloader_memory_size, 0);
+auto backing_memory = kernel.memory.GetFCRAMPointer(memory_offset);
+std::memset(backing_memory, 0, bootloader_memory_size);
auto vma = process.vm_manager.MapBackingMemory(_3GX_exe_load_addr - bootloader_memory_size,
backing_memory, bootloader_memory_size,
Kernel::MemoryState::Continuous);


@@ -5,7 +5,6 @@
#include <algorithm>
#include "common/alignment.h"
-#include "common/memory_ref.h"
#include "core/core.h"
#include "core/hle/ipc.h"
#include "core/hle/kernel/handle_table.h"
@@ -196,23 +195,22 @@ Result TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySystem
// TODO(Subv): Perform permission checks.
// Create a buffer which contains the mapped buffer and two additional guard pages.
-std::shared_ptr<BackingMem> buffer =
-std::make_shared<BufferMem>((num_pages + 2) * Memory::CITRA_PAGE_SIZE);
+const u32 buffer_size = (num_pages + 2) * Memory::CITRA_PAGE_SIZE;
+auto buffer = std::make_unique<u8[]>(buffer_size);
memory.ReadBlock(*src_process, source_address,
-buffer->GetPtr() + Memory::CITRA_PAGE_SIZE + page_offset, size);
+buffer.get() + Memory::CITRA_PAGE_SIZE + page_offset, size);
// Map the guard pages and mapped pages at once.
target_address =
dst_process->vm_manager
.MapBackingMemoryToBase(Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE,
-buffer, static_cast<u32>(buffer->GetSize()),
-Kernel::MemoryState::Shared)
+buffer.get(), buffer_size, Kernel::MemoryState::Shared)
.Unwrap();
// Change the permissions and state of the guard pages.
const VAddr low_guard_address = target_address;
const VAddr high_guard_address =
-low_guard_address + static_cast<VAddr>(buffer->GetSize()) - Memory::CITRA_PAGE_SIZE;
+low_guard_address + buffer_size - Memory::CITRA_PAGE_SIZE;
ASSERT(dst_process->vm_manager.ChangeMemoryState(
low_guard_address, Memory::CITRA_PAGE_SIZE, Kernel::MemoryState::Shared,
Kernel::VMAPermission::ReadWrite, Kernel::MemoryState::Reserved,
@@ -226,8 +224,8 @@ Result TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySystem
target_address += Memory::CITRA_PAGE_SIZE;
cmd_buf[i++] = target_address + page_offset;
-mapped_buffer_context.push_back({permissions, size, source_address,
-target_address + page_offset, std::move(buffer)});
+mapped_buffer_context.emplace_back(permissions, size, source_address,
+target_address + page_offset, std::move(buffer));
break;
}


@@ -23,8 +23,7 @@ struct MappedBufferContext {
u32 size;
VAddr source_address;
VAddr target_address;
-std::shared_ptr<BackingMem> buffer;
+std::unique_ptr<u8[]> buffer;
};
/// Performs IPC command buffer translation from one process to another.


@@ -135,7 +135,7 @@ void KernelSystem::HandleSpecialMapping(VMManager& address_space, const AddressM
return;
}
-auto target_pointer = memory.GetPhysicalRef(area->paddr_base + offset_into_region);
+u8* target_pointer = memory.GetPhysicalPointer(area->paddr_base + offset_into_region);
// TODO(yuriks): This flag seems to have some other effect, but it's unknown what
MemoryState memory_state = mapping.unk_flag ? MemoryState::Static : MemoryState::IO;
@@ -148,16 +148,18 @@ void KernelSystem::HandleSpecialMapping(VMManager& address_space, const AddressM
}
void KernelSystem::MapSharedPages(VMManager& address_space) {
-auto cfg_mem_vma = address_space
-.MapBackingMemory(Memory::CONFIG_MEMORY_VADDR, {config_mem_handler},
-Memory::CONFIG_MEMORY_SIZE, MemoryState::Shared)
-.Unwrap();
+auto cfg_mem_vma =
+address_space
+.MapBackingMemory(Memory::CONFIG_MEMORY_VADDR, config_mem_handler->GetPtr(),
+Memory::CONFIG_MEMORY_SIZE, MemoryState::Shared)
+.Unwrap();
address_space.Reprotect(cfg_mem_vma, VMAPermission::Read);
-auto shared_page_vma = address_space
-.MapBackingMemory(Memory::SHARED_PAGE_VADDR, {shared_page_handler},
-Memory::SHARED_PAGE_SIZE, MemoryState::Shared)
-.Unwrap();
+auto shared_page_vma =
+address_space
+.MapBackingMemory(Memory::SHARED_PAGE_VADDR, shared_page_handler->GetPtr(),
+Memory::SHARED_PAGE_SIZE, MemoryState::Shared)
+.Unwrap();
address_space.Reprotect(shared_page_vma, VMAPermission::Read);
}


@@ -242,13 +242,13 @@ Result Process::HeapAllocate(VAddr* out_addr, VAddr target, u32 size, VMAPermiss
// Maps heap block by block
VAddr interval_target = target;
for (const auto& interval : allocated_fcram) {
-u32 interval_size = interval.upper() - interval.lower();
+const u32 interval_size = interval.upper() - interval.lower();
LOG_DEBUG(Kernel, "Allocated FCRAM region lower={:08X}, upper={:08X}", interval.lower(),
interval.upper());
std::fill(kernel.memory.GetFCRAMPointer(interval.lower()),
kernel.memory.GetFCRAMPointer(interval.upper()), 0);
auto vma = vm_manager.MapBackingMemory(interval_target,
-kernel.memory.GetFCRAMRef(interval.lower()),
+kernel.memory.GetFCRAMPointer(interval.lower()),
interval_size, memory_state);
ASSERT(vma.Succeeded());
vm_manager.Reprotect(vma.Unwrap(), perms);
@@ -276,7 +276,7 @@ Result Process::HeapFree(VAddr target, u32 size) {
// Free heaps block by block
CASCADE_RESULT(auto backing_blocks, vm_manager.GetBackingBlocksForRange(target, size));
for (const auto& [backing_memory, block_size] : backing_blocks) {
-const auto backing_offset = kernel.memory.GetFCRAMOffset(backing_memory.GetPtr());
+const auto backing_offset = kernel.memory.GetFCRAMOffset(backing_memory);
memory_region->Free(backing_offset, block_size);
holding_memory -= MemoryRegionInfo::Interval(backing_offset, backing_offset + block_size);
}
@@ -322,9 +322,9 @@ Result Process::LinearAllocate(VAddr* out_addr, VAddr target, u32 size, VMAPermi
}
}
-auto backing_memory = kernel.memory.GetFCRAMRef(physical_offset);
-std::fill(backing_memory.GetPtr(), backing_memory.GetPtr() + size, 0);
+auto backing_memory = kernel.memory.GetFCRAMPointer(physical_offset);
+std::fill(backing_memory, backing_memory + size, 0);
auto vma = vm_manager.MapBackingMemory(target, backing_memory, size, MemoryState::Continuous);
ASSERT(vma.Succeeded());
vm_manager.Reprotect(vma.Unwrap(), perms);
@@ -410,7 +410,7 @@ ResultVal<VAddr> Process::AllocateThreadLocalStorage() {
// Map the page to the current process' address space.
auto tls_page_addr =
Memory::TLS_AREA_VADDR + static_cast<VAddr>(tls_page) * Memory::CITRA_PAGE_SIZE;
-vm_manager.MapBackingMemory(tls_page_addr, kernel.memory.GetFCRAMRef(*offset),
+vm_manager.MapBackingMemory(tls_page_addr, kernel.memory.GetFCRAMPointer(*offset),
Memory::CITRA_PAGE_SIZE, MemoryState::Locked);
LOG_DEBUG(Kernel, "Allocated TLS page at addr={:08X}", tls_page_addr);


@@ -52,7 +52,7 @@ ResultVal<std::shared_ptr<SharedMemory>> KernelSystem::CreateSharedMemory(
ASSERT_MSG(offset, "Not enough space in region to allocate shared memory!");
std::fill(memory.GetFCRAMPointer(*offset), memory.GetFCRAMPointer(*offset + size), 0);
-shared_memory->backing_blocks = {{memory.GetFCRAMRef(*offset), size}};
+shared_memory->backing_blocks = {{memory.GetFCRAMPointer(*offset), size}};
shared_memory->holding_memory += MemoryRegionInfo::Interval(*offset, *offset + size);
shared_memory->linear_heap_phys_offset = *offset;
@@ -94,7 +94,7 @@ std::shared_ptr<SharedMemory> KernelSystem::CreateSharedMemoryForApplet(
shared_memory->permissions = permissions;
shared_memory->other_permissions = other_permissions;
for (const auto& interval : backing_blocks) {
-shared_memory->backing_blocks.emplace_back(memory.GetFCRAMRef(interval.lower()),
+shared_memory->backing_blocks.emplace_back(memory.GetFCRAMPointer(interval.lower()),
interval.upper() - interval.lower());
std::fill(memory.GetFCRAMPointer(interval.lower()),
memory.GetFCRAMPointer(interval.upper()), 0);


@@ -7,7 +7,6 @@
#include <string>
#include <utility>
#include "common/common_types.h"
-#include "common/memory_ref.h"
#include "core/hle/kernel/object.h"
#include "core/hle/kernel/process.h"
#include "core/hle/result.h"
@@ -87,7 +86,7 @@ private:
/// during creation.
PAddr linear_heap_phys_offset = 0;
/// Backing memory for this shared memory block.
-std::vector<std::pair<MemoryRef, u32>> backing_blocks;
+std::vector<std::pair<u8*, u32>> backing_blocks;
/// Size of the memory block. Page-aligned.
u32 size = 0;
/// Region of memory this block exists in.


@@ -1937,8 +1937,7 @@ u32 SVC::ConvertVaToPa(u32 addr) {
vma->second.type != VMAType::BackingMemory) {
return 0;
}
-return kernel.memory.GetFCRAMOffset(vma->second.backing_memory.GetPtr() + addr -
-vma->second.base) +
+return kernel.memory.GetFCRAMOffset(vma->second.backing_memory + addr - vma->second.base) +
Memory::FCRAM_PADDR;
}
@@ -1967,8 +1966,8 @@ Result SVC::MapProcessMemoryEx(Handle dst_process_handle, u32 dst_address,
auto vma_res = dst_process->vm_manager.MapBackingMemory(
dst_address,
-memory.GetFCRAMRef(vma->second.backing_memory.GetPtr() + offset -
+memory.GetFCRAMPointer(vma->second.backing_memory + offset -
kernel.memory.GetFCRAMPointer(0)),
size, Kernel::MemoryState::Continuous);
if (!vma_res.Succeeded()) {


@@ -29,8 +29,7 @@ bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
type != next.type) {
return false;
}
-if (type == VMAType::BackingMemory &&
-backing_memory.GetPtr() + size != next.backing_memory.GetPtr()) {
+if (type == VMAType::BackingMemory && backing_memory + size != next.backing_memory) {
return false;
}
return true;
@@ -38,24 +37,16 @@ bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
VMManager::VMManager(Memory::MemorySystem& memory, Kernel::Process& proc)
: page_table(std::make_shared<Memory::PageTable>()), memory(memory), process(proc) {
-Reset();
-}
-VMManager::~VMManager() = default;
-void VMManager::Reset() {
-vma_map.clear();
// Initialize the map with a single free region covering the entire managed space.
VirtualMemoryArea initial_vma;
initial_vma.size = MAX_ADDRESS;
vma_map.emplace(initial_vma.base, initial_vma);
-page_table->Clear();
UpdatePageTableForVMA(initial_vma);
}
+VMManager::~VMManager() = default;
VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
if (target >= MAX_ADDRESS) {
return vma_map.end();
@@ -64,7 +55,7 @@ VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
}
}
-ResultVal<VAddr> VMManager::MapBackingMemoryToBase(VAddr base, u32 region_size, MemoryRef memory,
+ResultVal<VAddr> VMManager::MapBackingMemoryToBase(VAddr base, u32 region_size, u8* memory,
u32 size, MemoryState state) {
// Find the first Free VMA.
VMAHandle vma_handle = std::find_if(vma_map.begin(), vma_map.end(), [&](const auto& vma) {
@@ -92,9 +83,9 @@ ResultVal<VAddr> VMManager::MapBackingMemoryToBase(VAddr base, u32 region_size,
return target;
}
-ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, MemoryRef memory,
-u32 size, MemoryState state) {
-ASSERT(memory.GetPtr() != nullptr);
+ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* memory, u32 size,
+MemoryState state) {
+ASSERT(memory != nullptr);
// This is the appropriately sized VMA that will turn into our allocation.
CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
@@ -339,9 +330,8 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
plgldr->OnMemoryChanged(process, Core::System::GetInstance().Kernel());
}
-ResultVal<std::vector<std::pair<MemoryRef, u32>>> VMManager::GetBackingBlocksForRange(VAddr address,
-u32 size) {
-std::vector<std::pair<MemoryRef, u32>> backing_blocks;
+ResultVal<VMManager::BackingBlocks> VMManager::GetBackingBlocksForRange(VAddr address, u32 size) {
+BackingBlocks backing_blocks;
VAddr interval_target = address;
while (interval_target != address + size) {
auto vma = FindVMA(interval_target);


@@ -68,7 +68,7 @@ struct VirtualMemoryArea {
/// Settings for type = BackingMemory
/// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed.
-MemoryRef backing_memory{};
+u8* backing_memory{};
/// Tests if this area can be merged to the right with `next`.
bool CanBeMergedWith(const VirtualMemoryArea& next) const;
@@ -108,14 +108,9 @@ public:
explicit VMManager(Memory::MemorySystem& memory, Kernel::Process& proc);
~VMManager();
-/// Clears the address space map, re-initializing with a single free area.
-void Reset();
/// Finds the VMA in which the given address is included in, or `vma_map.end()`.
VMAHandle FindVMA(VAddr target) const;
-// TODO(yuriks): Should these functions actually return the handle?
/**
* Maps part of a ref-counted block of memory at the first free address after the given base.
*
@@ -126,7 +121,7 @@ public:
* @param state MemoryState tag to attach to the VMA.
* @returns The address at which the memory was mapped.
*/
-ResultVal<VAddr> MapBackingMemoryToBase(VAddr base, u32 region_size, MemoryRef memory, u32 size,
+ResultVal<VAddr> MapBackingMemoryToBase(VAddr base, u32 region_size, u8* memory, u32 size,
MemoryState state);
/**
* Maps an unmanaged host memory pointer at a given address.
@@ -136,8 +131,7 @@ public:
* @param size Size of the mapping.
* @param state MemoryState tag to attach to the VMA.
*/
-ResultVal<VMAHandle> MapBackingMemory(VAddr target, MemoryRef memory, u32 size,
-MemoryState state);
+ResultVal<VMAHandle> MapBackingMemory(VAddr target, u8* memory, u32 size, MemoryState state);
/**
* Updates the memory state and permissions of the specified range. The range's original memory
@@ -167,8 +161,8 @@ public:
void LogLayout(Common::Log::Level log_level) const;
/// Gets a list of backing memory blocks for the specified range
-ResultVal<std::vector<std::pair<MemoryRef, u32>>> GetBackingBlocksForRange(VAddr address,
-u32 size);
+using BackingBlocks = std::vector<std::pair<u8*, u32>>;
+ResultVal<BackingBlocks> GetBackingBlocksForRange(VAddr address, u32 size);
/// Each VMManager has its own page table, which is set as the main one when the owning process
/// is scheduled.


@@ -4,7 +4,6 @@
#include <array>
#include <cstring>
-#include "audio_core/dsp_interface.h"
#include "common/assert.h"
#include "common/atomic_ops.h"
@@ -22,12 +21,6 @@
namespace Memory {
-void PageTable::Clear() {
-pointers.raw.fill(nullptr);
-pointers.refs.fill(MemoryRef());
-attributes.fill(PageType::Unmapped);
-}
class RasterizerCacheMarker {
public:
void Mark(VAddr addr, bool cached) {
@@ -68,53 +61,17 @@ private:
class MemorySystem::Impl {
public:
+Core::System& system;
std::unique_ptr<u8[]> fcram = std::make_unique<u8[]>(Memory::FCRAM_N3DS_SIZE);
std::unique_ptr<u8[]> vram = std::make_unique<u8[]>(Memory::VRAM_SIZE);
std::unique_ptr<u8[]> n3ds_extra_ram = std::make_unique<u8[]>(Memory::N3DS_EXTRA_RAM_SIZE);
+std::unique_ptr<u8[]> dsp_mem = std::make_unique<u8[]>(Memory::DSP_RAM_SIZE);
-Core::System& system;
std::shared_ptr<PageTable> current_page_table = nullptr;
RasterizerCacheMarker cache_marker;
std::vector<std::shared_ptr<PageTable>> page_table_list;
-AudioCore::DspInterface* dsp = nullptr;
-std::shared_ptr<BackingMem> fcram_mem;
-std::shared_ptr<BackingMem> vram_mem;
-std::shared_ptr<BackingMem> n3ds_extra_ram_mem;
-std::shared_ptr<BackingMem> dsp_mem;
-Impl(Core::System& system_);
+Impl(Core::System& system_) : system{system_} {}
-const u8* GetPtr(Region r) const {
-switch (r) {
-case Region::VRAM:
-return vram.get();
-case Region::DSP:
-return dsp->GetDspMemory().data();
-case Region::FCRAM:
-return fcram.get();
-case Region::N3DS:
-return n3ds_extra_ram.get();
-default:
-UNREACHABLE();
-}
-}
-u8* GetPtr(Region r) {
-switch (r) {
-case Region::VRAM:
-return vram.get();
-case Region::DSP:
-return dsp->GetDspMemory().data();
-case Region::FCRAM:
-return fcram.get();
-case Region::N3DS:
-return n3ds_extra_ram.get();
-default:
-UNREACHABLE();
-}
-}
u32 GetSize(Region r) const {
switch (r) {
@@ -233,26 +190,26 @@ public:
}
}
-MemoryRef GetPointerForRasterizerCache(VAddr addr) const {
+u8* GetPointerForRasterizerCache(VAddr addr) const {
if (addr >= LINEAR_HEAP_VADDR && addr < LINEAR_HEAP_VADDR_END) {
-return {fcram_mem, addr - LINEAR_HEAP_VADDR};
+return fcram.get() + addr - LINEAR_HEAP_VADDR;
}
if (addr >= NEW_LINEAR_HEAP_VADDR && addr < NEW_LINEAR_HEAP_VADDR_END) {
-return {fcram_mem, addr - NEW_LINEAR_HEAP_VADDR};
+return fcram.get() + addr - NEW_LINEAR_HEAP_VADDR;
}
if (addr >= VRAM_VADDR && addr < VRAM_VADDR_END) {
-return {vram_mem, addr - VRAM_VADDR};
+return vram.get() + addr - VRAM_VADDR;
}
if (addr >= PLUGIN_3GX_FB_VADDR && addr < PLUGIN_3GX_FB_VADDR_END) {
auto plg_ldr = Service::PLGLDR::GetService(system);
if (plg_ldr) {
-return {fcram_mem,
-addr - PLUGIN_3GX_FB_VADDR + plg_ldr->GetPluginFBAddr() - FCRAM_PADDR};
+return fcram.get() + addr - PLUGIN_3GX_FB_VADDR + plg_ldr->GetPluginFBAddr() -
+FCRAM_PADDR;
}
}
UNREACHABLE();
-return MemoryRef{};
+return nullptr;
}
void RasterizerFlushVirtualRegion(VAddr start, u32 size, FlushMode mode) {
@@ -294,33 +251,8 @@ public:
}
};
-// We use this rather than BufferMem because we don't want new objects to be allocated when
-// deserializing. This avoids unnecessary memory thrashing.
-template <Region R>
-class MemorySystem::BackingMemImpl : public BackingMem {
-public:
-explicit BackingMemImpl(MemorySystem::Impl& impl_) : impl(impl_) {}
-u8* GetPtr() override {
-return impl.GetPtr(R);
-}
-const u8* GetPtr() const override {
-return impl.GetPtr(R);
-}
-std::size_t GetSize() const override {
-return impl.GetSize(R);
-}
-private:
-MemorySystem::Impl& impl;
-};
-MemorySystem::Impl::Impl(Core::System& system_)
-: system{system_}, fcram_mem(std::make_shared<BackingMemImpl<Region::FCRAM>>(*this)),
-vram_mem(std::make_shared<BackingMemImpl<Region::VRAM>>(*this)),
-n3ds_extra_ram_mem(std::make_shared<BackingMemImpl<Region::N3DS>>(*this)),
-dsp_mem(std::make_shared<BackingMemImpl<Region::DSP>>(*this)) {}
MemorySystem::MemorySystem(Core::System& system) : impl(std::make_unique<Impl>(system)) {}
MemorySystem::~MemorySystem() = default;
void MemorySystem::SetCurrentPageTable(std::shared_ptr<PageTable> page_table) {
@@ -335,10 +267,9 @@ void MemorySystem::RasterizerFlushVirtualRegion(VAddr start, u32 size, FlushMode
impl->RasterizerFlushVirtualRegion(start, size, mode);
}
-void MemorySystem::MapPages(PageTable& page_table, u32 base, u32 size, MemoryRef memory,
-PageType type) {
-LOG_DEBUG(HW_Memory, "Mapping {} onto {:08X}-{:08X}", (void*)memory.GetPtr(),
-base * CITRA_PAGE_SIZE, (base + size) * CITRA_PAGE_SIZE);
+void MemorySystem::MapPages(PageTable& page_table, u32 base, u32 size, u8* memory, PageType type) {
+LOG_DEBUG(HW_Memory, "Mapping {} onto {:08X}-{:08X}", fmt::ptr(memory), base * CITRA_PAGE_SIZE,
+(base + size) * CITRA_PAGE_SIZE);
if (impl->system.IsPoweredOn()) {
RasterizerFlushVirtualRegion(base << CITRA_PAGE_BITS, size * CITRA_PAGE_SIZE,
@@ -347,7 +278,7 @@ void MemorySystem::MapPages(PageTable& page_table, u32 base, u32 size, MemoryRef
u32 end = base + size;
while (base != end) {
-ASSERT_MSG(base < PAGE_TABLE_NUM_ENTRIES, "out of range mapping at {:08X}", base);
+ASSERT_MSG(base < PageTable::NUM_ENTRIES, "out of range mapping at {:08X}", base);
page_table.attributes[base] = type;
page_table.pointers[base] = memory;
@@ -359,12 +290,13 @@ void MemorySystem::MapPages(PageTable& page_table, u32 base, u32 size, MemoryRef
}
base += 1;
-if (memory != nullptr && memory.GetSize() > CITRA_PAGE_SIZE)
+if (memory != nullptr /*&& memory.GetSize() > CITRA_PAGE_SIZE*/) {
memory += CITRA_PAGE_SIZE;
+}
}
}
-void MemorySystem::MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, MemoryRef target) {
+void MemorySystem::MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, u8* target) {
ASSERT_MSG((size & CITRA_PAGE_MASK) == 0, "non-page aligned size: {:08X}", size);
ASSERT_MSG((base & CITRA_PAGE_MASK) == 0, "non-page aligned base: {:08X}", base);
MapPages(page_table, base / CITRA_PAGE_SIZE, size / CITRA_PAGE_SIZE, target, PageType::Memory);
@@ -377,7 +309,7 @@ void MemorySystem::UnmapRegion(PageTable& page_table, VAddr base, u32 size) {
PageType::Unmapped);
}
-MemoryRef MemorySystem::GetPointerForRasterizerCache(VAddr addr) const {
+u8* MemorySystem::GetPointerForRasterizerCache(VAddr addr) const {
return impl->GetPointerForRasterizerCache(addr);
}
@@ -507,7 +439,7 @@ bool MemorySystem::WriteExclusive(const VAddr vaddr, const T data, const T expec
case PageType::RasterizerCachedMemory: {
RasterizerFlushVirtualRegion(vaddr, sizeof(T), FlushMode::Invalidate);
const auto volatile_pointer =
-reinterpret_cast<volatile T*>(GetPointerForRasterizerCache(vaddr).GetPtr());
+reinterpret_cast<volatile T*>(GetPointerForRasterizerCache(vaddr));
return Common::AtomicCompareAndSwap(volatile_pointer, data, expected);
}
default:
@@ -532,7 +464,7 @@ bool MemorySystem::IsValidVirtualAddress(const Kernel::Process& process, const V
}
bool MemorySystem::IsValidPhysicalAddress(const PAddr paddr) const {
-return GetPhysicalRef(paddr);
+return !GetPhysicalSpan(paddr).empty();
}
u8* MemorySystem::GetPointer(const VAddr vaddr) {
@@ -583,10 +515,10 @@ std::string MemorySystem::ReadCString(VAddr vaddr, std::size_t max_length) {
}
u8* MemorySystem::GetPhysicalPointer(PAddr address) const {
-return GetPhysicalRef(address);
+return GetPhysicalSpan(address).data();
}
-MemoryRef MemorySystem::GetPhysicalRef(PAddr address) const {
+std::span<u8> MemorySystem::GetPhysicalSpan(PAddr address) const {
constexpr std::array memory_areas = {
std::make_pair(VRAM_PADDR, VRAM_SIZE),
std::make_pair(DSP_RAM_PADDR, DSP_RAM_SIZE),
@@ -603,33 +535,33 @@ MemoryRef MemorySystem::GetPhysicalRef(PAddr address) const {
if (area == memory_areas.end()) {
LOG_ERROR(HW_Memory, "Unknown GetPhysicalPointer @ {:#08X} at PC {:#08X}", address,
impl->GetPC());
-return nullptr;
+return {};
}
-u32 offset_into_region = address - area->first;
+const u32 offset_into_region = address - area->first;
+if (offset_into_region > area->second) {
+return {};
+}
-std::shared_ptr<BackingMem> target_mem = nullptr;
+u8* target_mem = nullptr;
switch (area->first) {
case VRAM_PADDR:
-target_mem = impl->vram_mem;
+target_mem = impl->vram.get();
break;
case DSP_RAM_PADDR:
-target_mem = impl->dsp_mem;
+target_mem = impl->dsp_mem.get();
break;
case FCRAM_PADDR:
-target_mem = impl->fcram_mem;
+target_mem = impl->fcram.get();
break;
case N3DS_EXTRA_RAM_PADDR:
-target_mem = impl->n3ds_extra_ram_mem;
+target_mem = impl->n3ds_extra_ram.get();
break;
default:
UNREACHABLE();
}
-if (offset_into_region > target_mem->GetSize()) {
-return {nullptr};
-}
-return {target_mem, offset_into_region};
+return std::span{target_mem + offset_into_region, area->second - offset_into_region};
}
std::vector<VAddr> MemorySystem::PhysicalToVirtualAddressForRasterizer(PAddr addr) {
@@ -889,13 +821,8 @@ const u8* MemorySystem::GetFCRAMPointer(std::size_t offset) const {
return impl->fcram.get() + offset;
}
-MemoryRef MemorySystem::GetFCRAMRef(std::size_t offset) const {
-ASSERT(offset <= Memory::FCRAM_N3DS_SIZE);
-return MemoryRef(impl->fcram_mem, offset);
-}
-void MemorySystem::SetDSP(AudioCore::DspInterface& dsp) {
-impl->dsp = &dsp;
+std::span<u8, DSP_RAM_SIZE> MemorySystem::GetDspMemory() const {
+return std::span<u8, DSP_RAM_SIZE>{impl->dsp_mem.get(), DSP_RAM_SIZE};
}
} // namespace Memory


@@ -7,7 +7,6 @@
#include <cstddef>
#include <string>
#include "common/common_types.h"
-#include "common/memory_ref.h"
namespace Kernel {
class Process;
@@ -30,7 +29,6 @@ namespace Memory {
constexpr u32 CITRA_PAGE_SIZE = 0x1000;
constexpr u32 CITRA_PAGE_MASK = CITRA_PAGE_SIZE - 1;
constexpr int CITRA_PAGE_BITS = 12;
-constexpr std::size_t PAGE_TABLE_NUM_ENTRIES = 1 << (32 - CITRA_PAGE_BITS);
enum class PageType {
/// Page is unmapped and should cause an access error.
@@ -49,54 +47,23 @@ enum class PageType {
* requires an indexed fetch and a check for NULL.
*/
struct PageTable {
+PageTable() = default;
+~PageTable() noexcept = default;
+PageTable(const PageTable&) = delete;
+PageTable& operator=(const PageTable&) = delete;
+PageTable(PageTable&&) noexcept = default;
+PageTable& operator=(PageTable&&) noexcept = default;
+static constexpr std::size_t NUM_ENTRIES = 1 << (32 - CITRA_PAGE_BITS);
/**
* Array of memory pointers backing each page. An entry can only be non-null if the
* corresponding entry in the `attributes` array is of type `Memory`.
*/
+std::array<u8*, NUM_ENTRIES> pointers{};
+std::array<PageType, NUM_ENTRIES> attributes{};
-// The reason for this rigmarole is to keep the 'raw' and 'refs' arrays in sync.
-// We need 'raw' for dynarmic and 'refs' for serialization
-struct Pointers {
-struct Entry {
-Entry(Pointers& pointers_, VAddr idx_) : pointers(pointers_), idx(idx_) {}
-Entry& operator=(MemoryRef value) {
-pointers.raw[idx] = value.GetPtr();
-pointers.refs[idx] = std::move(value);
-return *this;
-}
-operator u8*() {
-return pointers.raw[idx];
-}
-private:
-Pointers& pointers;
-VAddr idx;
-};
-Entry operator[](std::size_t idx) {
-return Entry(*this, static_cast<VAddr>(idx));
-}
-private:
-std::array<u8*, PAGE_TABLE_NUM_ENTRIES> raw;
-std::array<MemoryRef, PAGE_TABLE_NUM_ENTRIES> refs;
-friend struct PageTable;
-};
-Pointers pointers;
-/**
-* Array of fine grained page attributes. If it is set to any value other than `Memory`, then
-* the corresponding entry in `pointers` MUST be set to null.
-*/
-std::array<PageType, PAGE_TABLE_NUM_ENTRIES> attributes;
-std::array<u8*, PAGE_TABLE_NUM_ENTRIES>& GetPointerArray() {
-return pointers.raw;
-}
void Clear();
};
@@ -235,7 +202,7 @@ public:
* @param size The amount of bytes to map. Must be page-aligned.
* @param target Buffer with the memory backing the mapping. Must be of length at least `size`.
*/
-void MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, MemoryRef target);
+void MapMemoryRegion(PageTable& page_table, VAddr base, u32 size, u8* target);
void UnmapRegion(PageTable& page_table, VAddr base, u32 size);
@@ -510,7 +477,7 @@ public:
u8* GetPhysicalPointer(PAddr address) const;
/// Returns a reference to the memory region beginning at the specified physical address
-MemoryRef GetPhysicalRef(PAddr address) const;
+std::span<u8> GetPhysicalSpan(PAddr address) const;
/// Determines if the given VAddr is valid for the specified process.
bool IsValidVirtualAddress(const Kernel::Process& process, VAddr vaddr);
@@ -527,16 +494,13 @@ public:
/// Gets pointer in FCRAM with given offset
const u8* GetFCRAMPointer(std::size_t offset) const;
-/// Gets a serializable ref to FCRAM with the given offset
-MemoryRef GetFCRAMRef(std::size_t offset) const;
/// Registers page table for rasterizer cache marking
void RegisterPageTable(std::shared_ptr<PageTable> page_table);
/// Unregisters page table for rasterizer cache marking
void UnregisterPageTable(std::shared_ptr<PageTable> page_table);
-void SetDSP(AudioCore::DspInterface& dsp);
+std::span<u8, DSP_RAM_SIZE> GetDspMemory() const;
void RasterizerFlushVirtualRegion(VAddr start, u32 size, FlushMode mode);
@@ -556,17 +520,13 @@ private:
* Since the cache only happens on linear heap or VRAM, we know the exact physical address and
* pointer of such virtual address
*/
-MemoryRef GetPointerForRasterizerCache(VAddr addr) const;
+u8* GetPointerForRasterizerCache(VAddr addr) const;
-void MapPages(PageTable& page_table, u32 base, u32 size, MemoryRef memory, PageType type);
+void MapPages(PageTable& page_table, u32 base, u32 size, u8* memory, PageType type);
private:
class Impl;
std::unique_ptr<Impl> impl;
-public:
-template <Region R>
-class BackingMemImpl;
};
} // namespace Memory


@@ -32,7 +32,7 @@ TEST_CASE("Memory Basics", "[kernel][memory]") {
CHECK(vma != manager->vma_map.end());
CHECK(vma->second.size == static_cast<u32>(block.GetSize()));
CHECK(vma->second.type == Kernel::VMAType::BackingMemory);
-CHECK(vma->second.backing_memory.GetPtr() == block.GetPtr());
+CHECK(vma->second.backing_memory == block.GetPtr());
CHECK(vma->second.meminfo_state == Kernel::MemoryState::Private);
}
@@ -50,7 +50,7 @@ TEST_CASE("Memory Basics", "[kernel][memory]") {
auto vma = manager->FindVMA(Memory::HEAP_VADDR);
CHECK(vma != manager->vma_map.end());
CHECK(vma->second.type == Kernel::VMAType::Free);
-CHECK(vma->second.backing_memory.GetPtr() == nullptr);
+CHECK(vma->second.backing_memory == nullptr);
}
SECTION("changing memory permissions") {


@@ -1000,12 +1000,12 @@ void RasterizerCache<T>::UploadSurface(Surface& surface, SurfaceInterval interva
const auto staging = runtime.FindStaging(
load_info.width * load_info.height * surface.GetInternalBytesPerPixel(), true);
-MemoryRef source_ptr = memory.GetPhysicalRef(load_info.addr);
-if (!source_ptr) [[unlikely]] {
+auto source_span = memory.GetPhysicalSpan(load_info.addr);
+if (source_span.empty()) [[unlikely]] {
return;
}
-const auto upload_data = source_ptr.GetWriteBytes(load_info.end - load_info.addr);
+const auto upload_data = source_span.subspan(0, load_info.end - load_info.addr);
DecodeTexture(load_info, load_info.addr, load_info.end, upload_data, staging.mapped,
runtime.NeedsConversion(surface.pixel_format));
@@ -1048,12 +1048,12 @@ bool RasterizerCache<T>::UploadCustomSurface(SurfaceId surface_id, SurfaceInterv
const SurfaceParams load_info = surface.FromInterval(interval);
ASSERT(load_info.addr >= surface.addr && load_info.end <= surface.end);
-MemoryRef source_ptr = memory.GetPhysicalRef(load_info.addr);
-if (!source_ptr) [[unlikely]] {
+auto source_span = memory.GetPhysicalSpan(load_info.addr);
+if (source_span.empty()) [[unlikely]] {
return false;
}
-const auto upload_data = source_ptr.GetWriteBytes(load_info.end - load_info.addr);
+const auto upload_data = source_span.subspan(0, load_info.end - load_info.addr);
const u64 hash = ComputeHash(load_info, upload_data);
const u32 level = surface.LevelOf(load_info.addr);
@@ -1108,12 +1108,12 @@ void RasterizerCache<T>::DownloadSurface(Surface& surface, SurfaceInterval inter
};
surface.Download(download, staging);
-MemoryRef dest_ptr = memory.GetPhysicalRef(flush_start);
-if (!dest_ptr) [[unlikely]] {
+auto dest_span = memory.GetPhysicalSpan(flush_start);
+if (dest_span.empty()) [[unlikely]] {
return;
}
-const auto download_dest = dest_ptr.GetWriteBytes(flush_end - flush_start);
+const auto download_dest = dest_span.subspan(0, flush_end - flush_start);
EncodeTexture(flush_info, flush_start, flush_end, staging.mapped, download_dest,
runtime.NeedsConversion(surface.pixel_format));
}
@@ -1124,29 +1124,29 @@ void RasterizerCache<T>::DownloadFillSurface(Surface& surface, SurfaceInterval i
const u32 flush_end = boost::icl::last_next(interval);
ASSERT(flush_start >= surface.addr && flush_end <= surface.end);
-MemoryRef dest_ptr = memory.GetPhysicalRef(flush_start);
-if (!dest_ptr) [[unlikely]] {
+auto dest_span = memory.GetPhysicalSpan(flush_start);
+if (dest_span.empty()) [[unlikely]] {
return;
}
const u32 start_offset = flush_start - surface.addr;
const u32 download_size =
-std::clamp(flush_end - flush_start, 0u, static_cast<u32>(dest_ptr.GetSize()));
+std::clamp(flush_end - flush_start, 0u, static_cast<u32>(dest_span.size()));
const u32 coarse_start_offset = start_offset - (start_offset % surface.fill_size);
const u32 backup_bytes = start_offset % surface.fill_size;
std::array<u8, 4> backup_data;
if (backup_bytes) {
-std::memcpy(backup_data.data(), &dest_ptr[coarse_start_offset], backup_bytes);
+std::memcpy(backup_data.data(), &dest_span[coarse_start_offset], backup_bytes);
}
for (u32 offset = coarse_start_offset; offset < download_size; offset += surface.fill_size) {
-std::memcpy(&dest_ptr[offset], &surface.fill_data[0],
+std::memcpy(&dest_span[offset], &surface.fill_data[0],
std::min(surface.fill_size, download_size - offset));
}
if (backup_bytes) {
-std::memcpy(&dest_ptr[coarse_start_offset], &backup_data[0], backup_bytes);
+std::memcpy(&dest_span[coarse_start_offset], &backup_data[0], backup_bytes);
}
}


@@ -219,14 +219,14 @@ void RasterizerVulkan::SetupVertexArray()
u32 data_size = loader.byte_count * vertex_num;
res_cache.FlushRegion(data_addr, data_size);
-const MemoryRef src_ref = memory.GetPhysicalRef(data_addr);
-if (src_ref.GetSize() < data_size) {
+const auto src_span = memory.GetPhysicalSpan(data_addr);
+if (src_span.size() < data_size) {
LOG_ERROR(Render_Vulkan,
"Vertex buffer size {} exceeds available space {} at address {:#016X}",
-data_size, src_ref.GetSize(), data_addr);
+data_size, src_span.size(), data_addr);
}
-const u8* src_ptr = src_ref.GetPtr();
+const u8* src_ptr = src_span.data();
u8* dst_ptr = array_ptr + buffer_offset;
// Align stride up if required by Vulkan implementation.