Mirror of https://github.com/PabloMK7/citra.git
	kernel: Improvements to process cleanup. (#6680)
* kernel: Properly clean up process threads on exit.
* kernel: Track process-owned memory and free on destruction.
* apt: Implement DoApplicationJump via home menu when available.
* kernel: Move TLS allocation management to owning process.
parent 8b6b58a364
commit 9cb14044ec

11 changed files with 272 additions and 179 deletions
		|  | @ -135,10 +135,10 @@ public: | |||
|     std::shared_ptr<Process> CreateProcess(std::shared_ptr<CodeSet> code_set); | ||||
| 
 | ||||
|     /**
 | ||||
|      * Removes a process from the kernel process list | ||||
|      * @param process Process to remove | ||||
|      * Terminates a process, killing its threads and removing it from the process list. | ||||
|      * @param process Process to terminate. | ||||
|      */ | ||||
|     void RemoveProcess(std::shared_ptr<Process> process); | ||||
|     void TerminateProcess(std::shared_ptr<Process> process); | ||||
| 
 | ||||
|     /**
 | ||||
|      * Creates and returns a new thread. The new thread is immediately scheduled | ||||
|  | @ -208,7 +208,7 @@ public: | |||
|      * @param name Optional object name, used for debugging purposes. | ||||
|      */ | ||||
|     ResultVal<std::shared_ptr<SharedMemory>> CreateSharedMemory( | ||||
|         Process* owner_process, u32 size, MemoryPermission permissions, | ||||
|         std::shared_ptr<Process> owner_process, u32 size, MemoryPermission permissions, | ||||
|         MemoryPermission other_permissions, VAddr address = 0, | ||||
|         MemoryRegion region = MemoryRegion::BASE, std::string name = "Unknown"); | ||||
| 
 | ||||
|  |  | |||
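The kernel.h hunk above renames RemoveProcess to TerminateProcess and switches CreateSharedMemory to take the owning process as a std::shared_ptr. A minimal sketch of how a caller resolves a process from a guest handle and hands it to the new API (this mirrors the SVC::TerminateProcess handler added later in this commit; names come from the diff):

    // Sketch only: resolve a Process from a guest handle, then let the kernel tear it down.
    std::shared_ptr<Kernel::Process> process =
        kernel.GetCurrentProcess()->handle_table.Get<Kernel::Process>(handle);
    if (process != nullptr) {
        // Stops the process' threads, frees its memory, and removes it from the process list.
        kernel.TerminateProcess(process);
    }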
|  | @ -79,7 +79,18 @@ std::shared_ptr<Process> KernelSystem::CreateProcess(std::shared_ptr<CodeSet> co | |||
|     return process; | ||||
| } | ||||
| 
 | ||||
| void KernelSystem::RemoveProcess(std::shared_ptr<Process> process) { | ||||
| void KernelSystem::TerminateProcess(std::shared_ptr<Process> process) { | ||||
|     LOG_INFO(Kernel_SVC, "Process {} exiting", process->process_id); | ||||
| 
 | ||||
|     ASSERT_MSG(process->status == ProcessStatus::Running, "Process has already exited"); | ||||
|     process->status = ProcessStatus::Exited; | ||||
| 
 | ||||
|     // Stop all process threads.
 | ||||
|     for (u32 core = 0; core < Core::GetNumCores(); core++) { | ||||
|         GetThreadManager(core).TerminateProcessThreads(process); | ||||
|     } | ||||
| 
 | ||||
|     process->Exit(); | ||||
|     std::erase(process_list, process); | ||||
| } | ||||
| 
 | ||||
|  | @ -268,6 +279,7 @@ ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission per | |||
|         interval_target += interval_size; | ||||
|     } | ||||
| 
 | ||||
|     holding_memory += allocated_fcram; | ||||
|     memory_used += size; | ||||
|     resource_limit->current_commit += size; | ||||
| 
 | ||||
|  | @ -288,13 +300,14 @@ ResultCode Process::HeapFree(VAddr target, u32 size) { | |||
| 
 | ||||
|     // Free heaps block by block
 | ||||
|     CASCADE_RESULT(auto backing_blocks, vm_manager.GetBackingBlocksForRange(target, size)); | ||||
|     for (const auto& [backing_memory, block_size] : backing_blocks) { | ||||
|         memory_region->Free(kernel.memory.GetFCRAMOffset(backing_memory.GetPtr()), block_size); | ||||
|     for (const auto& backing_block : backing_blocks) { | ||||
|         memory_region->Free(backing_block.lower(), backing_block.upper() - backing_block.lower()); | ||||
|     } | ||||
| 
 | ||||
|     ResultCode result = vm_manager.UnmapRange(target, size); | ||||
|     ASSERT(result.IsSuccess()); | ||||
| 
 | ||||
|     holding_memory -= backing_blocks; | ||||
|     memory_used -= size; | ||||
|     resource_limit->current_commit -= size; | ||||
| 
 | ||||
|  | @ -340,6 +353,7 @@ ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission p | |||
|     ASSERT(vma.Succeeded()); | ||||
|     vm_manager.Reprotect(vma.Unwrap(), perms); | ||||
| 
 | ||||
|     holding_memory += MemoryRegionInfo::Interval(physical_offset, physical_offset + size); | ||||
|     memory_used += size; | ||||
|     resource_limit->current_commit += size; | ||||
| 
 | ||||
|  | @ -365,15 +379,86 @@ ResultCode Process::LinearFree(VAddr target, u32 size) { | |||
|         return result; | ||||
|     } | ||||
| 
 | ||||
|     memory_used -= size; | ||||
|     resource_limit->current_commit -= size; | ||||
| 
 | ||||
|     u32 physical_offset = target - GetLinearHeapAreaAddress(); // relative to FCRAM
 | ||||
|     memory_region->Free(physical_offset, size); | ||||
| 
 | ||||
|     holding_memory -= MemoryRegionInfo::Interval(physical_offset, physical_offset + size); | ||||
|     memory_used -= size; | ||||
|     resource_limit->current_commit -= size; | ||||
| 
 | ||||
|     return RESULT_SUCCESS; | ||||
| } | ||||
| 
 | ||||
| ResultVal<VAddr> Process::AllocateThreadLocalStorage() { | ||||
|     std::size_t tls_page; | ||||
|     std::size_t tls_slot; | ||||
|     bool needs_allocation = true; | ||||
| 
 | ||||
|     // Iterate over all the allocated pages, and try to find one where not all slots are used.
 | ||||
|     for (tls_page = 0; tls_page < tls_slots.size(); ++tls_page) { | ||||
|         const auto& page_tls_slots = tls_slots[tls_page]; | ||||
|         if (!page_tls_slots.all()) { | ||||
|             // We found a page with at least one free slot, find which slot it is.
 | ||||
|             for (tls_slot = 0; tls_slot < page_tls_slots.size(); ++tls_slot) { | ||||
|                 if (!page_tls_slots.test(tls_slot)) { | ||||
|                     needs_allocation = false; | ||||
|                     break; | ||||
|                 } | ||||
|             } | ||||
| 
 | ||||
|             if (!needs_allocation) { | ||||
|                 break; | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     if (needs_allocation) { | ||||
|         tls_page = tls_slots.size(); | ||||
|         tls_slot = 0; | ||||
| 
 | ||||
|         LOG_DEBUG(Kernel, "Allocating new TLS page in slot {}", tls_page); | ||||
| 
 | ||||
|         // There are no already-allocated pages with free slots, let's allocate a new one.
 | ||||
|         // TLS pages are allocated from the BASE region in the linear heap.
 | ||||
|         auto base_memory_region = kernel.GetMemoryRegion(MemoryRegion::BASE); | ||||
| 
 | ||||
|         // Allocate some memory from the end of the linear heap for this region.
 | ||||
|         auto offset = base_memory_region->LinearAllocate(Memory::CITRA_PAGE_SIZE); | ||||
|         if (!offset) { | ||||
|             LOG_ERROR(Kernel_SVC, | ||||
|                       "Not enough space in BASE linear region to allocate a new TLS page"); | ||||
|             return ERR_OUT_OF_MEMORY; | ||||
|         } | ||||
| 
 | ||||
|         holding_tls_memory += | ||||
|             MemoryRegionInfo::Interval(*offset, *offset + Memory::CITRA_PAGE_SIZE); | ||||
|         memory_used += Memory::CITRA_PAGE_SIZE; | ||||
| 
 | ||||
|         // The page is completely available at the start.
 | ||||
|         tls_slots.emplace_back(0); | ||||
| 
 | ||||
|         // Map the page to the current process' address space.
 | ||||
|         auto tls_page_addr = | ||||
|             Memory::TLS_AREA_VADDR + static_cast<VAddr>(tls_page) * Memory::CITRA_PAGE_SIZE; | ||||
|         vm_manager.MapBackingMemory(tls_page_addr, kernel.memory.GetFCRAMRef(*offset), | ||||
|                                     Memory::CITRA_PAGE_SIZE, MemoryState::Locked); | ||||
| 
 | ||||
|         LOG_DEBUG(Kernel, "Allocated TLS page at addr={:08X}", tls_page_addr); | ||||
|     } else { | ||||
|         LOG_DEBUG(Kernel, "Allocating TLS in existing page slot {}", tls_page); | ||||
|     } | ||||
| 
 | ||||
|     // Mark the slot as used
 | ||||
|     tls_slots[tls_page].set(tls_slot); | ||||
| 
 | ||||
|     auto tls_address = Memory::TLS_AREA_VADDR + | ||||
|                        static_cast<VAddr>(tls_page) * Memory::CITRA_PAGE_SIZE + | ||||
|                        static_cast<VAddr>(tls_slot) * Memory::TLS_ENTRY_SIZE; | ||||
|     kernel.memory.ZeroBlock(*this, tls_address, Memory::TLS_ENTRY_SIZE); | ||||
| 
 | ||||
|     return tls_address; | ||||
| } | ||||
| 
 | ||||
| ResultCode Process::Map(VAddr target, VAddr source, u32 size, VMAPermission perms, | ||||
|                         bool privileged) { | ||||
|     LOG_DEBUG(Kernel, "Map memory target={:08X}, source={:08X}, size={:08X}, perms={:08X}", target, | ||||
|  | @ -419,7 +504,9 @@ ResultCode Process::Map(VAddr target, VAddr source, u32 size, VMAPermission perm | |||
| 
 | ||||
|     CASCADE_RESULT(auto backing_blocks, vm_manager.GetBackingBlocksForRange(source, size)); | ||||
|     VAddr interval_target = target; | ||||
|     for (const auto& [backing_memory, block_size] : backing_blocks) { | ||||
|     for (const auto& backing_block : backing_blocks) { | ||||
|         auto backing_memory = kernel.memory.GetFCRAMRef(backing_block.lower()); | ||||
|         auto block_size = backing_block.upper() - backing_block.lower(); | ||||
|         auto target_vma = | ||||
|             vm_manager.MapBackingMemory(interval_target, backing_memory, block_size, target_state); | ||||
|         ASSERT(target_vma.Succeeded()); | ||||
|  | @ -471,6 +558,42 @@ ResultCode Process::Unmap(VAddr target, VAddr source, u32 size, VMAPermission pe | |||
|     return RESULT_SUCCESS; | ||||
| } | ||||
| 
 | ||||
| void Process::FreeAllMemory() { | ||||
|     if (memory_region == nullptr || resource_limit == nullptr) { | ||||
|         return; | ||||
|     } | ||||
| 
 | ||||
|     // Free any heap/linear memory allocations.
 | ||||
|     for (auto& entry : holding_memory) { | ||||
|         LOG_DEBUG(Kernel, "Freeing process memory region 0x{:08X} - 0x{:08X}", entry.lower(), | ||||
|                   entry.upper()); | ||||
|         auto size = entry.upper() - entry.lower(); | ||||
|         memory_region->Free(entry.lower(), size); | ||||
|         memory_used -= size; | ||||
|         resource_limit->current_commit -= size; | ||||
|     } | ||||
|     holding_memory.clear(); | ||||
| 
 | ||||
|     // Free any TLS memory allocations.
 | ||||
|     auto base_memory_region = kernel.GetMemoryRegion(MemoryRegion::BASE); | ||||
|     for (auto& entry : holding_tls_memory) { | ||||
|         LOG_DEBUG(Kernel, "Freeing process TLS memory region 0x{:08X} - 0x{:08X}", entry.lower(), | ||||
|                   entry.upper()); | ||||
|         auto size = entry.upper() - entry.lower(); | ||||
|         base_memory_region->Free(entry.lower(), size); | ||||
|         memory_used -= size; | ||||
|     } | ||||
|     holding_tls_memory.clear(); | ||||
|     tls_slots.clear(); | ||||
| 
 | ||||
|     // Diagnostics for debugging.
 | ||||
|     // TODO: The way certain non-application shared memory is allocated can result in very slight
 | ||||
|     // leaks in these values still.
 | ||||
|     LOG_DEBUG(Kernel, "Remaining memory used after process cleanup: 0x{:08X}", memory_used); | ||||
|     LOG_DEBUG(Kernel, "Remaining memory resource commit after process cleanup: 0x{:08X}", | ||||
|               resource_limit->current_commit); | ||||
| } | ||||
| 
 | ||||
| Kernel::Process::Process(KernelSystem& kernel) | ||||
|     : Object(kernel), handle_table(kernel), vm_manager(kernel.memory, *this), kernel(kernel) { | ||||
|     kernel.memory.RegisterPageTable(vm_manager.page_table); | ||||
|  | @ -484,6 +607,7 @@ Kernel::Process::~Process() { | |||
|     // memory etc.) even if they are still referenced by other processes.
 | ||||
|     handle_table.Clear(); | ||||
| 
 | ||||
|     FreeAllMemory(); | ||||
|     kernel.memory.UnregisterPageTable(vm_manager.page_table); | ||||
| } | ||||
| 
 | ||||
|  |  | |||
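The process.cpp changes above make HeapAllocate/HeapFree, LinearAllocate/LinearFree and the TLS path record every FCRAM range they hand out in holding_memory / holding_tls_memory, so FreeAllMemory() can walk those ranges and return them to the region when the process is destroyed. A minimal sketch of that bookkeeping, assuming MemoryRegionInfo::IntervalSet is a boost::icl::interval_set of u32 FCRAM offsets (consistent with the .lower()/.upper() calls in the diff):

    #include <boost/icl/interval_set.hpp>
    #include <cstdint>

    using IntervalSet = boost::icl::interval_set<std::uint32_t>;
    using Interval = IntervalSet::interval_type;

    IntervalSet holding_memory;

    // Allocations add right-open [offset, offset + size) ranges; touching ranges coalesce.
    holding_memory += Interval::right_open(0x1000, 0x3000);
    holding_memory += Interval::right_open(0x3000, 0x4000); // now one block: [0x1000, 0x4000)

    // Cleanup walks each held block and frees it, as FreeAllMemory() does in the diff.
    for (const auto& block : holding_memory) {
        const auto offset = block.lower();
        const auto size = block.upper() - block.lower();
        // memory_region->Free(offset, size);  // the real call in the diff
        (void)offset;
        (void)size;
    }
    holding_memory.clear();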
|  | @ -217,6 +217,8 @@ public: | |||
|     u32 memory_used = 0; | ||||
| 
 | ||||
|     std::shared_ptr<MemoryRegionInfo> memory_region = nullptr; | ||||
|     MemoryRegionInfo::IntervalSet holding_memory; | ||||
|     MemoryRegionInfo::IntervalSet holding_tls_memory; | ||||
| 
 | ||||
|     /// The Thread Local Storage area is allocated as processes create threads,
 | ||||
|     /// each TLS area is 0x200 bytes, so one page (0x1000) is split up in 8 parts, and each part
 | ||||
|  | @ -237,12 +239,16 @@ public: | |||
|     ResultVal<VAddr> LinearAllocate(VAddr target, u32 size, VMAPermission perms); | ||||
|     ResultCode LinearFree(VAddr target, u32 size); | ||||
| 
 | ||||
|     ResultVal<VAddr> AllocateThreadLocalStorage(); | ||||
| 
 | ||||
|     ResultCode Map(VAddr target, VAddr source, u32 size, VMAPermission perms, | ||||
|                    bool privileged = false); | ||||
|     ResultCode Unmap(VAddr target, VAddr source, u32 size, VMAPermission perms, | ||||
|                      bool privileged = false); | ||||
| 
 | ||||
| private: | ||||
|     void FreeAllMemory(); | ||||
| 
 | ||||
|     KernelSystem& kernel; | ||||
| 
 | ||||
|     friend class boost::serialization::access; | ||||
|  |  | |||
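The tls_slots comment in this header captures the layout AllocateThreadLocalStorage depends on: one 0x1000-byte page holds eight 0x200-byte TLS entries, with a std::bitset<8> per page marking used slots. A small sketch of the page/slot arithmetic (the TLS_AREA_VADDR value here is illustrative only; the real constants live in Citra's Memory namespace):

    #include <bitset>
    #include <cstdint>
    #include <vector>

    using VAddr = std::uint32_t;

    constexpr VAddr TLS_AREA_VADDR = 0x1FF82000;    // assumed value, for illustration only
    constexpr std::uint32_t PAGE_SIZE = 0x1000;     // Memory::CITRA_PAGE_SIZE
    constexpr std::uint32_t TLS_ENTRY_SIZE = 0x200; // Memory::TLS_ENTRY_SIZE -> 8 slots per page

    // One bitset per mapped TLS page; a set bit means the slot is taken by a thread.
    std::vector<std::bitset<8>> tls_slots;

    // Address of a given (page, slot), matching the computation in AllocateThreadLocalStorage().
    VAddr TlsAddress(std::size_t page, std::size_t slot) {
        return TLS_AREA_VADDR + static_cast<VAddr>(page) * PAGE_SIZE +
               static_cast<VAddr>(slot) * TLS_ENTRY_SIZE;
    }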
|  | @ -2,7 +2,6 @@ | |||
| // Licensed under GPLv2 or any later version
 | ||||
| // Refer to the license.txt file included.
 | ||||
| 
 | ||||
| #include <cstring> | ||||
| #include "common/archives.h" | ||||
| #include "common/logging/log.h" | ||||
| #include "core/hle/kernel/errors.h" | ||||
|  | @ -20,15 +19,21 @@ SharedMemory::~SharedMemory() { | |||
|         kernel.GetMemoryRegion(MemoryRegion::SYSTEM) | ||||
|             ->Free(interval.lower(), interval.upper() - interval.lower()); | ||||
|     } | ||||
|     if (base_address != 0 && owner_process != nullptr) { | ||||
|         owner_process->vm_manager.ChangeMemoryState(base_address, size, MemoryState::Locked, | ||||
| 
 | ||||
|     auto process = owner_process.lock(); | ||||
|     if (process) { | ||||
|         if (base_address != 0) { | ||||
|             process->vm_manager.ChangeMemoryState(base_address, size, MemoryState::Locked, | ||||
|                                                   VMAPermission::None, MemoryState::Private, | ||||
|                                                   VMAPermission::ReadWrite); | ||||
|         } else { | ||||
|             process->memory_used -= size; | ||||
|         } | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| ResultVal<std::shared_ptr<SharedMemory>> KernelSystem::CreateSharedMemory( | ||||
|     Process* owner_process, u32 size, MemoryPermission permissions, | ||||
|     std::shared_ptr<Process> owner_process, u32 size, MemoryPermission permissions, | ||||
|     MemoryPermission other_permissions, VAddr address, MemoryRegion region, std::string name) { | ||||
|     auto shared_memory{std::make_shared<SharedMemory>(*this)}; | ||||
| 
 | ||||
|  | @ -52,11 +57,11 @@ ResultVal<std::shared_ptr<SharedMemory>> KernelSystem::CreateSharedMemory( | |||
|         shared_memory->linear_heap_phys_offset = *offset; | ||||
| 
 | ||||
|         // Increase the amount of used linear heap memory for the owner process.
 | ||||
|         if (shared_memory->owner_process != nullptr) { | ||||
|             shared_memory->owner_process->memory_used += size; | ||||
|         if (owner_process != nullptr) { | ||||
|             owner_process->memory_used += size; | ||||
|         } | ||||
|     } else { | ||||
|         auto& vm_manager = shared_memory->owner_process->vm_manager; | ||||
|         auto& vm_manager = owner_process->vm_manager; | ||||
|         // The memory is already available and mapped in the owner process.
 | ||||
| 
 | ||||
|         CASCADE_CODE(vm_manager.ChangeMemoryState(address, size, MemoryState::Private, | ||||
|  | @ -65,7 +70,10 @@ ResultVal<std::shared_ptr<SharedMemory>> KernelSystem::CreateSharedMemory( | |||
| 
 | ||||
|         auto backing_blocks = vm_manager.GetBackingBlocksForRange(address, size); | ||||
|         ASSERT(backing_blocks.Succeeded()); // should succeed after verifying memory state above
 | ||||
|         shared_memory->backing_blocks = std::move(backing_blocks).Unwrap(); | ||||
|         for (const auto& interval : backing_blocks.Unwrap()) { | ||||
|             shared_memory->backing_blocks.emplace_back(memory.GetFCRAMRef(interval.lower()), | ||||
|                                                        interval.upper() - interval.lower()); | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     shared_memory->base_address = address; | ||||
|  | @ -82,14 +90,14 @@ std::shared_ptr<SharedMemory> KernelSystem::CreateSharedMemoryForApplet( | |||
|     auto backing_blocks = memory_region->HeapAllocate(size); | ||||
|     ASSERT_MSG(!backing_blocks.empty(), "Not enough space in region to allocate shared memory!"); | ||||
|     shared_memory->holding_memory = backing_blocks; | ||||
|     shared_memory->owner_process = nullptr; | ||||
|     shared_memory->owner_process = std::weak_ptr<Process>(); | ||||
|     shared_memory->name = std::move(name); | ||||
|     shared_memory->size = size; | ||||
|     shared_memory->permissions = permissions; | ||||
|     shared_memory->other_permissions = other_permissions; | ||||
|     for (const auto& interval : backing_blocks) { | ||||
|         shared_memory->backing_blocks.push_back( | ||||
|             {memory.GetFCRAMRef(interval.lower()), interval.upper() - interval.lower()}); | ||||
|         shared_memory->backing_blocks.emplace_back(memory.GetFCRAMRef(interval.lower()), | ||||
|                                                    interval.upper() - interval.lower()); | ||||
|         std::fill(memory.GetFCRAMPointer(interval.lower()), | ||||
|                   memory.GetFCRAMPointer(interval.upper()), 0); | ||||
|     } | ||||
|  | @ -102,7 +110,7 @@ ResultCode SharedMemory::Map(Process& target_process, VAddr address, MemoryPermi | |||
|                              MemoryPermission other_permissions) { | ||||
| 
 | ||||
|     MemoryPermission own_other_permissions = | ||||
|         &target_process == owner_process ? this->permissions : this->other_permissions; | ||||
|         &target_process == owner_process.lock().get() ? this->permissions : this->other_permissions; | ||||
| 
 | ||||
|     // Automatically allocated memory blocks can only be mapped with other_permissions = DontCare
 | ||||
|     if (base_address == 0 && other_permissions != MemoryPermission::DontCare) { | ||||
|  |  | |||
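shared_memory.cpp now stores its creator as a std::weak_ptr<Process>, so a shared-memory block neither keeps the owner alive nor dangles after the owner is destroyed; every access has to lock() and check the result first, as the destructor and Map() above do. A minimal sketch of the pattern, with hypothetical names:

    #include <memory>

    struct Process { /* vm_manager, memory_used, ... */ };

    struct SharedMemoryBlock {
        std::weak_ptr<Process> owner_process; // non-owning back-reference

        void OnDestroy() {
            // lock() returns an empty shared_ptr once the owner has been destroyed,
            // so owner-side cleanup is skipped safely in that case.
            if (auto process = owner_process.lock()) {
                // ... undo the owner's vm_manager / memory_used bookkeeping, as in the diff ...
            }
        }
    };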
|  | @ -9,6 +9,7 @@ | |||
| #include <boost/serialization/base_object.hpp> | ||||
| #include <boost/serialization/export.hpp> | ||||
| #include <boost/serialization/string.hpp> | ||||
| #include <boost/serialization/weak_ptr.hpp> | ||||
| #include "common/common_types.h" | ||||
| #include "common/memory_ref.h" | ||||
| #include "core/hle/kernel/object.h" | ||||
|  | @ -98,7 +99,7 @@ private: | |||
|     /// Permission restrictions applied to other processes mapping the block.
 | ||||
|     MemoryPermission other_permissions{}; | ||||
|     /// Process that created this shared memory block.
 | ||||
|     Process* owner_process; | ||||
|     std::weak_ptr<Process> owner_process; | ||||
|     /// Address of shared memory block in the owner process if specified.
 | ||||
|     VAddr base_address = 0; | ||||
|     /// Name of shared memory object.
 | ||||
|  |  | |||
|  | @ -369,6 +369,7 @@ private: | |||
|     ResultCode ControlMemory(u32* out_addr, u32 addr0, u32 addr1, u32 size, u32 operation, | ||||
|                              u32 permissions); | ||||
|     void ExitProcess(); | ||||
|     ResultCode TerminateProcess(Handle handle); | ||||
|     ResultCode MapMemoryBlock(Handle handle, u32 addr, u32 permissions, u32 other_permissions); | ||||
|     ResultCode UnmapMemoryBlock(Handle handle, u32 addr); | ||||
|     ResultCode ConnectToPort(Handle* out_handle, VAddr port_name_address); | ||||
|  | @ -535,41 +536,18 @@ ResultCode SVC::ControlMemory(u32* out_addr, u32 addr0, u32 addr1, u32 size, u32 | |||
| } | ||||
| 
 | ||||
| void SVC::ExitProcess() { | ||||
|     std::shared_ptr<Process> current_process = kernel.GetCurrentProcess(); | ||||
|     LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->process_id); | ||||
| 
 | ||||
|     ASSERT_MSG(current_process->status == ProcessStatus::Running, "Process has already exited"); | ||||
| 
 | ||||
|     current_process->status = ProcessStatus::Exited; | ||||
| 
 | ||||
|     // Stop all the process threads that are currently waiting for objects.
 | ||||
|     const auto thread_list = kernel.GetCurrentThreadManager().GetThreadList(); | ||||
|     for (auto& thread : thread_list) { | ||||
|         if (thread->owner_process.lock() != current_process) { | ||||
|             continue; | ||||
|     kernel.TerminateProcess(kernel.GetCurrentProcess()); | ||||
| } | ||||
| 
 | ||||
|         if (thread.get() == kernel.GetCurrentThreadManager().GetCurrentThread()) { | ||||
|             continue; | ||||
| ResultCode SVC::TerminateProcess(Handle handle) { | ||||
|     std::shared_ptr<Process> process = | ||||
|         kernel.GetCurrentProcess()->handle_table.Get<Process>(handle); | ||||
|     if (process == nullptr) { | ||||
|         return ERR_INVALID_HANDLE; | ||||
|     } | ||||
| 
 | ||||
|         // TODO(Subv): When are the other running/ready threads terminated?
 | ||||
|         ASSERT_MSG(thread->status == ThreadStatus::WaitSynchAny || | ||||
|                        thread->status == ThreadStatus::WaitSynchAll, | ||||
|                    "Exiting processes with non-waiting threads is currently unimplemented"); | ||||
| 
 | ||||
|         thread->Stop(); | ||||
|     } | ||||
| 
 | ||||
|     current_process->Exit(); | ||||
| 
 | ||||
|     // Kill the current thread
 | ||||
|     kernel.GetCurrentThreadManager().GetCurrentThread()->Stop(); | ||||
| 
 | ||||
|     // Remove kernel reference to process so it can be cleaned up.
 | ||||
|     kernel.RemoveProcess(current_process); | ||||
| 
 | ||||
|     system.PrepareReschedule(); | ||||
|     kernel.TerminateProcess(process); | ||||
|     return RESULT_SUCCESS; | ||||
| } | ||||
| 
 | ||||
| /// Maps a memory block to specified address
 | ||||
|  | @ -1690,7 +1668,7 @@ ResultCode SVC::CreateMemoryBlock(Handle* out_handle, u32 addr, u32 size, u32 my | |||
| 
 | ||||
|     CASCADE_RESULT(shared_memory, | ||||
|                    kernel.CreateSharedMemory( | ||||
|                        current_process.get(), size, static_cast<MemoryPermission>(my_permission), | ||||
|                        current_process, size, static_cast<MemoryPermission>(my_permission), | ||||
|                        static_cast<MemoryPermission>(other_permission), addr, region)); | ||||
|     CASCADE_RESULT(*out_handle, current_process->handle_table.Create(std::move(shared_memory))); | ||||
| 
 | ||||
|  | @ -2244,7 +2222,7 @@ const std::array<SVC::FunctionDef, 180> SVC::SVC_Table{{ | |||
|     {0x73, nullptr, "CreateCodeSet"}, | ||||
|     {0x74, nullptr, "RandomStub"}, | ||||
|     {0x75, nullptr, "CreateProcess"}, | ||||
|     {0x76, nullptr, "TerminateProcess"}, | ||||
|     {0x76, &SVC::Wrap<&SVC::TerminateProcess>, "TerminateProcess"}, | ||||
|     {0x77, nullptr, "SetProcessResourceLimits"}, | ||||
|     {0x78, nullptr, "CreateResourceLimit"}, | ||||
|     {0x79, nullptr, "SetResourceLimitValues"}, | ||||
|  |  | |||
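With table entry 0x76 now wired to SVC::TerminateProcess, guest code can terminate another process by handle. A hedged sketch of what a homebrew caller might look like, assuming libctru's svcOpenProcess/svcTerminateProcess declarations (none of this is part of the diff):

    #include <3ds.h> // assumed: Result svcOpenProcess(Handle*, u32); Result svcTerminateProcess(Handle);

    Result TerminateById(u32 process_id) {
        Handle process;
        Result res = svcOpenProcess(&process, process_id);
        if (R_FAILED(res))
            return res;
        res = svcTerminateProcess(process); // dispatched to SVC::TerminateProcess in the emulator
        svcCloseHandle(process);
        return res;
    }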
|  | @ -199,11 +199,34 @@ void ThreadManager::WaitCurrentThread_Sleep() { | |||
| } | ||||
| 
 | ||||
| void ThreadManager::ExitCurrentThread() { | ||||
|     Thread* thread = GetCurrentThread(); | ||||
|     current_thread->Stop(); | ||||
|     std::erase(thread_list, current_thread); | ||||
|     kernel.PrepareReschedule(); | ||||
| } | ||||
| 
 | ||||
| void ThreadManager::TerminateProcessThreads(std::shared_ptr<Process> process) { | ||||
|     auto iter = thread_list.begin(); | ||||
|     while (iter != thread_list.end()) { | ||||
|         auto& thread = *iter; | ||||
|         if (thread == current_thread || thread->owner_process.lock() != process) { | ||||
|             iter++; | ||||
|             continue; | ||||
|         } | ||||
| 
 | ||||
|         if (thread->status != ThreadStatus::WaitSynchAny && | ||||
|             thread->status != ThreadStatus::WaitSynchAll) { | ||||
|             // TODO: How does the real kernel handle non-waiting threads?
 | ||||
|             LOG_WARNING(Kernel, "Terminating non-waiting thread {}", thread->thread_id); | ||||
|         } | ||||
| 
 | ||||
|         thread->Stop(); | ||||
|     thread_list.erase(std::remove_if(thread_list.begin(), thread_list.end(), | ||||
|                                      [thread](const auto& p) { return p.get() == thread; }), | ||||
|                       thread_list.end()); | ||||
|         iter = thread_list.erase(iter); | ||||
|     } | ||||
| 
 | ||||
|     // Kill the current thread last, if applicable.
 | ||||
|     if (current_thread != nullptr && current_thread->owner_process.lock() == process) { | ||||
|         ExitCurrentThread(); | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| void ThreadManager::ThreadWakeupCallback(u64 thread_id, s64 cycles_late) { | ||||
|  | @ -295,32 +318,6 @@ void ThreadManager::DebugThreadQueue() { | |||
|     } | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Finds a free location for the TLS section of a thread. | ||||
|  * @param tls_slots The TLS page array of the thread's owner process. | ||||
|  * Returns a tuple of (page, slot, alloc_needed) where: | ||||
|  * page: The index of the first allocated TLS page that has free slots. | ||||
|  * slot: The index of the first free slot in the indicated page. | ||||
|  * alloc_needed: Whether there's a need to allocate a new TLS page (All pages are full). | ||||
|  */ | ||||
| static std::tuple<std::size_t, std::size_t, bool> GetFreeThreadLocalSlot( | ||||
|     std::span<const std::bitset<8>> tls_slots) { | ||||
|     // Iterate over all the allocated pages, and try to find one where not all slots are used.
 | ||||
|     for (std::size_t page = 0; page < tls_slots.size(); ++page) { | ||||
|         const auto& page_tls_slots = tls_slots[page]; | ||||
|         if (!page_tls_slots.all()) { | ||||
|             // We found a page with at least one free slot, find which slot it is
 | ||||
|             for (std::size_t slot = 0; slot < page_tls_slots.size(); ++slot) { | ||||
|                 if (!page_tls_slots.test(slot)) { | ||||
|                     return std::make_tuple(page, slot, false); | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
|     return std::make_tuple(0, 0, true); | ||||
| } | ||||
| 
 | ||||
| /**
 | ||||
|  * Resets a thread context, making it ready to be scheduled and run by the CPU | ||||
|  * @param context Thread context to reset | ||||
|  | @ -376,45 +373,7 @@ ResultVal<std::shared_ptr<Thread>> KernelSystem::CreateThread( | |||
|     thread->name = std::move(name); | ||||
|     thread_managers[processor_id]->wakeup_callback_table[thread->thread_id] = thread.get(); | ||||
|     thread->owner_process = owner_process; | ||||
| 
 | ||||
|     // Find the next available TLS index, and mark it as used
 | ||||
|     auto& tls_slots = owner_process->tls_slots; | ||||
| 
 | ||||
|     auto [available_page, available_slot, needs_allocation] = GetFreeThreadLocalSlot(tls_slots); | ||||
| 
 | ||||
|     if (needs_allocation) { | ||||
|         // There are no already-allocated pages with free slots, lets allocate a new one.
 | ||||
|         // TLS pages are allocated from the BASE region in the linear heap.
 | ||||
|         auto memory_region = GetMemoryRegion(MemoryRegion::BASE); | ||||
| 
 | ||||
|         // Allocate some memory from the end of the linear heap for this region.
 | ||||
|         auto offset = memory_region->LinearAllocate(Memory::CITRA_PAGE_SIZE); | ||||
|         if (!offset) { | ||||
|             LOG_ERROR(Kernel_SVC, | ||||
|                       "Not enough space in region to allocate a new TLS page for thread"); | ||||
|             return ERR_OUT_OF_MEMORY; | ||||
|         } | ||||
|         owner_process->memory_used += Memory::CITRA_PAGE_SIZE; | ||||
| 
 | ||||
|         tls_slots.emplace_back(0); // The page is completely available at the start
 | ||||
|         available_page = tls_slots.size() - 1; | ||||
|         available_slot = 0; // Use the first slot in the new page
 | ||||
| 
 | ||||
|         auto& vm_manager = owner_process->vm_manager; | ||||
| 
 | ||||
|         // Map the page to the current process' address space.
 | ||||
|         vm_manager.MapBackingMemory( | ||||
|             Memory::TLS_AREA_VADDR + static_cast<VAddr>(available_page) * Memory::CITRA_PAGE_SIZE, | ||||
|             memory.GetFCRAMRef(*offset), Memory::CITRA_PAGE_SIZE, MemoryState::Locked); | ||||
|     } | ||||
| 
 | ||||
|     // Mark the slot as used
 | ||||
|     tls_slots[available_page].set(available_slot); | ||||
|     thread->tls_address = Memory::TLS_AREA_VADDR + | ||||
|                           static_cast<VAddr>(available_page) * Memory::CITRA_PAGE_SIZE + | ||||
|                           static_cast<VAddr>(available_slot) * Memory::TLS_ENTRY_SIZE; | ||||
| 
 | ||||
|     memory.ZeroBlock(*owner_process, thread->tls_address, Memory::TLS_ENTRY_SIZE); | ||||
|     CASCADE_RESULT(thread->tls_address, owner_process->AllocateThreadLocalStorage()); | ||||
| 
 | ||||
|     // TODO(peachum): move to ScheduleThread() when scheduler is added so selected core is used
 | ||||
|     // to initialize the context
 | ||||
|  |  | |||
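CreateThread no longer hand-rolls TLS page management; it calls owner_process->AllocateThreadLocalStorage() and threads any failure through CASCADE_RESULT. A hand-expanded approximation of that one line (not the actual macro definition), to show the control flow:

    // Approximate expansion of:
    //     CASCADE_RESULT(thread->tls_address, owner_process->AllocateThreadLocalStorage());
    auto tls_result = owner_process->AllocateThreadLocalStorage();
    if (tls_result.Failed()) {
        return tls_result.Code(); // e.g. ERR_OUT_OF_MEMORY when no new TLS page fits in BASE
    }
    thread->tls_address = std::move(tls_result).Unwrap();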
|  | @ -111,6 +111,11 @@ public: | |||
|      */ | ||||
|     void ExitCurrentThread(); | ||||
| 
 | ||||
|     /**
 | ||||
|      * Terminates all threads belonging to a specific process. | ||||
|      */ | ||||
|     void TerminateProcessThreads(std::shared_ptr<Process> process); | ||||
| 
 | ||||
|     /**
 | ||||
|      * Get a const reference to the thread list for debug use | ||||
|      */ | ||||
|  |  | |||
|  | @ -391,9 +391,9 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) { | |||
|         plgldr->OnMemoryChanged(process, Core::System::GetInstance().Kernel()); | ||||
| } | ||||
| 
 | ||||
| ResultVal<std::vector<std::pair<MemoryRef, u32>>> VMManager::GetBackingBlocksForRange(VAddr address, | ||||
| ResultVal<MemoryRegionInfo::IntervalSet> VMManager::GetBackingBlocksForRange(VAddr address, | ||||
|                                                                              u32 size) { | ||||
|     std::vector<std::pair<MemoryRef, u32>> backing_blocks; | ||||
|     MemoryRegionInfo::IntervalSet backing_blocks; | ||||
|     VAddr interval_target = address; | ||||
|     while (interval_target != address + size) { | ||||
|         auto vma = FindVMA(interval_target); | ||||
|  | @ -404,8 +404,10 @@ ResultVal<std::vector<std::pair<MemoryRef, u32>>> VMManager::GetBackingBlocksFor | |||
| 
 | ||||
|         VAddr interval_end = std::min(address + size, vma->second.base + vma->second.size); | ||||
|         u32 interval_size = interval_end - interval_target; | ||||
|         auto backing_memory = vma->second.backing_memory + (interval_target - vma->second.base); | ||||
|         backing_blocks.push_back({backing_memory, interval_size}); | ||||
|         auto backing_memory = memory.GetFCRAMOffset(vma->second.backing_memory + | ||||
|                                                     (interval_target - vma->second.base)); | ||||
|         backing_blocks += | ||||
|             MemoryRegionInfo::Interval(backing_memory, backing_memory + interval_size); | ||||
| 
 | ||||
|         interval_target += interval_size; | ||||
|     } | ||||
|  |  | |||
|  | @ -205,8 +205,7 @@ public: | |||
|     void LogLayout(Common::Log::Level log_level) const; | ||||
| 
 | ||||
|     /// Gets a list of backing memory blocks for the specified range
 | ||||
|     ResultVal<std::vector<std::pair<MemoryRef, u32>>> GetBackingBlocksForRange(VAddr address, | ||||
|                                                                                u32 size); | ||||
|     ResultVal<MemoryRegionInfo::IntervalSet> GetBackingBlocksForRange(VAddr address, u32 size); | ||||
| 
 | ||||
|     /// Each VMManager has its own page table, which is set as the main one when the owning process
 | ||||
|     /// is scheduled.
 | ||||
|  |  | |||
|  | @ -988,13 +988,18 @@ ResultCode AppletManager::PrepareToDoApplicationJump(u64 title_id, FS::MediaType | |||
|     // Save the title data to send it to the Home Menu when DoApplicationJump is called.
 | ||||
|     auto application_slot_data = GetAppletSlot(AppletSlot::Application); | ||||
|     app_jump_parameters.current_title_id = application_slot_data->title_id; | ||||
|     // TODO(Subv): Retrieve the correct media type of the currently-running application. For now
 | ||||
|     // just assume NAND.
 | ||||
|     app_jump_parameters.current_media_type = FS::MediaType::NAND; | ||||
|     app_jump_parameters.next_title_id = flags == ApplicationJumpFlags::UseCurrentParameters | ||||
|                                             ? application_slot_data->title_id | ||||
|                                             : title_id; | ||||
|     // TODO: Basic heuristic to guess media type, needs proper implementation.
 | ||||
|     app_jump_parameters.current_media_type = | ||||
|         ((application_slot_data->title_id >> 32) & 0xFFFFFFFF) == 0x00040000 | ||||
|             ? Service::FS::MediaType::SDMC | ||||
|             : Service::FS::MediaType::NAND; | ||||
|     if (flags == ApplicationJumpFlags::UseCurrentParameters) { | ||||
|         app_jump_parameters.next_title_id = app_jump_parameters.current_title_id; | ||||
|         app_jump_parameters.next_media_type = app_jump_parameters.current_media_type; | ||||
|     } else { | ||||
|         app_jump_parameters.next_title_id = title_id; | ||||
|         app_jump_parameters.next_media_type = media_type; | ||||
|     } | ||||
|     app_jump_parameters.flags = flags; | ||||
| 
 | ||||
|     // Note: The real console uses the Home Menu to perform the application jump, therefore the menu
 | ||||
|  | @ -1020,36 +1025,26 @@ ResultCode AppletManager::DoApplicationJump(const DeliverArg& arg) { | |||
|         deliver_arg->source_program_id = title_id; | ||||
|     } | ||||
| 
 | ||||
|     // TODO(Subv): Terminate the current Application.
 | ||||
|     if (GetAppletSlot(AppletSlot::HomeMenu)->registered) { | ||||
|         // If the home menu is running, use it to jump to the next application.
 | ||||
|         // The home menu will call GetProgramIdOnApplicationJump and
 | ||||
|         // PrepareToStartApplication/StartApplication to launch the title.
 | ||||
|         active_slot = AppletSlot::HomeMenu; | ||||
|         SendParameter({ | ||||
|             .sender_id = AppletId::Application, | ||||
|             .destination_id = AppletId::HomeMenu, | ||||
|             .signal = SignalType::WakeupToLaunchApplication, | ||||
|         }); | ||||
| 
 | ||||
|     // Note: The real console sends signal 17 (WakeupToLaunchApplication) to the Home Menu, this
 | ||||
|     // prompts it to call GetProgramIdOnApplicationJump and
 | ||||
|     // PrepareToStartApplication/StartApplication on the title to launch.
 | ||||
|     active_slot = AppletSlot::Application; | ||||
| 
 | ||||
|     // Perform a soft-reset if we're trying to relaunch the same title.
 | ||||
|     // TODO(Subv): Note that this reboots the entire emulated system, a better way would be to
 | ||||
|     // simply re-launch the title without closing all services, but this would only work for
 | ||||
|     // installed titles since we have no way of getting the file path of an arbitrary game dump
 | ||||
|     // based only on the title id.
 | ||||
| 
 | ||||
|     auto new_path = Service::AM::GetTitleContentPath(app_jump_parameters.next_media_type, | ||||
|                                                      app_jump_parameters.next_title_id); | ||||
|     if (new_path.empty() || !FileUtil::Exists(new_path)) { | ||||
|         LOG_CRITICAL( | ||||
|             Service_APT, | ||||
|             "Failed to find title during application jump: {} Resetting current title instead.", | ||||
|             new_path); | ||||
|         new_path.clear(); | ||||
|     } | ||||
| 
 | ||||
|     system.RequestReset(new_path); | ||||
|         // TODO: APT terminates the application here, usually it will exit itself properly though.
 | ||||
|         return RESULT_SUCCESS; | ||||
|     } else { | ||||
|         // Otherwise, work around the missing home menu by launching the title directly.
 | ||||
| 
 | ||||
|     // Launch the title directly.
 | ||||
|     // The emulator does not suport terminating old processes, would require a lot of cleanup
 | ||||
|     // This code is left commented for when this is implemented, for now we cannot use NS
 | ||||
|     // as the old process resources would interfere with the new ones
 | ||||
|         // TODO: The emulator does not support terminating the old process immediately.
 | ||||
|         // We could call TerminateProcess but references to the process are still held elsewhere,
 | ||||
|         // preventing clean up. This code is left commented for when this is implemented, for now we
 | ||||
|         // cannot use NS as the old process resources would interfere with the new ones.
 | ||||
|         /*
 | ||||
|         auto process = | ||||
|             NS::LaunchTitle(app_jump_parameters.next_media_type, app_jump_parameters.next_title_id); | ||||
|  | @ -1059,6 +1054,22 @@ ResultCode AppletManager::DoApplicationJump(const DeliverArg& arg) { | |||
|         } | ||||
|         return RESULT_SUCCESS; | ||||
|         */ | ||||
| 
 | ||||
|         auto new_path = Service::AM::GetTitleContentPath(app_jump_parameters.next_media_type, | ||||
|                                                          app_jump_parameters.next_title_id); | ||||
|         if (new_path.empty() || !FileUtil::Exists(new_path)) { | ||||
|             // TODO: This can happen if the requested title is not installed. Need a way to find
 | ||||
|             // non-installed titles in the game list.
 | ||||
|             LOG_CRITICAL( | ||||
|                 Service_APT, | ||||
|                 "Failed to find title during application jump: {} Resetting current title instead.", | ||||
|                 new_path); | ||||
|             new_path.clear(); | ||||
|         } | ||||
| 
 | ||||
|         system.RequestReset(new_path); | ||||
|         return RESULT_SUCCESS; | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| ResultCode AppletManager::PrepareToStartApplication(u64 title_id, FS::MediaType media_type) { | ||||
|  |  | |||
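The media-type heuristic in PrepareToDoApplicationJump keys off the upper 32 bits of the 3DS title ID: category 0x00040000 (ordinary applications) is assumed to come from the SD card, anything else from NAND. A small sketch of that check, using the same encoding the diff relies on:

    #include <cstdint>

    enum class MediaType { NAND, SDMC };

    // Title IDs are 64-bit; the high word encodes the title category.
    // 0x00040000 is the regular-application category, assumed to live on SDMC here.
    MediaType GuessMediaType(std::uint64_t title_id) {
        const std::uint32_t category = static_cast<std::uint32_t>(title_id >> 32);
        return category == 0x00040000 ? MediaType::SDMC : MediaType::NAND;
    }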