mirror of https://github.com/PabloMK7/citra.git (synced 2025-10-31 05:40:04 +00:00)
	Kernel/IPC: Partially implement MappedBuffer translation.
Right now, only MappedBuffers that span a single page and are not page-aligned are implemented. MappedBuffers are unmapped during the reply part of ReplyAndReceive. Only unmapping of read-only buffers is currently implemented.
parent 928202f744
commit a7a5c5aa0d

5 changed files with 102 additions and 14 deletions
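For orientation before the diff: a MappedBuffer descriptor in a 3DS IPC command buffer packs the buffer's size and access permissions into one word, followed by a word holding the buffer's address. The standalone sketch below decodes one; the bit layout is an assumption taken from 3dbrew's IPC documentation, not from this commit, and the values are hypothetical:

#include <cstdint>
#include <cstdio>

int main() {
    // Assumed layout (per 3dbrew): bit 3 set marks a mapped buffer,
    // bits 1-2 hold the permissions (1 = R, 2 = W, 3 = RW), and
    // bits 4-31 hold the buffer size in bytes.
    std::uint32_t descriptor = (0x200u << 4) | (1u << 1) | 0x8u;

    bool is_mapped_buffer = (descriptor & 0x8u) != 0;
    std::uint32_t size = descriptor >> 4;        // 0x200
    std::uint32_t perms = (descriptor >> 1) & 3; // 1, i.e. read-only

    std::printf("mapped=%d size=0x%X perms=%u\n", is_mapped_buffer, size, perms);
}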
src/core/hle/kernel/ipc.cpp

@@ -2,6 +2,7 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include "common/alignment.h"
 #include "core/hle/ipc.h"
 #include "core/hle/kernel/handle_table.h"
 #include "core/hle/kernel/ipc.h"
@@ -14,7 +15,7 @@
 namespace Kernel {
 
 ResultCode TranslateCommandBuffer(SharedPtr<Thread> src_thread, SharedPtr<Thread> dst_thread,
-                                  VAddr src_address, VAddr dst_address) {
+                                  VAddr src_address, VAddr dst_address, bool reply) {
 
     auto& src_process = src_thread->owner_process;
     auto& dst_process = dst_thread->owner_process;
@@ -115,6 +116,88 @@ ResultCode TranslateCommandBuffer(SharedPtr<Thread> src_thread, SharedPtr<Thread
             cmd_buf[i++] = target_buffer.address;
             break;
         }
+        case IPC::DescriptorType::MappedBuffer: {
+            IPC::MappedBufferDescInfo descInfo{descriptor};
+            VAddr source_address = cmd_buf[i];
+
+            size_t size = descInfo.size;
+            IPC::MappedBufferPermissions permissions = descInfo.perms;
+
+            VAddr page_start = Common::AlignDown(source_address, Memory::PAGE_SIZE);
+            u32 page_offset = source_address - page_start;
+            u32 num_pages =
+                Common::AlignUp(page_offset + size, Memory::PAGE_SIZE) >> Memory::PAGE_BITS;
+
+            ASSERT(num_pages >= 1);
+
+            if (reply) {
+                // TODO(Subv): Scan the target's command buffer to make sure that there was a
+                // MappedBuffer descriptor in the original request. The real kernel panics if you
+                // try to reply with an unsolicited MappedBuffer.
+
+                // Unmap the buffers. Readonly buffers do not need to be copied over to the target
+                // process again because they were (presumably) not modified. This behavior is
+                // consistent with the real kernel.
+                if (permissions == IPC::MappedBufferPermissions::R) {
+                    ResultCode result = src_process->vm_manager.UnmapRange(
+                        page_start, num_pages * Memory::PAGE_SIZE);
+                    ASSERT(result == RESULT_SUCCESS);
+                }
+
+                ASSERT_MSG(permissions == IPC::MappedBufferPermissions::R,
+                           "Unmapping Write MappedBuffers is unimplemented");
+                i += 1;
+                break;
+            }
+
+            VAddr target_address = 0;
+
+            auto IsPageAligned = [](VAddr address) -> bool {
+                return (address & Memory::PAGE_MASK) == 0;
+            };
+
+            // TODO(Subv): Support more than 1 page and aligned page mappings
+            ASSERT_MSG(
+                num_pages == 1 &&
+                    (!IsPageAligned(source_address) || !IsPageAligned(source_address + size)),
+                "MappedBuffers of more than one page or aligned transfers are not implemented");
+
+            // TODO(Subv): Perform permission checks.
+
+            // TODO(Subv): Leave a page of Reserved memory before the first page and after the last
+            // page.
+
+            if (!IsPageAligned(source_address) ||
+                (num_pages == 1 && !IsPageAligned(source_address + size))) {
+                // If the address of the source buffer is not page-aligned or if the buffer doesn't
+                // fill an entire page, then we have to allocate a page of memory in the target
+                // process and copy over the data from the input buffer. This allocated buffer will
+                // be copied back to the source process and deallocated when the server replies to
+                // the request via ReplyAndReceive.
+
+                auto buffer = std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE);
+
+                // Number of bytes until the next page.
+                size_t difference_to_page =
+                    Common::AlignUp(source_address, Memory::PAGE_SIZE) - source_address;
+                // If the data fits in one page we can just copy the required size instead of the
+                // entire page.
+                size_t read_size = num_pages == 1 ? size : difference_to_page;
+
+                Memory::ReadBlock(*src_process, source_address, buffer->data() + page_offset,
+                                  read_size);
+
+                // Map the page into the target process' address space.
+                target_address = dst_process->vm_manager
+                                     .MapMemoryBlockToBase(
+                                         Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE,
+                                         buffer, 0, buffer->size(), Kernel::MemoryState::Shared)
+                                     .Unwrap();
+            }
+
+            cmd_buf[i++] = target_address + page_offset;
+            break;
+        }
         default:
             UNIMPLEMENTED_MSG("Unsupported handle translation: 0x%08X", descriptor);
         }
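To make the page arithmetic in the new MappedBuffer case concrete, here is a minimal standalone sketch of what Common::AlignDown/Common::AlignUp compute for a hypothetical unaligned buffer (PAGE_SIZE is 0x1000 and PAGE_BITS is 12 on the 3DS):

#include <cstdint>
#include <cstdio>

int main() {
    constexpr std::uint32_t PAGE_SIZE = 0x1000;
    constexpr std::uint32_t PAGE_BITS = 12;

    std::uint32_t source_address = 0x08001234; // hypothetical, not page-aligned
    std::uint32_t size = 0x200;                // hypothetical buffer size

    std::uint32_t page_start = source_address & ~(PAGE_SIZE - 1); // AlignDown -> 0x08001000
    std::uint32_t page_offset = source_address - page_start;      // 0x234
    std::uint32_t num_pages =
        ((page_offset + size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) >> PAGE_BITS; // AlignUp -> 1

    // num_pages == 1 and neither end is page-aligned, so this buffer takes the
    // implemented path: one page is allocated in the target process and 0x200
    // bytes are copied in starting at offset 0x234.
    std::printf("page_start=0x%08X page_offset=0x%03X num_pages=%u\n",
                page_start, page_offset, num_pages);
}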
src/core/hle/kernel/ipc.h

@@ -10,5 +10,5 @@
 namespace Kernel {
 /// Performs IPC command buffer translation from one process to another.
 ResultCode TranslateCommandBuffer(SharedPtr<Thread> src_thread, SharedPtr<Thread> dst_thread,
-                                  VAddr src_address, VAddr dst_address);
+                                  VAddr src_address, VAddr dst_address, bool reply);
 } // namespace Kernel
src/core/hle/kernel/svc.cpp

@@ -472,8 +472,8 @@ static ResultCode ReceiveIPCRequest(SharedPtr<ServerSession> server_session,
     VAddr target_address = thread->GetCommandBufferAddress();
     VAddr source_address = server_session->currently_handling->GetCommandBufferAddress();
 
-    ResultCode translation_result = TranslateCommandBuffer(server_session->currently_handling,
-                                                           thread, source_address, target_address);
+    ResultCode translation_result = TranslateCommandBuffer(
+        server_session->currently_handling, thread, source_address, target_address, false);
 
     // If a translation error occurred, immediately resume the client thread.
     if (translation_result.IsError()) {
@@ -535,8 +535,8 @@ static ResultCode ReplyAndReceive(s32* index, VAddr handles_address, s32 handle_
         VAddr source_address = GetCurrentThread()->GetCommandBufferAddress();
         VAddr target_address = request_thread->GetCommandBufferAddress();
 
-        ResultCode translation_result = TranslateCommandBuffer(GetCurrentThread(), request_thread,
-                                                               source_address, target_address);
+        ResultCode translation_result = TranslateCommandBuffer(
+            Kernel::GetCurrentThread(), request_thread, source_address, target_address, true);
 
         // Note: The real kernel seems to always panic if the Server->Client buffer translation
         // fails for whatever reason.
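Taken together, the two call sites give a mapped buffer its lifecycle: ReceiveIPCRequest translates the request (reply = false) and maps the client's buffer into the server, while ReplyAndReceive translates the reply (reply = true) and unmaps it again. A condensed sketch of that flow, using simplified stand-in types rather than the actual citra API:

#include <cstdio>

// Stand-ins; the real code operates on Kernel::Thread command buffers.
enum class Perms { R, W, RW };
struct Buffer {
    bool mapped = false;
    Perms perms = Perms::R;
};

// Client -> server (ReceiveIPCRequest, reply = false): map the buffer.
void TranslateRequest(Buffer& b) {
    b.mapped = true;
}

// Server -> client (ReplyAndReceive, reply = true): unmap it again.
// Read-only buffers are unmapped without a copy back; Write/RW buffers
// would need their contents copied back first, which this commit still
// leaves unimplemented (it asserts instead).
void TranslateReply(Buffer& b) {
    if (b.perms == Perms::R) {
        b.mapped = false;
    }
}

int main() {
    Buffer b;
    TranslateRequest(b);
    std::printf("mapped during request: %d\n", b.mapped);
    TranslateReply(b);
    std::printf("mapped after reply: %d\n", b.mapped);
}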
src/core/hle/kernel/vm_manager.cpp

@@ -93,7 +93,8 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
     return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
 }
 
-ResultVal<VAddr> VMManager::MapMemoryBlockToBase(VAddr base, std::shared_ptr<std::vector<u8>> block,
+ResultVal<VAddr> VMManager::MapMemoryBlockToBase(VAddr base, u32 region_size,
+                                                 std::shared_ptr<std::vector<u8>> block,
                                                  size_t offset, u32 size, MemoryState state) {
 
     // Find the first Free VMA.
@@ -105,13 +106,15 @@ ResultVal<VAddr> VMManager::MapMemoryBlockToBase(VAddr base, std::shared_ptr<std
         return vma_end > base && vma_end >= base + size;
     });
 
-    if (vma_handle == vma_map.end()) {
+    VAddr target = std::max(base, vma_handle->second.base);
+
+    // Do not try to allocate the block if there are no available addresses within the desired
+    // region.
+    if (vma_handle == vma_map.end() || target + size > base + region_size) {
         return ResultCode(ErrorDescription::OutOfMemory, ErrorModule::Kernel,
                           ErrorSummary::OutOfResource, ErrorLevel::Permanent);
     }
 
-    VAddr target = std::max(base, vma_handle->second.base);
-
     auto result = MapMemoryBlock(target, block, offset, size, state);
 
     if (result.Failed())
@@ -373,4 +376,4 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
         break;
     }
 }
-}
+} // namespace Kernel
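The reshuffled check above computes the candidate address first so that it can also reject mappings that would spill past base + region_size. A toy example of the new bound, with hypothetical numbers standing in for Memory::IPC_MAPPING_VADDR and Memory::IPC_MAPPING_SIZE:

#include <cstdint>
#include <cstdio>

int main() {
    const std::uint32_t base = 0x04000000;    // hypothetical region base
    const std::uint32_t region_size = 0x8000; // hypothetical region size (8 pages)
    const std::uint32_t size = 0x1000;        // one page to map

    // Suppose the first free VMA large enough starts here:
    const std::uint32_t vma_base = 0x04007800; // hypothetical
    const std::uint32_t target = vma_base > base ? vma_base : base;

    // 0x04008800 > 0x04008000, so the mapping is rejected as OutOfMemory
    // even though a free VMA exists; it lies outside the allowed region.
    if (target + size > base + region_size) {
        std::puts("rejected: OutOfMemory");
    } else {
        std::printf("mapped at 0x%08X\n", target);
    }
}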
src/core/hle/kernel/vm_manager.h

@@ -148,14 +148,16 @@ public:
      * Maps part of a ref-counted block of memory at the first free address after the given base.
      *
      * @param base The base address to start the mapping at.
+     * @param region_size The max size of the region from where we'll try to find an address.
      * @param block The block to be mapped.
      * @param offset Offset into `block` to map from.
      * @param size Size of the mapping.
      * @param state MemoryState tag to attach to the VMA.
      * @returns The address at which the memory was mapped.
      */
-    ResultVal<VAddr> MapMemoryBlockToBase(VAddr base, std::shared_ptr<std::vector<u8>> block,
-                                          size_t offset, u32 size, MemoryState state);
+    ResultVal<VAddr> MapMemoryBlockToBase(VAddr base, u32 region_size,
+                                          std::shared_ptr<std::vector<u8>> block, size_t offset,
+                                          u32 size, MemoryState state);
     /**
      * Maps an unmanaged host memory pointer at a given address.
      *
@@ -236,4 +238,4 @@ private:
     /// Updates the pages corresponding to this VMA so they match the VMA's attributes.
     void UpdatePageTableForVMA(const VirtualMemoryArea& vma);
 };
-}
+} // namespace Kernel
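For completeness, a hedged usage sketch of the new signature, mirroring the call made from the IPC translation code above (not compilable on its own; vm_manager stands for some process's VMManager instance):

// Map a freshly allocated page at the first free address inside
// [IPC_MAPPING_VADDR, IPC_MAPPING_VADDR + IPC_MAPPING_SIZE).
auto block = std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE);
ResultVal<VAddr> addr = vm_manager.MapMemoryBlockToBase(
    Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, block, 0, block->size(),
    MemoryState::Shared);
if (addr.Succeeded()) {
    VAddr mapped_at = addr.Unwrap(); // where the page ended up
}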