Mirror of https://github.com/PabloMK7/citra.git (synced 2025-10-30 21:30:04 +00:00)

Commit 51d53a6281 (parent 19291ba465)

LLE Mapped Buffer: addressed comments

4 changed files with 140 additions and 76 deletions
@@ -33,6 +33,17 @@ ResultCode TranslateCommandBuffer(SharedPtr<Thread> src_thread, SharedPtr<Thread
     std::array<u32, IPC::COMMAND_BUFFER_LENGTH> cmd_buf;
     Memory::ReadBlock(*src_process, src_address, cmd_buf.data(), command_size * sizeof(u32));
 
+    // Create a copy of the target's command buffer
+    IPC::Header dst_header;
+    Memory::ReadBlock(*dst_process, dst_address, &dst_header.raw, sizeof(dst_header.raw));
+
+    std::size_t dst_untranslated_size = 1u + dst_header.normal_params_size;
+    std::size_t dst_command_size = dst_untranslated_size + dst_header.translate_params_size;
+
+    std::array<u32, IPC::COMMAND_BUFFER_LENGTH> dst_cmd_buf;
+    Memory::ReadBlock(*dst_process, dst_address, dst_cmd_buf.data(),
+                      dst_command_size * sizeof(u32));
+
     std::size_t i = untranslated_size;
     while (i < command_size) {
         u32 descriptor = cmd_buf[i];
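dst_untranslated_size and dst_command_size above come straight out of the destination's raw IPC header word. For reference, a minimal standalone sketch of the same computation, assuming the usual 3DS IPC header layout (translate_params_size in bits 0-5, normal_params_size in bits 6-11) instead of Citra's BitField-based IPC::Header:

```cpp
#include <cstddef>
#include <cstdint>

// Word count of an IPC command buffer: one header word, the untranslated (normal)
// parameters, then the translate parameters.
std::size_t CommandSizeInWords(std::uint32_t raw_header) {
    const std::uint32_t translate_params_size = raw_header & 0x3F;     // bits 0-5
    const std::uint32_t normal_params_size = (raw_header >> 6) & 0x3F; // bits 6-11
    return 1u + normal_params_size + translate_params_size;
}
```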
@@ -128,36 +139,63 @@ ResultCode TranslateCommandBuffer(SharedPtr<Thread> src_thread, SharedPtr<Thread
             u32 num_pages =
                 Common::AlignUp(page_offset + size, Memory::PAGE_SIZE) >> Memory::PAGE_BITS;
 
-            // Skip when the size is zero
+            // Skip when the size is zero and num_pages == 0
             if (size == 0) {
-                i += 1;
+                cmd_buf[i++] = 0;
                 break;
             }
+            ASSERT(num_pages >= 1);
 
             if (reply) {
-                // TODO(Subv): Scan the target's command buffer to make sure that there was a
-                // MappedBuffer descriptor in the original request. The real kernel panics if you
-                // try to reply with an unsolicited MappedBuffer.
+                // Scan the target's command buffer for the matching mapped buffer
+                std::size_t j = dst_untranslated_size;
+                while (j < dst_command_size) {
+                    u32 desc = dst_cmd_buf[j++];
 
-                // Unmap the buffers. Readonly buffers do not need to be copied over to the target
-                // process again because they were (presumably) not modified. This behavior is
-                // consistent with the real kernel.
-                if (permissions == IPC::MappedBufferPermissions::R) {
-                    ResultCode result = src_process->vm_manager.UnmapRange(
-                        page_start, num_pages * Memory::PAGE_SIZE);
-                    ASSERT(result == RESULT_SUCCESS);
-                } else {
-                    const auto vma_iter = src_process->vm_manager.vma_map.find(source_address);
-                    const auto& vma = vma_iter->second;
-                    const VAddr dest_address = vma.originating_buffer_address;
+                    if (IPC::GetDescriptorType(desc) == IPC::DescriptorType::MappedBuffer) {
+                        IPC::MappedBufferDescInfo dest_descInfo{desc};
+                        VAddr dest_address = dst_cmd_buf[j];
 
-                    auto buffer = std::make_shared<std::vector<u8>>(size);
-                    Memory::ReadBlock(*src_process, source_address, buffer->data(), size);
-                    Memory::WriteBlock(*dst_process, dest_address, buffer->data(), size);
+                        u32 dest_size = static_cast<u32>(dest_descInfo.size);
+                        IPC::MappedBufferPermissions dest_permissions = dest_descInfo.perms;
 
-                    ResultCode result = src_process->vm_manager.UnmapRange(
-                        page_start, num_pages * Memory::PAGE_SIZE);
-                    ASSERT(result == RESULT_SUCCESS);
+                        if (permissions == dest_permissions && size == dest_size) {
+                            // Readonly buffers do not need to be copied over to the target
+                            // process again because they were (presumably) not modified. This
+                            // behavior is consistent with the real kernel.
+                            if (permissions != IPC::MappedBufferPermissions::R) {
+                                // Copy the modified buffer back into the target process
+                                Memory::CopyBlock(*src_process, *dst_process, source_address,
+                                                  dest_address, size);
+                            }
+
+                            // Unmap the Reserved page before the buffer
+                            ResultCode result = src_process->vm_manager.UnmapRange(
+                                page_start - Memory::PAGE_SIZE, Memory::PAGE_SIZE);
+                            ASSERT(result == RESULT_SUCCESS);
+
+                            // Unmap the buffer from the source process
+                            result = src_process->vm_manager.UnmapRange(
+                                page_start, num_pages * Memory::PAGE_SIZE);
+                            ASSERT(result == RESULT_SUCCESS);
+
+                            // Check if this is the last mapped buffer
+                            VAddr next_reserve = page_start + num_pages * Memory::PAGE_SIZE;
+                            auto& vma =
+                                src_process->vm_manager.FindVMA(next_reserve + Memory::PAGE_SIZE)
+                                    ->second;
+                            if (vma.type == VMAType::Free) {
+                                // Unmap the Reserved page after the last buffer
+                                result = src_process->vm_manager.UnmapRange(next_reserve,
+                                                                            Memory::PAGE_SIZE);
+                                ASSERT(result == RESULT_SUCCESS);
+                            }
+
+                            break;
+                        }
+                    }
+
+                    j += 1;
                 }
 
                 i += 1;
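The unmapping above relies on the page arithmetic established earlier in TranslateCommandBuffer (page_start and page_offset are defined outside this hunk). A standalone sketch of that arithmetic and of the two guard pages touched in the reply path, with hypothetical values and PAGE_SIZE = 0x1000 as in Citra's Memory constants:

```cpp
#include <cstdint>

constexpr std::uint32_t PAGE_SIZE = 0x1000;
constexpr std::uint32_t PAGE_MASK = PAGE_SIZE - 1;
constexpr std::uint32_t PAGE_BITS = 12;

struct MappedRange {
    std::uint32_t page_start;   // first page of the mapped buffer
    std::uint32_t page_offset;  // offset of the data within that page
    std::uint32_t num_pages;    // pages covered by the buffer
    std::uint32_t guard_before; // Reserved page unmapped just before the buffer
    std::uint32_t guard_after;  // Reserved page checked/unmapped after the last buffer
};

MappedRange DescribeMapping(std::uint32_t address, std::uint32_t size) {
    MappedRange r{};
    r.page_offset = address & PAGE_MASK;
    r.page_start = address - r.page_offset;
    r.num_pages = (r.page_offset + size + PAGE_SIZE - 1) >> PAGE_BITS; // AlignUp / PAGE_SIZE
    r.guard_before = r.page_start - PAGE_SIZE;
    r.guard_after = r.page_start + r.num_pages * PAGE_SIZE;
    return r;
}

// e.g. address = 0x08000123, size = 0x2000 -> page_offset = 0x123, num_pages = 3,
// guard_before = 0x07FFF000, guard_after = 0x08003000.
```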
@@ -166,63 +204,38 @@ ResultCode TranslateCommandBuffer(SharedPtr<Thread> src_thread, SharedPtr<Thread
 
             VAddr target_address = 0;
 
-            auto IsPageAligned = [](VAddr address) -> bool {
-                return (address & Memory::PAGE_MASK) == 0;
-            };
-
             // TODO(Subv): Perform permission checks.
 
-            // TODO(Subv): Leave a page of unmapped memory before the first page and after the last
-            // page.
+            // Reserve a page of memory before the mapped buffer
+            auto reserve_buffer = std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE);
+            dst_process->vm_manager.MapMemoryBlockToBase(
+                Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer, 0,
+                static_cast<u32>(reserve_buffer->size()), Kernel::MemoryState::Reserved);
 
-            if (num_pages == 1 && !IsPageAligned(source_address) &&
-                !IsPageAligned(source_address + size)) {
-                // If the address of the source buffer is not page-aligned or if the buffer doesn't
-                // fill an entire page, then we have to allocate a page of memory in the target
-                // process and copy over the data from the input buffer. This allocated buffer will
-                // be copied back to the source process and deallocated when the server replies to
-                // the request via ReplyAndReceive.
+            auto buffer = std::make_shared<std::vector<u8>>(num_pages * Memory::PAGE_SIZE);
+            Memory::ReadBlock(*src_process, source_address, buffer->data() + page_offset, size);
 
-                auto buffer = std::make_shared<std::vector<u8>>(Memory::PAGE_SIZE);
-
-                // Number of bytes until the next page.
-                std::size_t difference_to_page =
-                    Common::AlignUp(source_address, Memory::PAGE_SIZE) - source_address;
-                // If the data fits in one page we can just copy the required size instead of the
-                // entire page.
-                std::size_t read_size =
-                    num_pages == 1 ? static_cast<std::size_t>(size) : difference_to_page;
-
-                Memory::ReadBlock(*src_process, source_address, buffer->data() + page_offset,
-                                  read_size);
-
-                // Map the page into the target process' address space.
-                target_address =
-                    dst_process->vm_manager
-                        .MapMemoryBlockToBase(Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE,
-                                              buffer, 0, static_cast<u32>(buffer->size()),
-                                              Kernel::MemoryState::Shared)
-                        .Unwrap();
-            } else {
-                auto buffer = std::make_shared<std::vector<u8>>(num_pages * Memory::PAGE_SIZE);
-                Memory::ReadBlock(*src_process, source_address, buffer->data() + page_offset, size);
-
-                // Map the pages into the target process' address space.
-                target_address =
-                    dst_process->vm_manager
-                        .MapMemoryBlockToBase(Memory::IPC_MAPPING_VADDR + Memory::PAGE_SIZE,
-                                              Memory::IPC_MAPPING_SIZE - Memory::PAGE_SIZE, buffer,
-                                              0, static_cast<u32>(buffer->size()),
-                                              Kernel::MemoryState::Shared)
-                        .Unwrap();
-            }
-            // Save the original address we copied the buffer from so that we can copy the modified
-            // buffer back, if needed
-            auto vma_iter = dst_process->vm_manager.vma_map.find(target_address + page_offset);
-            auto& vma = vma_iter->second;
-            vma.originating_buffer_address = source_address;
+            // Map the page(s) into the target process' address space.
+            target_address = dst_process->vm_manager
+                                 .MapMemoryBlockToBase(
+                                     Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, buffer, 0,
+                                     static_cast<u32>(buffer->size()), Kernel::MemoryState::Shared)
+                                 .Unwrap();
 
             cmd_buf[i++] = target_address + page_offset;
+
+            // Check if this is the last mapped buffer
+            if (i < command_size) {
+                u32 next_descriptor = cmd_buf[i];
+                if (IPC::GetDescriptorType(next_descriptor) == IPC::DescriptorType::MappedBuffer) {
+                    break;
+                }
+            }
+
+            // Reserve a page of memory after the last mapped buffer
+            dst_process->vm_manager.MapMemoryBlockToBase(
+                Memory::IPC_MAPPING_VADDR, Memory::IPC_MAPPING_SIZE, reserve_buffer, 0,
+                static_cast<u32>(reserve_buffer->size()), Kernel::MemoryState::Reserved);
             break;
         }
         default:
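The word written back into cmd_buf is the address half of a MappedBuffer descriptor pair. A minimal sketch of how a receiver might decode such a pair, assuming the documented 3DS descriptor layout (permissions in bits 1-2, size in bits 4-31) rather than Citra's MappedBufferDescInfo BitFields:

```cpp
#include <cstddef>
#include <cstdint>

struct MappedBufferEntry {
    std::uint32_t size;        // bits 4-31 of the descriptor word
    std::uint32_t permissions; // bits 1-2: 1 = R, 2 = W, 3 = RW
    std::uint32_t address;     // the following word: target_address + page_offset
};

MappedBufferEntry DecodeMappedBuffer(const std::uint32_t* cmd_buf, std::size_t index) {
    const std::uint32_t descriptor = cmd_buf[index];
    return MappedBufferEntry{descriptor >> 4, (descriptor >> 1) & 0x3, cmd_buf[index + 1]};
}
```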
@@ -86,9 +86,6 @@ struct VirtualMemoryArea {
     PAddr paddr = 0;
     Memory::MMIORegionPointer mmio_handler = nullptr;
 
-    /// Originating address of the IPC mapped buffer
-    VAddr originating_buffer_address = 0;
-
     /// Tests if this area can be merged to the right with `next`.
     bool CanBeMergedWith(const VirtualMemoryArea& next) const;
 };
@@ -700,6 +700,58 @@ void CopyBlock(VAddr dest_addr, VAddr src_addr, const std::size_t size) {
     CopyBlock(*Kernel::g_current_process, dest_addr, src_addr, size);
 }
 
+void CopyBlock(const Kernel::Process& src_process, const Kernel::Process& dest_process,
+               VAddr src_addr, VAddr dest_addr, std::size_t size) {
+    auto& page_table = src_process.vm_manager.page_table;
+    std::size_t remaining_size = size;
+    std::size_t page_index = src_addr >> PAGE_BITS;
+    std::size_t page_offset = src_addr & PAGE_MASK;
+
+    while (remaining_size > 0) {
+        const std::size_t copy_amount = std::min(PAGE_SIZE - page_offset, remaining_size);
+        const VAddr current_vaddr = static_cast<VAddr>((page_index << PAGE_BITS) + page_offset);
+
+        switch (page_table.attributes[page_index]) {
+        case PageType::Unmapped: {
+            LOG_ERROR(HW_Memory,
+                      "unmapped CopyBlock @ 0x{:08X} (start address = 0x{:08X}, size = {})",
+                      current_vaddr, src_addr, size);
+            ZeroBlock(dest_process, dest_addr, copy_amount);
+            break;
+        }
+        case PageType::Memory: {
+            DEBUG_ASSERT(page_table.pointers[page_index]);
+            const u8* src_ptr = page_table.pointers[page_index] + page_offset;
+            WriteBlock(dest_process, dest_addr, src_ptr, copy_amount);
+            break;
+        }
+        case PageType::Special: {
+            MMIORegionPointer handler = GetMMIOHandler(page_table, current_vaddr);
+            DEBUG_ASSERT(handler);
+            std::vector<u8> buffer(copy_amount);
+            handler->ReadBlock(current_vaddr, buffer.data(), buffer.size());
+            WriteBlock(dest_process, dest_addr, buffer.data(), buffer.size());
+            break;
+        }
+        case PageType::RasterizerCachedMemory: {
+            RasterizerFlushVirtualRegion(current_vaddr, static_cast<u32>(copy_amount),
+                                         FlushMode::Flush);
+            WriteBlock(dest_process, dest_addr, GetPointerFromVMA(src_process, current_vaddr),
+                       copy_amount);
+            break;
+        }
+        default:
+            UNREACHABLE();
+        }
+
+        page_index++;
+        page_offset = 0;
+        dest_addr += static_cast<VAddr>(copy_amount);
+        src_addr += static_cast<VAddr>(copy_amount);
+        remaining_size -= copy_amount;
+    }
+}
+
 template <>
 u8 ReadMMIO<u8>(MMIORegionPointer mmio_handler, VAddr addr) {
     return mmio_handler->Read8(addr);
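The new overload walks the source range one page at a time, dispatches each chunk on the source page's type (zero-fill for unmapped pages, direct copy for backed memory, MMIO reads for special pages, a rasterizer flush for cached memory), and funnels every write through WriteBlock on the destination process. Stripped of Citra's page-table types, the chunking pattern reduces to the following generic sketch (not Citra's API):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>

constexpr std::size_t kPageSize = 0x1000;

// Invokes copy_chunk(addr, length) for consecutive chunks of [src_addr, src_addr + size),
// each clamped to a page boundary so a chunk never straddles two source pages.
template <typename CopyChunk>
void ForEachPageChunk(std::uint32_t src_addr, std::size_t size, CopyChunk copy_chunk) {
    std::size_t remaining = size;
    std::size_t offset_in_page = src_addr & (kPageSize - 1);
    std::uint32_t current = src_addr;
    while (remaining > 0) {
        const std::size_t chunk = std::min(kPageSize - offset_in_page, remaining);
        copy_chunk(current, chunk); // e.g. dispatch on the page type, then write the chunk
        current += static_cast<std::uint32_t>(chunk);
        remaining -= chunk;
        offset_in_page = 0; // every page after the first starts at its beginning
    }
}
```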
@@ -205,6 +205,8 @@ void ZeroBlock(const Kernel::Process& process, VAddr dest_addr, const std::size_
 void ZeroBlock(VAddr dest_addr, const std::size_t size);
 void CopyBlock(const Kernel::Process& process, VAddr dest_addr, VAddr src_addr, std::size_t size);
 void CopyBlock(VAddr dest_addr, VAddr src_addr, std::size_t size);
+void CopyBlock(const Kernel::Process& src_process, const Kernel::Process& dest_process,
+               VAddr src_addr, VAddr dest_addr, std::size_t size);
 
 u8* GetPointer(VAddr vaddr);
 