Mirror of https://github.com/PabloMK7/citra.git (synced 2025-10-31 05:40:04 +00:00)
	Use recursive_mutex instead of mutex to fix #2902
commit 8266064796
parent 61442d6afb

4 changed files with 5 additions and 5 deletions
@@ -7,5 +7,5 @@
 #include <core/hle/lock.h>
 
 namespace HLE {
-std::mutex g_hle_lock;
+std::recursive_mutex g_hle_lock;
 }

@@ -14,5 +14,5 @@ namespace HLE {
  * to the emulated memory is not protected by this mutex, and should be avoided in any threads other
  * than the CPU thread.
  */
-extern std::mutex g_hle_lock;
+extern std::recursive_mutex g_hle_lock;
 } // namespace HLE

@@ -1334,7 +1334,7 @@ void CallSVC(u32 immediate) {
     MICROPROFILE_SCOPE(Kernel_SVC);
 
     // Lock the global kernel mutex when we enter the kernel HLE.
-    std::lock_guard<std::mutex> lock(HLE::g_hle_lock);
+    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
 
     const FunctionDef* info = GetSVCInfo(immediate);
     if (info) {

@@ -183,7 +183,7 @@ T Read(const VAddr vaddr) {
     }
 
     // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state
-    std::lock_guard<std::mutex> lock(HLE::g_hle_lock);
+    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
 
     PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {

@@ -224,7 +224,7 @@ void Write(const VAddr vaddr, const T data) {
     }
 
     // The memory access might do an MMIO or cached access, so we have to lock the HLE kernel state
-    std::lock_guard<std::mutex> lock(HLE::g_hle_lock);
+    std::lock_guard<std::recursive_mutex> lock(HLE::g_hle_lock);
 
     PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {
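Background (not part of the commit, and the exact scenario behind #2902 is assumed here): the change pattern suggests a re-entrant locking problem. The SVC dispatcher takes HLE::g_hle_lock and then calls into the Memory::Read/Write paths, which take the same lock on the same thread; with std::mutex that second lock is undefined behaviour (in practice a deadlock), while std::recursive_mutex lets the owning thread lock again. The sketch below is a minimal illustration of that difference, using the hypothetical stand-ins EnterSvc() and ReadMemory() rather than the real Citra functions.

// Minimal sketch, not Citra code: why the HLE lock must be re-entrant.
#include <iostream>
#include <mutex>

// With std::mutex instead, the nested lock in EnterSvc() -> ReadMemory()
// on the same thread would be undefined behaviour (typically a deadlock).
std::recursive_mutex g_hle_lock;

int ReadMemory() {
    // The memory access might hit MMIO, so it takes the HLE lock as well.
    std::lock_guard<std::recursive_mutex> lock(g_hle_lock);
    return 42;
}

void EnterSvc() {
    // The SVC dispatcher takes the lock first...
    std::lock_guard<std::recursive_mutex> lock(g_hle_lock);
    // ...and then calls into code that locks it again on the same thread.
    std::cout << ReadMemory() << '\n';
}

int main() {
    EnterSvc(); // prints 42; recursion on the lock is safe for the owning thread
}

Note that std::lock_guard works with any Lockable type, which is why the call sites in the diff only change the template argument and nothing else.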