Mirror of https://github.com/PabloMK7/citra.git, synced 2025-10-31 13:50:03 +00:00
	Prefix all size_t with std::
done automatically by executing regex replace `([^:0-9a-zA-Z_])size_t([^0-9a-zA-Z_])` -> `$1std::size_t$2`
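As a concrete sketch of the replacement described above (not the tooling actually used for this commit), the same regex can be applied to a checkout with a short Python script. Python writes the backreferences as `\1`/`\2` instead of `$1`/`$2`, and the `src` directory and `.cpp`/`.h` extension filter below are assumptions for illustration only:

```python
#!/usr/bin/env python3
# Minimal sketch of the mechanical replacement from the commit message.
# NOT the tooling actually used for this commit; the "src" root and the
# .cpp/.h extension filter are assumptions made for illustration.
import re
from pathlib import Path

# The commit message writes backreferences as $1/$2; Python's re uses \1/\2.
PATTERN = re.compile(r"([^:0-9a-zA-Z_])size_t([^0-9a-zA-Z_])")

for path in Path("src").rglob("*"):
    if path.suffix not in {".cpp", ".h"}:
        continue
    text = path.read_text(encoding="utf-8")
    changed = False
    # Repeat until stable: each match consumes one character on either side of
    # "size_t", so back-to-back occurrences can need a second pass.
    while True:
        text, count = PATTERN.subn(r"\1std::size_t\2", text)
        if count == 0:
            break
        changed = True
    if changed:
        path.write_text(text, encoding="utf-8")
```

The character classes on either side of `size_t` are what leave already-qualified `std::size_t` (preceded by `:`) and longer identifiers such as `size_type` untouched.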
parent eca98eeb3e
commit 7d8f115185

158 changed files with 669 additions and 634 deletions
@@ -351,7 +351,7 @@ static void WritePicaReg(u32 id, u32 value, u32 mask) {
 
         // Simple circular-replacement vertex cache
         // The size has been tuned for optimal balance between hit-rate and the cost of lookup
-        const size_t VERTEX_CACHE_SIZE = 32;
+        const std::size_t VERTEX_CACHE_SIZE = 32;
         std::array<bool, VERTEX_CACHE_SIZE> vertex_cache_valid{};
         std::array<u16, VERTEX_CACHE_SIZE> vertex_cache_ids;
         std::array<Shader::AttributeBuffer, VERTEX_CACHE_SIZE> vertex_cache;
@@ -315,7 +315,7 @@ std::unique_ptr<PicaTrace> FinishPicaTracing() {
 
 static std::string ReplacePattern(const std::string& input, const std::string& pattern,
                                   const std::string& replacement) {
-    size_t start = input.find(pattern);
+    std::size_t start = input.find(pattern);
     if (start == std::string::npos)
         return input;
 
@@ -451,7 +451,7 @@ std::string GetTevStageConfigAlphaCombinerString(const TexturingRegs::TevStageCo
 
 void DumpTevStageConfig(const std::array<TexturingRegs::TevStageConfig, 6>& stages) {
     std::string stage_info = "Tev setup:\n";
-    for (size_t index = 0; index < stages.size(); ++index) {
+    for (std::size_t index = 0; index < stages.size(); ++index) {
         const auto& tev_stage = stages[index];
         stage_info += "Stage " + std::to_string(index) + ": " +
                       GetTevStageConfigColorCombinerString(tev_stage) + "   " +
@@ -44,7 +44,7 @@ public:
         ASSERT(regs.pipeline.variable_primitive == 0);
         ASSERT(regs.gs.input_to_uniform == 0);
         vs_output_num = regs.pipeline.vs_outmap_total_minus_1_a + 1;
-        size_t gs_input_num = regs.gs.max_input_attribute_index + 1;
+        std::size_t gs_input_num = regs.gs.max_input_attribute_index + 1;
         ASSERT(gs_input_num % vs_output_num == 0);
         buffer_cur = attribute_buffer.attr;
         buffer_end = attribute_buffer.attr + gs_input_num;
@@ -157,7 +157,7 @@ public:
         ASSERT(regs.gs.input_to_uniform == 1);
         vs_output_num = regs.pipeline.vs_outmap_total_minus_1_a + 1;
         ASSERT(vs_output_num == regs.pipeline.gs_config.stride_minus_1 + 1);
-        size_t vertex_num = regs.pipeline.gs_config.fixed_vertex_num_minus_1 + 1;
+        std::size_t vertex_num = regs.pipeline.gs_config.fixed_vertex_num_minus_1 + 1;
         buffer_cur = buffer_begin = setup.uniforms.f + regs.pipeline.gs_config.start_index;
         buffer_end = buffer_begin + vs_output_num * vertex_num;
     }
@@ -37,15 +37,15 @@ namespace Pica {
 #else
 // NOTE: Yeah, hacking in a static_assert here just to workaround the lacking MSVC compiler
 //       really is this annoying. This macro just forwards its first argument to PICA_REG_INDEX
-//       and then performs a (no-op) cast to size_t iff the second argument matches the expected
-//       field offset. Otherwise, the compiler will fail to compile this code.
+//       and then performs a (no-op) cast to std::size_t iff the second argument matches the
+//       expected field offset. Otherwise, the compiler will fail to compile this code.
 #define PICA_REG_INDEX_WORKAROUND(field_name, backup_workaround_index)                             \
     ((typename std::enable_if<backup_workaround_index == PICA_REG_INDEX(field_name),               \
-                              size_t>::type) PICA_REG_INDEX(field_name))
+                              std::size_t>::type) PICA_REG_INDEX(field_name))
 #endif // _MSC_VER
 
 struct Regs {
-    static constexpr size_t NUM_REGS = 0x300;
+    static constexpr std::size_t NUM_REGS = 0x300;
 
     union {
         struct {
@@ -150,7 +150,7 @@ struct TexturingRegs {
         if (face != CubeFace::PositiveX) {
             // Bits [22:27] from the main texture address is shared with all cubemap additional
             // addresses.
-            auto& face_addr = cube_address[static_cast<size_t>(face) - 1];
+            auto& face_addr = cube_address[static_cast<std::size_t>(face) - 1];
             address &= ~face_addr.mask;
             address |= face_addr;
         }
@@ -63,7 +63,7 @@ RasterizerOpenGL::RasterizerOpenGL(EmuWindow& window)
     state.clip_distance[0] = true;
 
     // Create sampler objects
-    for (size_t i = 0; i < texture_samplers.size(); ++i) {
+    for (std::size_t i = 0; i < texture_samplers.size(); ++i) {
         texture_samplers[i].Create();
         state.texture_units[i].sampler = texture_samplers[i].sampler.handle;
     }
@@ -91,11 +91,11 @@ RasterizerOpenGL::RasterizerOpenGL(EmuWindow& window)
 
     glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &uniform_buffer_alignment);
     uniform_size_aligned_vs =
-        Common::AlignUp<size_t>(sizeof(VSUniformData), uniform_buffer_alignment);
+        Common::AlignUp<std::size_t>(sizeof(VSUniformData), uniform_buffer_alignment);
     uniform_size_aligned_gs =
-        Common::AlignUp<size_t>(sizeof(GSUniformData), uniform_buffer_alignment);
+        Common::AlignUp<std::size_t>(sizeof(GSUniformData), uniform_buffer_alignment);
     uniform_size_aligned_fs =
-        Common::AlignUp<size_t>(sizeof(UniformData), uniform_buffer_alignment);
+        Common::AlignUp<std::size_t>(sizeof(UniformData), uniform_buffer_alignment);
 
     // Set vertex attributes for software shader path
     state.draw.vertex_array = sw_vao.handle;
@@ -1904,11 +1904,11 @@ void RasterizerOpenGL::SyncShadowBias() {
 }
 
 void RasterizerOpenGL::SyncAndUploadLUTs() {
-    constexpr size_t max_size = sizeof(GLvec2) * 256 * Pica::LightingRegs::NumLightingSampler +
-                                sizeof(GLvec2) * 128 +     // fog
-                                sizeof(GLvec2) * 128 * 3 + // proctex: noise + color + alpha
-                                sizeof(GLvec4) * 256 +     // proctex
-                                sizeof(GLvec4) * 256;      // proctex diff
+    constexpr std::size_t max_size = sizeof(GLvec2) * 256 * Pica::LightingRegs::NumLightingSampler +
+                                     sizeof(GLvec2) * 128 +     // fog
+                                     sizeof(GLvec2) * 128 * 3 + // proctex: noise + color + alpha
+                                     sizeof(GLvec4) * 256 +     // proctex
+                                     sizeof(GLvec4) * 256;      // proctex diff
 
     if (!uniform_block_data.lighting_lut_dirty_any && !uniform_block_data.fog_lut_dirty &&
         !uniform_block_data.proctex_noise_lut_dirty &&
@@ -1921,7 +1921,7 @@ void RasterizerOpenGL::SyncAndUploadLUTs() {
     u8* buffer;
     GLintptr offset;
     bool invalidate;
-    size_t bytes_used = 0;
+    std::size_t bytes_used = 0;
     glBindBuffer(GL_TEXTURE_BUFFER, texture_buffer.GetHandle());
     std::tie(buffer, offset, invalidate) = texture_buffer.Map(max_size, sizeof(GLvec4));
 
@@ -2068,9 +2068,9 @@ void RasterizerOpenGL::UploadUniforms(bool accelerate_draw, bool use_gs) {
     if (!sync_vs && !sync_gs && !sync_fs)
         return;
 
-    size_t uniform_size =
+    std::size_t uniform_size =
         uniform_size_aligned_vs + uniform_size_aligned_gs + uniform_size_aligned_fs;
-    size_t used_bytes = 0;
+    std::size_t used_bytes = 0;
     u8* uniforms;
     GLintptr offset;
     bool invalidate;
@@ -272,10 +272,10 @@ private:
     std::unique_ptr<ShaderProgramManager> shader_program_manager;
 
     // They shall be big enough for about one frame.
-    static constexpr size_t VERTEX_BUFFER_SIZE = 32 * 1024 * 1024;
-    static constexpr size_t INDEX_BUFFER_SIZE = 1 * 1024 * 1024;
-    static constexpr size_t UNIFORM_BUFFER_SIZE = 2 * 1024 * 1024;
-    static constexpr size_t TEXTURE_BUFFER_SIZE = 1 * 1024 * 1024;
+    static constexpr std::size_t VERTEX_BUFFER_SIZE = 32 * 1024 * 1024;
+    static constexpr std::size_t INDEX_BUFFER_SIZE = 1 * 1024 * 1024;
+    static constexpr std::size_t UNIFORM_BUFFER_SIZE = 2 * 1024 * 1024;
+    static constexpr std::size_t TEXTURE_BUFFER_SIZE = 1 * 1024 * 1024;
 
     OGLVertexArray sw_vao; // VAO for software shader draw
     OGLVertexArray hw_vao; // VAO for hardware shader / accelerate draw
@@ -288,9 +288,9 @@ private:
     OGLStreamBuffer texture_buffer;
     OGLFramebuffer framebuffer;
     GLint uniform_buffer_alignment;
-    size_t uniform_size_aligned_vs;
-    size_t uniform_size_aligned_gs;
-    size_t uniform_size_aligned_fs;
+    std::size_t uniform_size_aligned_vs;
+    std::size_t uniform_size_aligned_gs;
+    std::size_t uniform_size_aligned_fs;
 
     SamplerInfo texture_cube_sampler;
 
@@ -61,10 +61,10 @@ static constexpr FormatTuple tex_tuple = {GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE};
 static const FormatTuple& GetFormatTuple(PixelFormat pixel_format) {
     const SurfaceType type = SurfaceParams::GetFormatType(pixel_format);
     if (type == SurfaceType::Color) {
-        ASSERT(static_cast<size_t>(pixel_format) < fb_format_tuples.size());
+        ASSERT(static_cast<std::size_t>(pixel_format) < fb_format_tuples.size());
         return fb_format_tuples[static_cast<unsigned int>(pixel_format)];
     } else if (type == SurfaceType::Depth || type == SurfaceType::DepthStencil) {
-        size_t tuple_idx = static_cast<size_t>(pixel_format) - 14;
+        std::size_t tuple_idx = static_cast<std::size_t>(pixel_format) - 14;
         ASSERT(tuple_idx < depth_format_tuples.size());
         return depth_format_tuples[tuple_idx];
     }
@@ -669,13 +669,13 @@ void CachedSurface::LoadGLBuffer(PAddr load_start, PAddr load_end) {
                 for (unsigned x = rect.left; x < rect.right; ++x) {
                     auto vec4 =
                         Pica::Texture::LookupTexture(texture_src_data, x, height - 1 - y, tex_info);
-                    const size_t offset = (x + (width * y)) * 4;
+                    const std::size_t offset = (x + (width * y)) * 4;
                     std::memcpy(&gl_buffer[offset], vec4.AsArray(), 4);
                 }
             }
         } else {
-            morton_to_gl_fns[static_cast<size_t>(pixel_format)](stride, height, &gl_buffer[0], addr,
-                                                                load_start, load_end);
+            morton_to_gl_fns[static_cast<std::size_t>(pixel_format)](stride, height, &gl_buffer[0],
+                                                                     addr, load_start, load_end);
         }
     }
 }
@@ -720,8 +720,8 @@ void CachedSurface::FlushGLBuffer(PAddr flush_start, PAddr flush_end) {
         ASSERT(type == SurfaceType::Color);
         std::memcpy(dst_buffer + start_offset, &gl_buffer[start_offset], flush_end - flush_start);
     } else {
-        gl_to_morton_fns[static_cast<size_t>(pixel_format)](stride, height, &gl_buffer[0], addr,
-                                                            flush_start, flush_end);
+        gl_to_morton_fns[static_cast<std::size_t>(pixel_format)](stride, height, &gl_buffer[0],
+                                                                 addr, flush_start, flush_end);
     }
 }
 
@@ -738,7 +738,7 @@ void CachedSurface::UploadGLTexture(const MathUtil::Rectangle<u32>& rect, GLuint
     // Load data from memory to the surface
     GLint x0 = static_cast<GLint>(rect.left);
     GLint y0 = static_cast<GLint>(rect.bottom);
-    size_t buffer_offset = (y0 * stride + x0) * GetGLBytesPerPixel(pixel_format);
+    std::size_t buffer_offset = (y0 * stride + x0) * GetGLBytesPerPixel(pixel_format);
 
     const FormatTuple& tuple = GetFormatTuple(pixel_format);
     GLuint target_tex = texture.handle;
@@ -811,7 +811,8 @@ void CachedSurface::DownloadGLTexture(const MathUtil::Rectangle<u32>& rect, GLui
     // Ensure no bad interactions with GL_PACK_ALIGNMENT
     ASSERT(stride * GetGLBytesPerPixel(pixel_format) % 4 == 0);
     glPixelStorei(GL_PACK_ROW_LENGTH, static_cast<GLint>(stride));
-    size_t buffer_offset = (rect.bottom * stride + rect.left) * GetGLBytesPerPixel(pixel_format);
+    std::size_t buffer_offset =
+        (rect.bottom * stride + rect.left) * GetGLBytesPerPixel(pixel_format);
 
     // If not 1x scale, blit scaled texture to a new 1x texture and use that to flush
     if (res_scale != 1) {
@@ -115,8 +115,8 @@ struct SurfaceParams {
             32, // D24S8
         };
 
-        assert(static_cast<size_t>(format) < bpp_table.size());
-        return bpp_table[static_cast<size_t>(format)];
+        assert(static_cast<std::size_t>(format) < bpp_table.size());
+        return bpp_table[static_cast<std::size_t>(format)];
     }
     unsigned int GetFormatBpp() const {
         return GetFormatBpp(pixel_format);
@@ -321,7 +321,7 @@ struct CachedSurface : SurfaceParams, std::enable_shared_from_this<CachedSurface
     }
 
     std::unique_ptr<u8[]> gl_buffer;
-    size_t gl_buffer_size = 0;
+    std::size_t gl_buffer_size = 0;
 
     // Read/Write data in 3DS memory to/from gl_buffer
     void LoadGLBuffer(PAddr load_start, PAddr load_end);
@@ -374,7 +374,7 @@ struct TextureCubeConfig {
 namespace std {
 template <>
 struct hash<TextureCubeConfig> {
-    size_t operator()(const TextureCubeConfig& config) const {
+    std::size_t operator()(const TextureCubeConfig& config) const {
         std::size_t hash = 0;
         boost::hash_combine(hash, config.px);
         boost::hash_combine(hash, config.nx);
@@ -201,7 +201,7 @@ public:
     void AddLine(const std::string& text) {
         DEBUG_ASSERT(scope >= 0);
         if (!text.empty()) {
-            shader_source += std::string(static_cast<size_t>(scope) * 4, ' ');
+            shader_source += std::string(static_cast<std::size_t>(scope) * 4, ' ');
         }
         shader_source += text + '\n';
     }
@@ -418,9 +418,10 @@ private:
     u32 CompileInstr(u32 offset) {
         const Instruction instr = {program_code[offset]};
 
-        size_t swizzle_offset = instr.opcode.Value().GetInfo().type == OpCode::Type::MultiplyAdd
-                                    ? instr.mad.operand_desc_id
-                                    : instr.common.operand_desc_id;
+        std::size_t swizzle_offset =
+            instr.opcode.Value().GetInfo().type == OpCode::Type::MultiplyAdd
+                ? instr.mad.operand_desc_id
+                : instr.common.operand_desc_id;
         const SwizzlePattern swizzle = {swizzle_data[swizzle_offset]};
 
         shader.AddLine("// " + std::to_string(offset) + ": " + instr.opcode.Value().GetInfo().name);
@@ -129,7 +129,7 @@ PicaFSConfig PicaFSConfig::BuildFromRegs(const Pica::Regs& regs) {
     // shader uniform instead.
     const auto& tev_stages = regs.texturing.GetTevStages();
     DEBUG_ASSERT(state.tev_stages.size() == tev_stages.size());
-    for (size_t i = 0; i < tev_stages.size(); i++) {
+    for (std::size_t i = 0; i < tev_stages.size(); i++) {
         const auto& tev_stage = tev_stages[i];
         state.tev_stages[i].sources_raw = tev_stage.sources_raw;
         state.tev_stages[i].modifiers_raw = tev_stage.modifiers_raw;
@@ -272,8 +272,8 @@ void PicaGSConfigCommonRaw::Init(const Pica::Regs& regs) {
             regs.rasterizer.vs_output_attributes[attrib].map_w};
         for (u32 comp = 0; comp < 4; ++comp) {
             const auto semantic = semantics[comp];
-            if (static_cast<size_t>(semantic) < 24) {
-                semantic_maps[static_cast<size_t>(semantic)] = {attrib, comp};
+            if (static_cast<std::size_t>(semantic) < 24) {
+                semantic_maps[static_cast<std::size_t>(semantic)] = {attrib, comp};
             } else if (semantic != VSOutputAttributes::INVALID) {
                 LOG_ERROR(Render_OpenGL, "Invalid/unknown semantic id: {}",
                           static_cast<u32>(semantic));
@@ -1516,7 +1516,7 @@ vec4 secondary_fragment_color = vec4(0.0);
     out += "vec4 next_combiner_buffer = tev_combiner_buffer_color;\n";
     out += "vec4 last_tex_env_out = vec4(0.0);\n";
 
-    for (size_t index = 0; index < state.tev_stages.size(); ++index)
+    for (std::size_t index = 0; index < state.tev_stages.size(); ++index)
         WriteTevStage(out, config, (unsigned)index);
 
     if (state.alpha_test_func != FramebufferRegs::CompareFunc::Always) {
@@ -261,28 +261,28 @@ std::string GenerateFragmentShader(const PicaFSConfig& config, bool separable_sh
 namespace std {
 template <>
 struct hash<GLShader::PicaFSConfig> {
-    size_t operator()(const GLShader::PicaFSConfig& k) const {
+    std::size_t operator()(const GLShader::PicaFSConfig& k) const {
         return k.Hash();
     }
 };
 
 template <>
 struct hash<GLShader::PicaVSConfig> {
-    size_t operator()(const GLShader::PicaVSConfig& k) const {
+    std::size_t operator()(const GLShader::PicaVSConfig& k) const {
         return k.Hash();
     }
 };
 
 template <>
 struct hash<GLShader::PicaFixedGSConfig> {
-    size_t operator()(const GLShader::PicaFixedGSConfig& k) const {
+    std::size_t operator()(const GLShader::PicaFixedGSConfig& k) const {
         return k.Hash();
     }
 };
 
 template <>
 struct hash<GLShader::PicaGSConfig> {
-    size_t operator()(const GLShader::PicaGSConfig& k) const {
+    std::size_t operator()(const GLShader::PicaGSConfig& k) const {
         return k.Hash();
     }
 };
@@ -9,7 +9,7 @@
 #include "video_core/renderer_opengl/gl_shader_manager.h"
 
 static void SetShaderUniformBlockBinding(GLuint shader, const char* name, UniformBindings binding,
-                                         size_t expected_size) {
+                                         std::size_t expected_size) {
     GLuint ub_index = glGetUniformBlockIndex(shader, name);
     if (ub_index == GL_INVALID_INDEX) {
         return;
@@ -317,7 +317,7 @@ void OpenGLState::Apply() const {
     }
 
     // Clip distance
-    for (size_t i = 0; i < clip_distance.size(); ++i) {
+    for (std::size_t i = 0; i < clip_distance.size(); ++i) {
         if (clip_distance[i] != cur_state.clip_distance[i]) {
             if (clip_distance[i]) {
                 glEnable(GL_CLIP_DISTANCE0 + static_cast<GLenum>(i));
@@ -60,7 +60,7 @@ std::tuple<u8*, GLintptr, bool> OGLStreamBuffer::Map(GLsizeiptr size, GLintptr a
     mapped_size = size;
 
     if (alignment > 0) {
-        buffer_pos = Common::AlignUp<size_t>(buffer_pos, alignment);
+        buffer_pos = Common::AlignUp<std::size_t>(buffer_pos, alignment);
     }
 
     bool invalidate = false;
@@ -35,7 +35,7 @@ inline GLenum TextureFilterMode(Pica::TexturingRegs::TextureConfig::TextureFilte
         GL_LINEAR,  // TextureFilter::Linear
     }};
 
-    const auto index = static_cast<size_t>(mode);
+    const auto index = static_cast<std::size_t>(mode);
 
     // Range check table for input
     if (index >= filter_mode_table.size()) {
@@ -72,7 +72,7 @@ inline GLenum WrapMode(Pica::TexturingRegs::TextureConfig::WrapMode mode) {
         GL_REPEAT,          // WrapMode::Repeat3
     }};
 
-    const auto index = static_cast<size_t>(mode);
+    const auto index = static_cast<std::size_t>(mode);
 
     // Range check table for input
     if (index >= wrap_mode_table.size()) {
@@ -111,7 +111,7 @@ inline GLenum BlendEquation(Pica::FramebufferRegs::BlendEquation equation) {
         GL_MAX,                   // BlendEquation::Max
     }};
 
-    const auto index = static_cast<size_t>(equation);
+    const auto index = static_cast<std::size_t>(equation);
 
     // Range check table for input
     if (index >= blend_equation_table.size()) {
@@ -143,7 +143,7 @@ inline GLenum BlendFunc(Pica::FramebufferRegs::BlendFactor factor) {
         GL_SRC_ALPHA_SATURATE,       // BlendFactor::SourceAlphaSaturate
     }};
 
-    const auto index = static_cast<size_t>(factor);
+    const auto index = static_cast<std::size_t>(factor);
 
     // Range check table for input
     if (index >= blend_func_table.size()) {
@@ -176,7 +176,7 @@ inline GLenum LogicOp(Pica::FramebufferRegs::LogicOp op) {
         GL_OR_INVERTED,   // OrInverted
     }};
 
-    const auto index = static_cast<size_t>(op);
+    const auto index = static_cast<std::size_t>(op);
 
     // Range check table for input
     if (index >= logic_op_table.size()) {
@@ -201,7 +201,7 @@ inline GLenum CompareFunc(Pica::FramebufferRegs::CompareFunc func) {
         GL_GEQUAL,   // CompareFunc::GreaterThanOrEqual
     }};
 
-    const auto index = static_cast<size_t>(func);
+    const auto index = static_cast<std::size_t>(func);
 
     // Range check table for input
     if (index >= compare_func_table.size()) {
@@ -226,7 +226,7 @@ inline GLenum StencilOp(Pica::FramebufferRegs::StencilAction action) {
         GL_DECR_WRAP, // StencilAction::DecrementWrap
     }};
 
-    const auto index = static_cast<size_t>(action);
+    const auto index = static_cast<std::size_t>(action);
 
     // Range check table for input
     if (index >= stencil_op_table.size()) {
@@ -176,7 +176,7 @@ void RendererOpenGL::LoadFBToScreenInfo(const GPU::Regs::FramebufferConfig& fram
               (int)framebuffer.height, (int)framebuffer.format);
 
     int bpp = GPU::Regs::BytesPerPixel(framebuffer.color_format);
-    size_t pixel_stride = framebuffer.stride / bpp;
+    std::size_t pixel_stride = framebuffer.stride / bpp;
 
     // OpenGL only supports specifying a stride in units of pixels, not bytes, unfortunately
     ASSERT(pixel_stride * bpp == framebuffer.stride);
@@ -24,9 +24,9 @@ namespace Shader {
 void OutputVertex::ValidateSemantics(const RasterizerRegs& regs) {
     unsigned int num_attributes = regs.vs_output_total;
     ASSERT(num_attributes <= 7);
-    for (size_t attrib = 0; attrib < num_attributes; ++attrib) {
+    for (std::size_t attrib = 0; attrib < num_attributes; ++attrib) {
         u32 output_register_map = regs.vs_output_attributes[attrib].raw;
-        for (size_t comp = 0; comp < 4; ++comp) {
+        for (std::size_t comp = 0; comp < 4; ++comp) {
             u32 semantic = (output_register_map >> (8 * comp)) & 0x1F;
             ASSERT_MSG(semantic < 24 || semantic == RasterizerRegs::VSOutputAttributes::INVALID,
                        "Invalid/unknown semantic id: {}", semantic);
@@ -50,7 +50,7 @@ OutputVertex OutputVertex::FromAttributeBuffer(const RasterizerRegs& regs,
                   "Struct and array have different sizes.");
 
     unsigned int num_attributes = regs.vs_output_total & 7;
-    for (size_t attrib = 0; attrib < num_attributes; ++attrib) {
+    for (std::size_t attrib = 0; attrib < num_attributes; ++attrib) {
         const auto output_register_map = regs.vs_output_attributes[attrib];
         vertex_slots_overflow[output_register_map.map_x] = input.attr[attrib][0];
         vertex_slots_overflow[output_register_map.map_y] = input.attr[attrib][1];
@@ -117,7 +117,7 @@ void GSEmitter::Emit(Math::Vec4<float24> (&output_regs)[16]) {
     if (prim_emit) {
         if (winding)
             handlers->winding_setter();
-        for (size_t i = 0; i < buffer.size(); ++i) {
+        for (std::size_t i = 0; i < buffer.size(); ++i) {
             handlers->vertex_handler(buffer[i]);
         }
     }
@@ -118,7 +118,7 @@ struct UnitState {
 
     GSEmitter* emitter_ptr;
 
-    static size_t InputOffset(const SourceRegister& reg) {
+    static std::size_t InputOffset(const SourceRegister& reg) {
         switch (reg.GetRegisterType()) {
         case RegisterType::Input:
             return offsetof(UnitState, registers.input) +
@@ -134,7 +134,7 @@ struct UnitState {
         }
     }
 
-    static size_t OutputOffset(const DestRegister& reg) {
+    static std::size_t OutputOffset(const DestRegister& reg) {
         switch (reg.GetRegisterType()) {
         case RegisterType::Output:
             return offsetof(UnitState, registers.output) +
@@ -182,15 +182,15 @@ struct Uniforms {
     std::array<bool, 16> b;
     std::array<Math::Vec4<u8>, 4> i;
 
-    static size_t GetFloatUniformOffset(unsigned index) {
+    static std::size_t GetFloatUniformOffset(unsigned index) {
         return offsetof(Uniforms, f) + index * sizeof(Math::Vec4<float24>);
     }
 
-    static size_t GetBoolUniformOffset(unsigned index) {
+    static std::size_t GetBoolUniformOffset(unsigned index) {
         return offsetof(Uniforms, b) + index * sizeof(bool);
     }
 
-    static size_t GetIntUniformOffset(unsigned index) {
+    static std::size_t GetIntUniformOffset(unsigned index) {
         return offsetof(Uniforms, i) + index * sizeof(Math::Vec4<u8>);
     }
 };
@@ -166,7 +166,7 @@ static void LogCritical(const char* msg) {
 
 void JitShader::Compile_Assert(bool condition, const char* msg) {
     if (!condition) {
-        mov(ABI_PARAM1, reinterpret_cast<size_t>(msg));
+        mov(ABI_PARAM1, reinterpret_cast<std::size_t>(msg));
         CallFarFunction(*this, LogCritical);
     }
 }
@@ -181,7 +181,7 @@ void JitShader::Compile_Assert(bool condition, const char* msg) {
 void JitShader::Compile_SwizzleSrc(Instruction instr, unsigned src_num, SourceRegister src_reg,
                                    Xmm dest) {
     Reg64 src_ptr;
-    size_t src_offset;
+    std::size_t src_offset;
 
     if (src_reg.GetRegisterType() == RegisterType::FloatUniform) {
         src_ptr = UNIFORMS;
@@ -266,7 +266,7 @@ void JitShader::Compile_DestEnable(Instruction instr, Xmm src) {
 
     SwizzlePattern swiz = {(*swizzle_data)[operand_desc_id]};
 
-    size_t dest_offset_disp = UnitState::OutputOffset(dest);
+    std::size_t dest_offset_disp = UnitState::OutputOffset(dest);
 
     // If all components are enabled, write the result to the destination register
     if (swiz.dest_mask == NO_DEST_REG_MASK) {
@@ -354,7 +354,7 @@ void JitShader::Compile_EvaluateCondition(Instruction instr) {
 }
 
 void JitShader::Compile_UniformCondition(Instruction instr) {
-    size_t offset = Uniforms::GetBoolUniformOffset(instr.flow_control.bool_uniform_id);
+    std::size_t offset = Uniforms::GetBoolUniformOffset(instr.flow_control.bool_uniform_id);
     cmp(byte[UNIFORMS + offset], 0);
 }
 
@@ -733,7 +733,7 @@ void JitShader::Compile_LOOP(Instruction instr) {
     // This decodes the fields from the integer uniform at index instr.flow_control.int_uniform_id.
     // The Y (LOOPCOUNT_REG) and Z (LOOPINC) component are kept multiplied by 16 (Left shifted by
     // 4 bits) to be used as an offset into the 16-byte vector registers later
-    size_t offset = Uniforms::GetIntUniformOffset(instr.flow_control.int_uniform_id);
+    std::size_t offset = Uniforms::GetIntUniformOffset(instr.flow_control.int_uniform_id);
     mov(LOOPCOUNT, dword[UNIFORMS + offset]);
     mov(LOOPCOUNT_REG, LOOPCOUNT);
     shr(LOOPCOUNT_REG, 4);
@@ -789,7 +789,7 @@ void JitShader::Compile_EMIT(Instruction instr) {
     jnz(have_emitter);
 
     ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
-    mov(ABI_PARAM1, reinterpret_cast<size_t>("Execute EMIT on VS"));
+    mov(ABI_PARAM1, reinterpret_cast<std::size_t>("Execute EMIT on VS"));
     CallFarFunction(*this, LogCritical);
     ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
     jmp(end);
@@ -811,7 +811,7 @@ void JitShader::Compile_SETE(Instruction instr) {
     jnz(have_emitter);
 
     ABI_PushRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
-    mov(ABI_PARAM1, reinterpret_cast<size_t>("Execute SETEMIT on VS"));
+    mov(ABI_PARAM1, reinterpret_cast<std::size_t>("Execute SETEMIT on VS"));
     CallFarFunction(*this, LogCritical);
     ABI_PopRegistersAndAdjustStack(*this, PersistentCallerSavedRegs(), 0);
     jmp(end);
@@ -866,7 +866,7 @@ void JitShader::Compile_NextInstr() {
 void JitShader::FindReturnOffsets() {
     return_offsets.clear();
 
-    for (size_t offset = 0; offset < program_code->size(); ++offset) {
+    for (std::size_t offset = 0; offset < program_code->size(); ++offset) {
         Instruction instr = {(*program_code)[offset]};
 
         switch (instr.opcode.Value()) {
@@ -922,12 +922,12 @@ void JitShader::Compile(const std::array<u32, MAX_PROGRAM_CODE_LENGTH>* program_
 
     // Used to set a register to one
     static const __m128 one = {1.f, 1.f, 1.f, 1.f};
-    mov(rax, reinterpret_cast<size_t>(&one));
+    mov(rax, reinterpret_cast<std::size_t>(&one));
     movaps(ONE, xword[rax]);
 
     // Used to negate registers
     static const __m128 neg = {-0.f, -0.f, -0.f, -0.f};
-    mov(rax, reinterpret_cast<size_t>(&neg));
+    mov(rax, reinterpret_cast<std::size_t>(&neg));
     movaps(NEGBIT, xword[rax]);
 
     // Jump to start of the shader program
@@ -24,7 +24,7 @@ namespace Pica {
 namespace Shader {
 
 /// Memory allocated for each compiled shader
-constexpr size_t MAX_SHADER_SIZE = MAX_PROGRAM_CODE_LENGTH * 64;
+constexpr std::size_t MAX_SHADER_SIZE = MAX_PROGRAM_CODE_LENGTH * 64;
 
 /**
  * This class implements the shader JIT compiler. It recompiles a Pica shader program into x86_64
@@ -91,7 +91,7 @@ void ProcessTriangle(const OutputVertex& v0, const OutputVertex& v1, const Outpu
     // the new edge (or less in degenerate cases). As such, we can say that each clipping plane
     // introduces at most 1 new vertex to the polygon. Since we start with a triangle and have a
     // fixed 6 clipping planes, the maximum number of vertices of the clipped polygon is 3 + 6 = 9.
-    static const size_t MAX_VERTICES = 9;
+    static const std::size_t MAX_VERTICES = 9;
     static_vector<Vertex, MAX_VERTICES> buffer_a = {v0, v1, v2};
     static_vector<Vertex, MAX_VERTICES> buffer_b;
 
@@ -166,7 +166,7 @@ void ProcessTriangle(const OutputVertex& v0, const OutputVertex& v1, const Outpu
     InitScreenCoordinates((*output_list)[0]);
     InitScreenCoordinates((*output_list)[1]);
 
-    for (size_t i = 0; i < output_list->size() - 2; i++) {
+    for (std::size_t i = 0; i < output_list->size() - 2; i++) {
         Vertex& vtx0 = (*output_list)[0];
         Vertex& vtx1 = (*output_list)[i + 1];
         Vertex& vtx2 = (*output_list)[i + 2];
@@ -7,8 +7,8 @@
 
 namespace Pica {
 
-static float LookupLightingLut(const Pica::State::Lighting& lighting, size_t lut_index, u8 index,
-                               float delta) {
+static float LookupLightingLut(const Pica::State::Lighting& lighting, std::size_t lut_index,
+                               u8 index, float delta) {
     ASSERT_MSG(lut_index < lighting.luts.size(), "Out of range lut");
     ASSERT_MSG(index < lighting.luts[lut_index].size(), "Out of range index");
 
@@ -93,8 +93,8 @@ std::tuple<Math::Vec4<u8>, Math::Vec4<u8>> ComputeFragmentsColors(
             auto distance = (-view - position).Length();
             float scale = Pica::float20::FromRaw(light_config.dist_atten_scale).ToFloat32();
             float bias = Pica::float20::FromRaw(light_config.dist_atten_bias).ToFloat32();
-            size_t lut =
-                static_cast<size_t>(LightingRegs::LightingSampler::DistanceAttenuation) + num;
+            std::size_t lut =
+                static_cast<std::size_t>(LightingRegs::LightingSampler::DistanceAttenuation) + num;
 
             float sample_loc = std::clamp(scale * distance + bias, 0.0f, 1.0f);
 
@@ -168,8 +168,8 @@ std::tuple<Math::Vec4<u8>, Math::Vec4<u8>> ComputeFragmentsColors(
             }
 
             float scale = lighting.lut_scale.GetScale(scale_enum);
-            return scale *
-                   LookupLightingLut(lighting_state, static_cast<size_t>(sampler), index, delta);
+            return scale * LookupLightingLut(lighting_state, static_cast<std::size_t>(sampler),
+                                             index, delta);
         };
 
         // If enabled, compute spot light attenuation value
@@ -18,8 +18,8 @@ using TextureFormat = Pica::TexturingRegs::TextureFormat;
 namespace Pica {
 namespace Texture {
 
-constexpr size_t TILE_SIZE = 8 * 8;
-constexpr size_t ETC1_SUBTILES = 2 * 2;
+constexpr std::size_t TILE_SIZE = 8 * 8;
+constexpr std::size_t ETC1_SUBTILES = 2 * 2;
 
 size_t CalculateTileSize(TextureFormat format) {
     switch (format) {
@@ -177,7 +177,7 @@ Math::Vec4<u8> LookupTexelInTile(const u8* source, unsigned int x, unsigned int
     case TextureFormat::ETC1:
     case TextureFormat::ETC1A4: {
         bool has_alpha = (info.format == TextureFormat::ETC1A4);
-        size_t subtile_size = has_alpha ? 16 : 8;
+        std::size_t subtile_size = has_alpha ? 16 : 8;
 
         // ETC1 further subdivides each 8x8 tile into four 4x4 subtiles
         constexpr unsigned int subtile_width = 4;