Mirror of https://github.com/PabloMK7/citra.git

ffmpeg: Properly handle non-planar formats

For non-planar formats, only the first data plane is used. Therefore, they need to be handled differently in certain places.
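
As a quick illustration of that layout rule (a sketch added for this page, not code from the commit): packed, i.e. non-planar, formats interleave every channel into data[0], so data[1] is never touched and byte offsets have to be scaled by the channel count, whereas planar formats keep one plane per channel. The helper name SamplePtr below is hypothetical; av_sample_fmt_is_planar and av_get_bytes_per_sample are the libavutil calls the dumper already relies on.

    // Sketch only: where sample `index` of channel `ch` lives in a stereo frame.
    extern "C" {
    #include <libavutil/samplefmt.h>
    }
    #include <cstdint>

    const std::uint8_t* SamplePtr(std::uint8_t* const* data, AVSampleFormat fmt, int ch,
                                  int index, int channels = 2) {
        const int sample_size = av_get_bytes_per_sample(fmt);
        if (av_sample_fmt_is_planar(fmt)) {
            // Planar: one plane per channel, samples contiguous within a plane.
            return data[ch] + index * sample_size;
        }
        // Packed: all channels interleaved in data[0]; data[1] and beyond stay unused,
        // so offsets are scaled by the channel count (the "* 2" in the diff below).
        return data[0] + (index * channels + ch) * sample_size;
    }
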
parent c9c26955d2
commit a28eac08ae

1 changed file with 15 additions and 18 deletions

@@ -227,20 +227,7 @@ bool FFmpegAudioStream::Init(AVFormatContext* format_context) {
     codec_context->codec_type = AVMEDIA_TYPE_AUDIO;
     codec_context->bit_rate = Settings::values.audio_bitrate;
     if (codec->sample_fmts) {
-        codec_context->sample_fmt = AV_SAMPLE_FMT_NONE;
-        // Use any planar format
-        const AVSampleFormat* ptr = codec->sample_fmts;
-        while ((*ptr) != -1) {
-            if (av_sample_fmt_is_planar((*ptr))) {
-                codec_context->sample_fmt = (*ptr);
-                break;
-            }
-            ptr++;
-        }
-        if (codec_context->sample_fmt == AV_SAMPLE_FMT_NONE) {
-            LOG_ERROR(Render, "Specified audio encoder does not support any planar format");
-            return false;
-        }
+        codec_context->sample_fmt = codec->sample_fmts[0];
     } else {
         codec_context->sample_fmt = AV_SAMPLE_FMT_S16P;
     }
@@ -341,8 +328,14 @@ void FFmpegAudioStream::ProcessFrame(const VariableAudioFrame& channel0,
     const auto sample_size = av_get_bytes_per_sample(codec_context->sample_fmt);
     std::array<const u8*, 2> src_data = {reinterpret_cast<const u8*>(channel0.data()),
                                          reinterpret_cast<const u8*>(channel1.data())};
-    std::array<u8*, 2> dst_data = {resampled_data[0] + sample_size * offset,
-                                   resampled_data[1] + sample_size * offset};
+
+    std::array<u8*, 2> dst_data;
+    if (av_sample_fmt_is_planar(codec_context->sample_fmt)) {
+        dst_data = {resampled_data[0] + sample_size * offset,
+                    resampled_data[1] + sample_size * offset};
+    } else {
+        dst_data = {resampled_data[0] + sample_size * offset * 2}; // 2 channels
+    }
 
     auto resampled_count = swr_convert(swr_context.get(), dst_data.data(), frame_size - offset,
                                        src_data.data(), channel0.size());
@@ -360,7 +353,9 @@ void FFmpegAudioStream::ProcessFrame(const VariableAudioFrame& channel0,
         // Prepare frame
         audio_frame->nb_samples = frame_size;
         audio_frame->data[0] = resampled_data[0];
-        audio_frame->data[1] = resampled_data[1];
+        if (av_sample_fmt_is_planar(codec_context->sample_fmt)) {
+            audio_frame->data[1] = resampled_data[1];
+        }
         audio_frame->pts = frame_count * frame_size;
         frame_count++;
 
@@ -383,7 +378,9 @@ void FFmpegAudioStream::Flush() {
     // Send the last samples
     audio_frame->nb_samples = offset;
     audio_frame->data[0] = resampled_data[0];
-    audio_frame->data[1] = resampled_data[1];
+    if (av_sample_fmt_is_planar(codec_context->sample_fmt)) {
+        audio_frame->data[1] = resampled_data[1];
+    }
     audio_frame->pts = frame_count * frame_size;
 
     SendFrame(audio_frame.get());
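
A related note on why resampled_data[1] and audio_frame->data[1] are only assigned inside the planar branches above: libavutil itself only fills the first plane pointer for packed formats, and swr_convert follows the same convention for its output pointer array. The following self-contained sketch (an illustrative example for this page, not the dumper's code, assuming stereo S16 versus S16P) makes the difference visible with av_samples_alloc.

    extern "C" {
    #include <libavutil/mem.h>
    #include <libavutil/samplefmt.h>
    }
    #include <cstdio>

    int main() {
        const int channels = 2;
        const int nb_samples = 1024;
        const AVSampleFormat fmts[] = {AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P};
        for (AVSampleFormat fmt : fmts) {
            uint8_t* planes[2] = {nullptr, nullptr}; // one slot per channel
            int linesize = 0;
            if (av_samples_alloc(planes, &linesize, channels, nb_samples, fmt, 0) < 0)
                return 1;
            // Packed (s16): planes[1] stays null, all samples interleaved in planes[0].
            // Planar (s16p): planes[1] points at the second channel's plane.
            std::printf("%-4s planar=%d second_plane=%s linesize=%d\n",
                        av_get_sample_fmt_name(fmt), av_sample_fmt_is_planar(fmt),
                        planes[1] ? "set" : "unused", linesize);
            av_freep(&planes[0]); // a single buffer backs all planes of one allocation
        }
        return 0;
    }

This mirrors the checks added in ProcessFrame and Flush: the second plane is only touched when av_sample_fmt_is_planar reports a planar output format.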