Fix T91005: Autosplit produces unusable files

Audio PTS was reset for each new file, but video PTS was not. This caused
misalignment of the video and audio streams: Blender can't load these files,
and other players fail to align audio and video.

Since the timestamps are reset intentionally, reset the video stream
timestamps as well.
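To illustrate, a minimal standalone sketch (not the patch itself; the
counters, the split point and the 24 fps rate are made up): both streams keep
per-file counters, and both are zeroed when autosplit opens a new file, so
timestamps stay aligned within every output file.

#include <stdio.h>

/* Illustrative sketch only: per-file timestamp counters for both streams,
 * zeroed together when a new split file is opened. */
int main(void)
{
  int video_pts = 0;      /* video frames written into the current file */
  double audio_pts = 0.0; /* seconds of audio written into the current file */

  for (int frame = 0; frame < 100; frame++) {
    video_pts++;             /* one video frame encoded */
    audio_pts += 1.0 / 24.0; /* the matching slice of audio, assuming 24 fps */

    if (frame == 49) { /* pretend the size limit was hit here */
      video_pts = 0;   /* reset BOTH streams for the new file, */
      audio_pts = 0.0; /* not only the audio one */
    }
  }

  printf("current file ends at video pts %d, audio pts %.2f s\n", video_pts, audio_pts);
  return 0;
}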

There were other bugs:
After the audio timestamp was reset, write_audio_frames started encoding from
the start of the timeline up to the target frame, so each split file contained
more audio than it should.
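A minimal standalone sketch of the corrected accounting (the chunk size and
target are invented; the two variables loosely follow audio_time and
audio_time_total from the patch): the loop compares against a running total
that survives splits, while only the per-file timestamp gets reset.

#include <stdio.h>

int main(void)
{
  const double chunk = 1024.0 / 48000.0; /* seconds of audio per encoded chunk */
  double audio_time = 0.0;               /* timestamp inside the current file */
  double audio_time_total = 0.0;         /* timestamp on the whole timeline */
  const double to_pts = 2.0;             /* encode audio up to this timeline point */

  /* Stopping against the running total means a later call continues where the
   * previous file ended instead of re-encoding from the timeline start. */
  while (audio_time_total < to_pts) {
    /* a real implementation would encode one audio chunk here */
    audio_time_total += chunk;
    audio_time += chunk;
  }

  printf("wrote %.3f s into this file, %.3f s overall\n", audio_time, audio_time_total);
  return 0;
}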

Also, the audio for the last frame before splitting was written into the new
file instead of the current one.
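The fix is an ordering change, sketched below with made-up helpers: write the
audio that belongs to the current frame first, and only then check whether the
file needs to be split.

#include <stdbool.h>
#include <stdio.h>

static bool file_too_big(int frame)
{
  return frame == 2; /* pretend the size limit is hit at frame 2 */
}

int main(void)
{
  for (int frame = 0; frame < 5; frame++) {
    printf("video frame %d\n", frame);
    printf("audio up to frame %d\n", frame + 1); /* this frame's audio first */
    if (file_too_big(frame)) { /* split only after the frame's audio is out */
      printf("-- split: close current file, open next --\n");
    }
  }
  return 0;
}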

Differential Revision: https://developer.blender.org/D13280
Richard Antalik 2021-12-14 01:16:24 +01:00
parent b647509913
commit c1f5d8d023
Notes: blender-bot 2023-02-14 11:35:46 +01:00
Referenced by issue #88449: Blender LTS: Maintenance Task 2.93
Referenced by issue #91005: Autosplit Output gives unexpected results
1 changed file with 17 additions and 9 deletions

@@ -87,6 +87,7 @@ typedef struct FFMpegContext {
   AVStream *video_stream;
   AVStream *audio_stream;
   AVFrame *current_frame; /* Image frame in output pixel format. */
+  int video_time;
   /* Image frame in Blender's own pixel format, may need conversion to the output pixel format. */
   AVFrame *img_convert_frame;
@@ -96,6 +97,7 @@ typedef struct FFMpegContext {
   uint8_t *audio_deinterleave_buffer;
   int audio_input_samples;
   double audio_time;
+  double audio_time_total;
   bool audio_deinterleave;
   int audio_sample_size;
@@ -318,14 +320,15 @@ static const char **get_file_extensions(int format)
 }
 /* Write a frame to the output file */
-static int write_video_frame(FFMpegContext *context, int cfra, AVFrame *frame, ReportList *reports)
+static int write_video_frame(FFMpegContext *context, AVFrame *frame, ReportList *reports)
 {
   int ret, success = 1;
   AVPacket *packet = av_packet_alloc();
   AVCodecContext *c = context->video_codec;
-  frame->pts = cfra;
+  frame->pts = context->video_time;
+  context->video_time++;
   ret = avcodec_send_frame(c, frame);
   if (ret < 0) {
@@ -804,6 +807,8 @@ static AVStream *alloc_video_stream(FFMpegContext *context,
   avcodec_parameters_from_context(st->codecpar, c);
+  context->video_time = 0.0f;
   return st;
 }
@@ -1397,9 +1402,10 @@ static void write_audio_frames(FFMpegContext *context, double to_pts)
   AVCodecContext *c = context->audio_codec;
   while (context->audio_stream) {
-    if ((context->audio_time >= to_pts) || !write_audio_frame(context)) {
+    if ((context->audio_time_total >= to_pts) || !write_audio_frame(context)) {
       break;
     }
+    context->audio_time_total += (double)context->audio_input_samples / (double)c->sample_rate;
     context->audio_time += (double)context->audio_input_samples / (double)c->sample_rate;
   }
 }
@@ -1423,22 +1429,23 @@ int BKE_ffmpeg_append(void *context_v,
   if (context->video_stream) {
     avframe = generate_video_frame(context, (unsigned char *)pixels);
-    success = (avframe && write_video_frame(context, frame - start_frame, avframe, reports));
+    success = (avframe && write_video_frame(context, avframe, reports));
+# ifdef WITH_AUDASPACE
+    /* Add +1 frame because we want to encode audio up until the next video frame. */
+    write_audio_frames(
+        context, (frame - start_frame + 1) / (((double)rd->frs_sec) / (double)rd->frs_sec_base));
+# endif
     if (context->ffmpeg_autosplit) {
       if (avio_tell(context->outfile->pb) > FFMPEG_AUTOSPLIT_SIZE) {
         end_ffmpeg_impl(context, true);
         context->ffmpeg_autosplit_count++;
         success &= start_ffmpeg_impl(context, rd, rectx, recty, suffix, reports);
       }
     }
   }
-# ifdef WITH_AUDASPACE
-  /* Add +1 frame because we want to encode audio up until the next video frame. */
-  write_audio_frames(
-      context, (frame - start_frame + 1) / (((double)rd->frs_sec) / (double)rd->frs_sec_base));
-# endif
   return success;
 }
@@ -1881,6 +1888,7 @@ void *BKE_ffmpeg_context_create(void)
   context->ffmpeg_autosplit_count = 0;
   context->ffmpeg_preview = false;
   context->stamp_data = NULL;
+  context->audio_time_total = 0.0;
   return context;
 }