/*
    SPDX-FileCopyrightText: 2003 Fabrice Bellard
    SPDX-FileCopyrightText: 2020-2022 Mladen Milinkovic <max@smoothware.net>

    SPDX-License-Identifier: GPL-2.0-or-later
*/

#include "audiodecoder.h"

#include <QMap>

#include "videoplayer/backend/ffplayer.h"
#include "videoplayer/backend/framequeue.h"
#include "videoplayer/backend/videostate.h"
#include "videoplayer/backend/renderthread.h"

extern "C" {
#include "libavutil/time.h"
#include "libswresample/swresample.h"
}

#include <AL/al.h>
#include <AL/alc.h>

// maximum audio speed change to get correct sync
#define SAMPLE_CORRECTION_PERCENT_MAX 10

// we use about AUDIO_DIFF_AVG_NB A-V differences to make the average
#define AUDIO_DIFF_AVG_NB   20

// Minimum audio buffer size, in samples.
#define AUDIO_MIN_BUFFER_SIZE 512


using namespace SubtitleComposer;

AudioDecoder::AudioDecoder(VideoState *state, QObject *parent)
    : Decoder(parent),
      m_vs(state),
      m_swrCtx(nullptr),
      m_audioBuf(nullptr),
      m_bufSize(0),
      m_audioBuf1(nullptr),
      m_buf1Size(0),
      m_alDev(nullptr),
      m_alCtx(nullptr),
      m_alSrc(0),
      m_bufCnt(0),
      m_bufFmt(0)
{
}

void
AudioDecoder::destroy()
{
    Decoder::destroy();
    swr_free(&m_swrCtx);

    av_freep(&m_audioBuf1);
    m_buf1Size = 0;
    m_audioBuf = nullptr;
}

void
AudioDecoder::abort()
{
    Decoder::abort();
    close();
}

void
AudioDecoder::play()
{
    alSourcePlay(m_alSrc);
}

void
AudioDecoder::pause()
{
    alSourcePause(m_alSrc);
}

void
AudioDecoder::setListenerGain(double gain)
{
    alListenerf(AL_GAIN, gain);
}

double
AudioDecoder::pitch() const
{
    ALfloat pitch = 1.0;
    alGetSourcef(m_alSrc, AL_PITCH, &pitch);
    return pitch;
}

void
AudioDecoder::setPitch(double pitch)
{
    alSourcef(m_alSrc, AL_PITCH, pitch);
    m_vs->notifySpeed();
}

void
AudioDecoder::flush()
{
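    // Stop the source and reclaim every buffer that is still queued. Stopping
    // an OpenAL source marks all of its queued buffers as processed, so they
    // can be unqueued and deleted; if the implementation has not caught up
    // yet, wait briefly and retry.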
    for(;;) {
        alSourceStop(m_alSrc);

        ALint bufferCnt = 0;
        alGetSourcei(m_alSrc, AL_BUFFERS_QUEUED, &bufferCnt);
        if(bufferCnt == 0)
            break;

        bufferCnt = 0;
        alGetSourcei(m_alSrc, AL_BUFFERS_PROCESSED, &bufferCnt);
        if(bufferCnt == 0) {
            av_log(nullptr, AV_LOG_WARNING, "openal: source didn't stop... retrying flush\n");
            av_usleep(10);
            continue;
        }

        ALuint *bufs = new ALuint[bufferCnt];
        alSourceUnqueueBuffers(m_alSrc, bufferCnt, bufs);
        alDeleteBuffers(bufferCnt, bufs);
        delete[] bufs;
        m_hwBufQueueSize = 0;
    }
}

void
AudioDecoder::close()
{
    flush();
    alcMakeContextCurrent(nullptr);
    if(m_alCtx) {
        alcDestroyContext(m_alCtx);
        m_alCtx = nullptr;
    }
    if(m_alDev) {
        alcCloseDevice(m_alDev);
        m_alDev = nullptr;
    }
    if(m_alSrc)
        alDeleteSources(1, &m_alSrc);
    m_alSrc = 0;
}

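/**
 * Open the default OpenAL device and context and pick a 16-bit output format
 * that matches the requested channel count as closely as possible, falling
 * back to stereo or mono. Also fills the target sample format (m_fmtTgt) used
 * by the resampler and resets the A-V difference averaging filter.
 */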
bool
AudioDecoder::open(int64_t wantChLayout, int wantNbChan, int wantSampleRate)
{
    const static QMap<int, const char *> bufFmtMap = {
        { 4, "AL_FORMAT_QUAD16" },
        { 6, "AL_FORMAT_51CHN16" },
        { 7, "AL_FORMAT_61CHN16" },
        { 8, "AL_FORMAT_71CHN16" },
    };

    int err;

    if(wantSampleRate <= 0 || wantNbChan <= 0) {
        av_log(nullptr, AV_LOG_ERROR, "openal: invalid sample rate or channel count!\n");
        return false;
    }

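    // Walk down from the requested channel count until a 16-bit format the
    // OpenAL implementation actually knows is found; two channels or fewer
    // fall back to plain stereo/mono.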
    int availNbChan = wantNbChan;
    for(;;) {
        while(availNbChan > 2 && !bufFmtMap.contains(availNbChan))
            availNbChan--;
        if(availNbChan <= 2) {
            m_bufFmt = availNbChan == 2 ? AL_FORMAT_STEREO16 : AL_FORMAT_MONO16;
            break;
        }
        m_bufFmt = alGetEnumValue(bufFmtMap[availNbChan]);
        if(m_bufFmt)
            break;
        availNbChan--;
    }

    if(!wantChLayout || wantNbChan != availNbChan || wantNbChan != av_get_channel_layout_nb_channels(wantChLayout)) {
        wantChLayout = av_get_default_channel_layout(availNbChan);
        wantChLayout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
    }

    m_alDev = alcOpenDevice(nullptr);
    if(!m_alDev) {
        av_log(nullptr, AV_LOG_ERROR, "openal: error opening default audio device!\n");
        close();
        return false;
    }

    m_alCtx = alcCreateContext(m_alDev, nullptr);
    if(!m_alCtx) {
        av_log(nullptr, AV_LOG_ERROR, "openal: error creating audio context!\n");
        close();
        return false;
    }
    if(!alcMakeContextCurrent(m_alCtx)) {
        av_log(nullptr, AV_LOG_ERROR, "openal: error setting current audio context!\n");
        close();
        return false;
    }

    alGetError(); // clear error

    alGenSources(1, &m_alSrc);
    if((err = alGetError()) != AL_NO_ERROR) {
        av_log(nullptr, AV_LOG_ERROR, "openal: error generating audio source: %d\n", err);
        close();
        return false;
    }

    m_fmtTgt.fmt = AV_SAMPLE_FMT_S16;
    m_fmtTgt.freq = wantSampleRate;
    m_fmtTgt.channelLayout = wantChLayout;
    m_fmtTgt.channels = availNbChan;
    m_fmtTgt.frameSize = av_samples_get_buffer_size(nullptr, m_fmtTgt.channels, 1, m_fmtTgt.fmt, 1);
    m_fmtTgt.bytesPerSec = av_samples_get_buffer_size(nullptr, m_fmtTgt.channels, m_fmtTgt.freq, m_fmtTgt.fmt, 1);
    if(m_fmtTgt.bytesPerSec <= 0 || m_fmtTgt.frameSize <= 0) {
        av_log(nullptr, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
        close();
        return false;
    }

    alListenerf(AL_GAIN, m_vs->player->muted() ? 0. : m_vs->player->volume());

    m_fmtSrc = m_fmtTgt;
    m_hwBufQueueSize = 0;
    m_bufSize = 0;

    // init averaging filter
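    // m_diffAvgCoef = 0.01^(1/AUDIO_DIFF_AVG_NB) ~= 0.794, so a measurement
    // AUDIO_DIFF_AVG_NB frames old keeps only ~1% of its weight in the
    // exponential average used by syncAudio().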
    m_diffAvgCoef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
    m_diffAvgCount = 0;

    return true;
}

void
AudioDecoder::queueBuffer(uint8_t *data, int len)
{
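    // Try to recycle a buffer the source has already played; if none is
    // available yet, generate a new one. The buffer is then filled with the
    // decoded samples and queued back on the source, while m_hwBufQueueSize
    // tracks how many bytes are currently sitting in OpenAL's queue.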
    int err = alGetError(); // reset error
    ALuint buf;

    // get next buffer to fill - maybe use alGetSourcei(AL_BUFFERS_QUEUED / AL_BUFFERS_PROCESSED)
    alSourceUnqueueBuffers(m_alSrc, 1, &buf);
    if((err = alGetError()) != AL_NO_ERROR) {
        if(err == AL_INVALID_VALUE) {
            // buffer can not be unqueued because it has not been processed yet.
            alGenBuffers(1, &buf);
            if((err = alGetError()) != AL_NO_ERROR) {
                av_log(nullptr, AV_LOG_ERROR, "openal: alGenBuffers() failed: %d\n", err);
                return;
            } else {
                m_bufCnt++;
            }
        } else {
            av_log(nullptr, AV_LOG_ERROR, "openal: alSourceUnqueueBuffers() failed: %d\n", err);
            return;
        }
    } else {
        ALint hwBufSize;
        alGetBufferi(buf, AL_SIZE, &hwBufSize);
        if((err = alGetError()) != AL_NO_ERROR) {
            av_log(nullptr, AV_LOG_ERROR, "openal: alGetBufferi(AL_SIZE) failed: %d\n", err);
            return;
        }
        m_hwBufQueueSize -= hwBufSize;
    }

    // copy data to buffer
    alBufferData(buf, m_bufFmt, data, len, m_fmtTgt.freq);
    if((err = alGetError()) != AL_NO_ERROR) {
        av_log(nullptr, AV_LOG_ERROR, "openal: alBufferData() failed: %d\n", err);
        return;
    }

    // queue buffer
    alSourceQueueBuffers(m_alSrc, 1, &buf); // stream
    if((err = alGetError()) != AL_NO_ERROR) {
        av_log(nullptr, AV_LOG_ERROR, "openal: alSourceQueueBuffers() failed: %d\n", err);
        return;
    }

    m_hwBufQueueSize += len;

    // get current state
    ALint state = AL_INITIAL;
    alGetSourcei(m_alSrc, AL_SOURCE_STATE, &state);
    if((err = alGetError()) != AL_NO_ERROR) {
        av_log(nullptr, AV_LOG_ERROR, "openal: alGetSourcei(AL_SOURCE_STATE) failed: %d\n", err);
        return;
    }

    // start playing
    if(state != AL_PLAYING) {
        play();
        if(m_vs->paused || m_vs->step)
            pause();
    }
}

/* return the wanted number of samples to get better sync if sync_type is video
 * or external master clock */
int
AudioDecoder::syncAudio(int nbSamples)
{
    int wantedNbSamples = nbSamples;

    // if not master, then we try to remove or add samples to correct the clock
    if(m_vs->masterSyncType() != AV_SYNC_AUDIO_MASTER) {
        int minNbSamples, maxNbSamples;

        double diff = m_vs->audClk.get() - m_vs->masterTime();

        if(!std::isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
            m_diffCum = diff + m_diffAvgCoef * m_diffCum;
            if(m_diffAvgCount < AUDIO_DIFF_AVG_NB) {
                // not enough measures to have a correct estimate
                m_diffAvgCount++;
            } else {
                // estimate the A-V difference
                const double avgDiff = m_diffCum * (1.0 - m_diffAvgCoef);
                if(avgDiff != 0.) {
                    wantedNbSamples = nbSamples + (int)(diff * m_fmtSrc.freq);
                    minNbSamples = ((nbSamples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
                    maxNbSamples = ((nbSamples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
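                    // e.g. for nbSamples = 1024 and a 10% cap, the correction
                    // below is clipped to the range [921, 1126] samples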
                    wantedNbSamples = av_clip(wantedNbSamples, minNbSamples, maxNbSamples);
                }
                av_log(nullptr, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d\n",
                       diff, avgDiff, wantedNbSamples - nbSamples);
            }
        } else {
            // too big difference : may be initial PTS errors, so reset A-V filter
            m_diffAvgCount = 0;
            m_diffCum = 0;
        }
    }

    return wantedNbSamples;
}

/**
 * Decode one audio frame and return its uncompressed size.
 *
 * The processed audio frame is decoded, converted if required, and
 * stored in m_audioBuf, with size in bytes given by the return
 * value.
 */
int
AudioDecoder::decodeFrame(Frame *af)
{
    if(af->serial != m_queue->serial())
        return -1;

    int dataSize = av_samples_get_buffer_size(nullptr, af->frame->channels,
                                           af->frame->nb_samples,
                                           (AVSampleFormat)af->frame->format, 1);
    int resampledDataSize;

    uint64_t decChannelLayout =
        (af->frame->channel_layout &&
         af->frame->channels == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
        af->frame->channel_layout : av_get_default_channel_layout(af->frame->channels);
    int wantedNbSamples = syncAudio(af->frame->nb_samples);

    if(af->frame->format != m_fmtSrc.fmt
    || decChannelLayout != m_fmtSrc.channelLayout
    || af->frame->sample_rate != m_fmtSrc.freq
    || (wantedNbSamples != af->frame->nb_samples && !m_swrCtx)) {
        swr_free(&m_swrCtx);
        m_swrCtx = swr_alloc_set_opts(nullptr,
                                         m_fmtTgt.channelLayout, m_fmtTgt.fmt, m_fmtTgt.freq,
                                         decChannelLayout, (AVSampleFormat)af->frame->format, af->frame->sample_rate,
                                         0, nullptr);
        if(!m_swrCtx || swr_init(m_swrCtx) < 0) {
            av_log(nullptr, AV_LOG_ERROR,
                   "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
                   af->frame->sample_rate, av_get_sample_fmt_name((AVSampleFormat)af->frame->format),
                   af->frame->channels,
                   m_fmtTgt.freq, av_get_sample_fmt_name(m_fmtTgt.fmt), m_fmtTgt.channels);
            swr_free(&m_swrCtx);
            return -1;
        }
        m_fmtSrc.channelLayout = decChannelLayout;
        m_fmtSrc.channels = af->frame->channels;
        m_fmtSrc.freq = af->frame->sample_rate;
        m_fmtSrc.fmt = (AVSampleFormat)af->frame->format;
    }

    if(m_swrCtx) {
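        // Resample into m_audioBuf1; outCount adds 256 samples of headroom so
        // swr_convert() has room for resampler delay and for any compensation
        // samples requested below.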
        const int outCount = (int64_t)wantedNbSamples * m_fmtTgt.freq / af->frame->sample_rate + 256;
        const int outSize = av_samples_get_buffer_size(nullptr, m_fmtTgt.channels, outCount, m_fmtTgt.fmt, 0);
        if(outSize < 0) {
            av_log(nullptr, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
            return -1;
        }
        if(wantedNbSamples != af->frame->nb_samples) {
            if(swr_set_compensation(m_swrCtx,
                    (wantedNbSamples - af->frame->nb_samples) * m_fmtTgt.freq / af->frame->sample_rate,
                    wantedNbSamples * m_fmtTgt.freq / af->frame->sample_rate) < 0) {
                av_log(nullptr, AV_LOG_ERROR, "swr_set_compensation() failed\n");
                return -1;
            }
        }
        av_fast_malloc(&m_audioBuf1, &m_buf1Size, outSize);
        if(!m_audioBuf1)
            return AVERROR(ENOMEM);
        const int outSamplesPerChannel = swr_convert(m_swrCtx, &m_audioBuf1, outCount,
                           (const uint8_t **)af->frame->extended_data, af->frame->nb_samples);
        if(outSamplesPerChannel < 0) {
            av_log(nullptr, AV_LOG_ERROR, "swr_convert() failed\n");
            return -1;
        }
        if(outSamplesPerChannel == outCount) {
            av_log(nullptr, AV_LOG_WARNING, "audio buffer is probably too small\n");
            if(swr_init(m_swrCtx) < 0)
                swr_free(&m_swrCtx);
        }
        m_audioBuf = m_audioBuf1;
        resampledDataSize = outSamplesPerChannel * m_fmtTgt.channels * av_get_bytes_per_sample(m_fmtTgt.fmt);
    } else {
        m_audioBuf = af->frame->data[0];
        resampledDataSize = dataSize;
    }

    return resampledDataSize;
}

int
AudioDecoder::getFrame(AVFrame *frame)
{
    const int gotFrame = Decoder::decodeFrame(frame, nullptr);

    if(gotFrame <= 0 || frame->pts == AV_NOPTS_VALUE)
        return gotFrame;

    const double dPts = double(frame->pts) / frame->sample_rate;

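    // frames whose pts precedes m_vs->seekDecoder (presumably the pending seek
    // target) are dropped so audio does not resume from before that position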
    if(!std::isnan(dPts) && m_vs->seekDecoder > 0. && m_vs->seekDecoder > dPts) {
        av_frame_unref(frame);
        return 0;
    }

    return gotFrame;
}

void
AudioDecoder::queueFrame(Frame *af)
{
    int audioSize = decodeFrame(af);
    if(audioSize < 0) {
        // if error, just output silence
        m_audioBuf = nullptr;
        m_bufSize = (AUDIO_MIN_BUFFER_SIZE / m_fmtTgt.frameSize) * m_fmtTgt.frameSize;
    } else {
        if(m_vs->showMode != SHOW_MODE_VIDEO)
            m_vs->renderThread->updateSampleDisplay((int16_t *)(void *)m_audioBuf, audioSize);
        m_bufSize = audioSize;
    }

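    // with no decoded data (decode error) queue zero-filled samples instead,
    // so the OpenAL queue keeps moving and timing stays continuous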
    if(!m_audioBuf) {
        uint8_t *silence = new uint8_t[m_bufSize]();
        queueBuffer(silence, m_bufSize);
        delete[] silence;
    } else {
        queueBuffer((uint8_t *)m_audioBuf, audioSize);
    }
}

void
AudioDecoder::run()
{
    AVFrame *frame = av_frame_alloc();
    Frame *af = new Frame();

    if(!frame)
        return;

    for(;;) {
        const int got_frame = getFrame(frame);
        Q_ASSERT(got_frame != AVERROR(EAGAIN));
        Q_ASSERT(got_frame != AVERROR_EOF);
        if(got_frame < 0)
            break;

        if(got_frame) {
            if(!(af->frame = av_frame_alloc()))
                break;

            af->pts = frame->pts == AV_NOPTS_VALUE ? NAN : double(frame->pts) / frame->sample_rate;
            af->pos = frame->pkt_pos;
            af->serial = m_pktSerial;
            af->duration = double(frame->nb_samples) / frame->sample_rate;

            av_frame_move_ref(af->frame, frame);

            // time (in microseconds, AV_TIME_BASE units) to play one minimum-size buffer of samples
            const int64_t sleepTime = int64_t(double(AUDIO_MIN_BUFFER_SIZE / m_fmtTgt.frameSize) / (m_fmtTgt.freq * m_vs->audClk.speed()) * AV_TIME_BASE);
            // bytes needed for 100ms of audio
            const ALint hwMinBytes = m_vs->audClk.speed() * m_fmtTgt.bytesPerSec * .100;

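            // Wait until OpenAL actually needs more data: either less than
            // ~100ms remains queued or an already-played buffer can be
            // refilled. While waiting, keep the audio clock anchored to what
            // has been played so far (queued bytes minus the playback offset).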
            while(!m_vs->abortRequested && !isInterruptionRequested()) {
                ALint hwBufOffset = 0;
                alGetSourcei(m_alSrc, AL_BYTE_OFFSET, &hwBufOffset);
                if(!std::isnan(af->pts)) {
                    m_vs->audClk.setAt(
                                 af->pts - double(m_hwBufQueueSize - hwBufOffset) / m_fmtTgt.bytesPerSec,
                                 af->serial,
                                 av_gettime_relative() / double(AV_TIME_BASE));
                    m_vs->extClk.syncTo(&m_vs->audClk);
                }

                if(!m_vs->paused) {
                    if(m_hwBufQueueSize - hwBufOffset < hwMinBytes)
                        break;

                    ALint bufReady = 0;
                    alGetSourcei(m_alSrc, AL_BUFFERS_PROCESSED, &bufReady);
                    if(bufReady > 0)
                        break;
                }

                av_usleep(sleepTime);
            }

            queueFrame(af);

            av_frame_unref(af->frame);
            av_frame_free(&af->frame);
        }
    }

    av_frame_free(&frame);
    delete af;
    return;
}