// File indexing completed on 2024-04-21 04:49:07
0001 /* 0002 SPDX-FileCopyrightText: 2010 Dirk Vanden Boer <dirk.vdb@gmail.com> 0003 0004 SPDX-License-Identifier: GPL-2.0-or-later 0005 */ 0006 0007 #include "framedecoder.h" 0008 0009 #include <QFileInfo> 0010 #include <QImage> 0011 0012 extern "C" { 0013 #include <libavutil/imgutils.h> 0014 #include <libswscale/swscale.h> 0015 } 0016 0017 using namespace std; 0018 0019 FrameDecoder::FrameDecoder(const QString &filename, AVFormatContext *pavContext) 0020 : m_VideoStream(-1) 0021 , m_pFormatContext(pavContext) 0022 , m_pVideoCodecContext(nullptr) 0023 , m_pVideoCodec(nullptr) 0024 , m_pFrame(nullptr) 0025 , m_pFrameBuffer(nullptr) 0026 , m_pPacket(nullptr) 0027 , m_FormatContextWasGiven(pavContext != nullptr) 0028 , m_AllowSeek(true) 0029 , m_initialized(false) 0030 , m_bufferSinkContext(nullptr) 0031 , m_bufferSourceContext(nullptr) 0032 , m_filterGraph(nullptr) 0033 , m_filterFrame(nullptr) 0034 { 0035 initialize(filename); 0036 } 0037 0038 FrameDecoder::~FrameDecoder() 0039 { 0040 destroy(); 0041 } 0042 0043 void FrameDecoder::initialize(const QString &filename) 0044 { 0045 m_lastWidth = -1; 0046 m_lastHeight = -1; 0047 m_lastPixfmt = AV_PIX_FMT_NONE; 0048 0049 #if (LIBAVFORMAT_VERSION_MAJOR < 58) 0050 av_register_all(); 0051 #endif 0052 0053 QFileInfo fileInfo(filename); 0054 0055 if ((!m_FormatContextWasGiven) && avformat_open_input(&m_pFormatContext, fileInfo.absoluteFilePath().toLocal8Bit().data(), nullptr, nullptr) != 0) { 0056 qDebug() << "Could not open input file: " << fileInfo.absoluteFilePath(); 0057 return; 0058 } 0059 0060 if (avformat_find_stream_info(m_pFormatContext, nullptr) < 0) { 0061 qDebug() << "Could not find stream information"; 0062 return; 0063 } 0064 0065 if (!initializeVideo()) { 0066 // It already printed a message 0067 return; 0068 } 0069 m_pFrame = av_frame_alloc(); 0070 0071 if (m_pFrame) { 0072 m_initialized = true; 0073 } 0074 } 0075 0076 bool FrameDecoder::getInitialized() 0077 { 0078 return m_initialized; 0079 } 0080 0081 void 
FrameDecoder::destroy() 0082 { 0083 deleteFilterGraph(); 0084 if (m_pVideoCodecContext) { 0085 avcodec_close(m_pVideoCodecContext); 0086 avcodec_free_context(&m_pVideoCodecContext); 0087 m_pVideoCodecContext = nullptr; 0088 } 0089 0090 if ((!m_FormatContextWasGiven) && m_pFormatContext) { 0091 avformat_close_input(&m_pFormatContext); 0092 m_pFormatContext = nullptr; 0093 } 0094 0095 if (m_pPacket) { 0096 av_packet_unref(m_pPacket); 0097 delete m_pPacket; 0098 m_pPacket = nullptr; 0099 } 0100 0101 if (m_pFrame) { 0102 av_frame_free(&m_pFrame); 0103 m_pFrame = nullptr; 0104 } 0105 0106 if (m_pFrameBuffer) { 0107 av_free(m_pFrameBuffer); 0108 m_pFrameBuffer = nullptr; 0109 } 0110 } 0111 0112 QString FrameDecoder::getCodec() 0113 { 0114 QString codecName; 0115 if (m_pVideoCodec) { 0116 codecName = QString::fromLatin1(m_pVideoCodec->name); 0117 } 0118 return codecName; 0119 } 0120 0121 bool FrameDecoder::initializeVideo() 0122 { 0123 m_VideoStream = av_find_best_stream(m_pFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, &m_pVideoCodec, 0); 0124 if (m_VideoStream < 0) { 0125 qDebug() << "Could not find video stream"; 0126 return false; 0127 } 0128 0129 m_pVideoCodecContext = avcodec_alloc_context3(m_pVideoCodec); 0130 avcodec_parameters_to_context(m_pVideoCodecContext, m_pFormatContext->streams[m_VideoStream]->codecpar); 0131 0132 if (m_pVideoCodec == nullptr) { 0133 // set to nullptr, otherwise avcodec_close(m_pVideoCodecContext) crashes 0134 m_pVideoCodecContext = nullptr; 0135 qDebug() << "Video Codec not found"; 0136 return false; 0137 } 0138 0139 m_pVideoCodecContext->workaround_bugs = 1; 0140 0141 if (avcodec_open2(m_pVideoCodecContext, m_pVideoCodec, nullptr) < 0) { 0142 qDebug() << "Could not open video codec"; 0143 return false; 0144 } 0145 0146 return true; 0147 } 0148 0149 int FrameDecoder::getWidth() 0150 { 0151 if (m_pVideoCodecContext) { 0152 return m_pVideoCodecContext->width; 0153 } 0154 0155 return -1; 0156 } 0157 0158 int FrameDecoder::getHeight() 0159 { 0160 
if (m_pVideoCodecContext) { 0161 return m_pVideoCodecContext->height; 0162 } 0163 0164 return -1; 0165 } 0166 0167 int FrameDecoder::getDuration() 0168 { 0169 if (m_pFormatContext) { 0170 return static_cast<int>(m_pFormatContext->duration / AV_TIME_BASE); 0171 } 0172 0173 return 0; 0174 } 0175 0176 void FrameDecoder::seek(int timeInSeconds) 0177 { 0178 if (!m_AllowSeek) { 0179 return; 0180 } 0181 0182 qint64 timestamp = AV_TIME_BASE * static_cast<qint64>(timeInSeconds); 0183 0184 if (timestamp < 0) { 0185 timestamp = 0; 0186 } 0187 0188 int ret = av_seek_frame(m_pFormatContext, -1, timestamp, 0); 0189 if (ret >= 0) { 0190 avcodec_flush_buffers(m_pVideoCodecContext); 0191 } else { 0192 qDebug() << "Seeking in video failed"; 0193 return; 0194 } 0195 0196 int keyFrameAttempts = 0; 0197 bool gotFrame = 0; 0198 0199 do { 0200 int count = 0; 0201 gotFrame = 0; 0202 0203 while (!gotFrame && count < 20) { 0204 getVideoPacket(); 0205 gotFrame = decodeVideoPacket(); 0206 ++count; 0207 } 0208 0209 ++keyFrameAttempts; 0210 } while ((!gotFrame || !m_pFrame->key_frame) && keyFrameAttempts < 200); 0211 0212 if (gotFrame == 0) { 0213 qDebug() << "Seeking in video failed"; 0214 } 0215 } 0216 0217 bool FrameDecoder::decodeVideoFrame() 0218 { 0219 bool frameFinished = false; 0220 0221 while (!frameFinished && getVideoPacket()) { 0222 frameFinished = decodeVideoPacket(); 0223 } 0224 0225 if (!frameFinished) { 0226 qDebug() << "decodeVideoFrame() failed: frame not finished"; 0227 } 0228 0229 return frameFinished; 0230 } 0231 0232 bool FrameDecoder::decodeVideoPacket() 0233 { 0234 if (m_pPacket->stream_index != m_VideoStream) { 0235 return false; 0236 } 0237 0238 av_frame_unref(m_pFrame); 0239 0240 avcodec_send_packet(m_pVideoCodecContext, m_pPacket); 0241 int ret = avcodec_receive_frame(m_pVideoCodecContext, m_pFrame); 0242 if (ret == AVERROR(EAGAIN)) { 0243 return false; 0244 } 0245 0246 return true; 0247 } 0248 0249 bool FrameDecoder::getVideoPacket() 0250 { 0251 bool framesAvailable 
= true; 0252 bool frameDecoded = false; 0253 0254 int attempts = 0; 0255 0256 if (m_pPacket) { 0257 av_packet_unref(m_pPacket); 0258 delete m_pPacket; 0259 } 0260 0261 m_pPacket = new AVPacket(); 0262 0263 while (framesAvailable && !frameDecoded && (attempts++ < 1000)) { 0264 framesAvailable = av_read_frame(m_pFormatContext, m_pPacket) >= 0; 0265 if (framesAvailable) { 0266 frameDecoded = m_pPacket->stream_index == m_VideoStream; 0267 if (!frameDecoded) { 0268 av_packet_unref(m_pPacket); 0269 } 0270 } 0271 } 0272 0273 return frameDecoded; 0274 } 0275 0276 void FrameDecoder::deleteFilterGraph() 0277 { 0278 if (m_filterGraph) { 0279 av_frame_free(&m_filterFrame); 0280 avfilter_graph_free(&m_filterGraph); 0281 m_filterGraph = nullptr; 0282 } 0283 } 0284 0285 bool FrameDecoder::initFilterGraph(enum AVPixelFormat pixfmt, int width, int height) 0286 { 0287 AVFilterInOut *inputs = nullptr, *outputs = nullptr; 0288 0289 deleteFilterGraph(); 0290 m_filterGraph = avfilter_graph_alloc(); 0291 0292 QByteArray arguments("buffer="); 0293 arguments += "video_size=" + QByteArray::number(width) + 'x' + QByteArray::number(height) + ':'; 0294 arguments += "pix_fmt=" + QByteArray::number(pixfmt) + ':'; 0295 arguments += "time_base=1/1:pixel_aspect=0/1[in];"; 0296 arguments += "[in]yadif[out];"; 0297 arguments += "[out]buffersink"; 0298 0299 int ret = avfilter_graph_parse2(m_filterGraph, arguments.constData(), &inputs, &outputs); 0300 if (ret < 0) { 0301 qWarning() << "Unable to parse filter graph"; 0302 return false; 0303 } 0304 0305 if (inputs || outputs) { 0306 return false; 0307 } 0308 0309 ret = avfilter_graph_config(m_filterGraph, nullptr); 0310 if (ret < 0) { 0311 qWarning() << "Unable to validate filter graph"; 0312 return false; 0313 } 0314 0315 m_bufferSourceContext = avfilter_graph_get_filter(m_filterGraph, "Parsed_buffer_0"); 0316 m_bufferSinkContext = avfilter_graph_get_filter(m_filterGraph, "Parsed_buffersink_2"); 0317 if (!m_bufferSourceContext || !m_bufferSinkContext) { 
0318 qWarning() << "Unable to get source or sink"; 0319 return false; 0320 } 0321 m_filterFrame = av_frame_alloc(); 0322 m_lastWidth = width; 0323 m_lastHeight = height; 0324 m_lastPixfmt = pixfmt; 0325 0326 return true; 0327 } 0328 0329 bool FrameDecoder::processFilterGraph(AVFrame *dst, const AVFrame *src, enum AVPixelFormat pixfmt, int width, int height) 0330 { 0331 if (!m_filterGraph || width != m_lastWidth || height != m_lastHeight || pixfmt != m_lastPixfmt) { 0332 if (!initFilterGraph(pixfmt, width, height)) { 0333 return false; 0334 } 0335 } 0336 0337 memcpy(m_filterFrame->data, src->data, sizeof(src->data)); 0338 memcpy(m_filterFrame->linesize, src->linesize, sizeof(src->linesize)); 0339 m_filterFrame->width = width; 0340 m_filterFrame->height = height; 0341 m_filterFrame->format = pixfmt; 0342 0343 int ret = av_buffersrc_add_frame(m_bufferSourceContext, m_filterFrame); 0344 if (ret < 0) { 0345 return false; 0346 } 0347 0348 ret = av_buffersink_get_frame(m_bufferSinkContext, m_filterFrame); 0349 if (ret < 0) { 0350 return false; 0351 } 0352 0353 av_image_copy(dst->data, dst->linesize, (const uint8_t **)m_filterFrame->data, m_filterFrame->linesize, pixfmt, width, height); 0354 av_frame_unref(m_filterFrame); 0355 0356 return true; 0357 } 0358 0359 void FrameDecoder::getScaledVideoFrame(int scaledSize, bool maintainAspectRatio, QImage &videoFrame) 0360 { 0361 if (m_pFrame->interlaced_frame) { 0362 processFilterGraph((AVFrame *)m_pFrame, (AVFrame *)m_pFrame, m_pVideoCodecContext->pix_fmt, m_pVideoCodecContext->width, m_pVideoCodecContext->height); 0363 } 0364 0365 int scaledWidth, scaledHeight; 0366 convertAndScaleFrame(AV_PIX_FMT_RGB24, scaledSize, maintainAspectRatio, scaledWidth, scaledHeight); 0367 // .copy() since QImage otherwise assumes the memory will continue to be available. 0368 // We could instead pass a custom deleter, but meh. 
0369 videoFrame = QImage(m_pFrame->data[0], scaledWidth, scaledHeight, m_pFrame->linesize[0], QImage::Format_RGB888).copy(); 0370 } 0371 0372 void FrameDecoder::convertAndScaleFrame(AVPixelFormat format, int scaledSize, bool maintainAspectRatio, int &scaledWidth, int &scaledHeight) 0373 { 0374 calculateDimensions(scaledSize, maintainAspectRatio, scaledWidth, scaledHeight); 0375 SwsContext *scaleContext = sws_getContext(m_pVideoCodecContext->width, 0376 m_pVideoCodecContext->height, 0377 m_pVideoCodecContext->pix_fmt, 0378 scaledWidth, 0379 scaledHeight, 0380 format, 0381 SWS_BICUBIC, 0382 nullptr, 0383 nullptr, 0384 nullptr); 0385 0386 if (nullptr == scaleContext) { 0387 qDebug() << "Failed to create resize context"; 0388 return; 0389 } 0390 0391 AVFrame *convertedFrame = nullptr; 0392 uint8_t *convertedFrameBuffer = nullptr; 0393 0394 createAVFrame(&convertedFrame, &convertedFrameBuffer, scaledWidth, scaledHeight, format); 0395 0396 sws_scale(scaleContext, m_pFrame->data, m_pFrame->linesize, 0, m_pVideoCodecContext->height, convertedFrame->data, convertedFrame->linesize); 0397 sws_freeContext(scaleContext); 0398 0399 av_frame_free(&m_pFrame); 0400 av_free(m_pFrameBuffer); 0401 0402 m_pFrame = convertedFrame; 0403 m_pFrameBuffer = convertedFrameBuffer; 0404 } 0405 0406 void FrameDecoder::calculateDimensions(int squareSize, bool maintainAspectRatio, int &destWidth, int &destHeight) 0407 { 0408 if (!maintainAspectRatio) { 0409 destWidth = squareSize; 0410 destHeight = squareSize; 0411 } else { 0412 int srcWidth = m_pVideoCodecContext->width; 0413 int srcHeight = m_pVideoCodecContext->height; 0414 int ascpectNominator = m_pVideoCodecContext->sample_aspect_ratio.num; 0415 int ascpectDenominator = m_pVideoCodecContext->sample_aspect_ratio.den; 0416 0417 if (ascpectNominator != 0 && ascpectDenominator != 0) { 0418 srcWidth = srcWidth * ascpectNominator / ascpectDenominator; 0419 } 0420 0421 if (srcWidth > srcHeight) { 0422 destWidth = squareSize; 0423 destHeight = 
int(float(squareSize) / srcWidth * srcHeight); 0424 } else { 0425 destWidth = int(float(squareSize) / srcHeight * srcWidth); 0426 destHeight = squareSize; 0427 } 0428 } 0429 } 0430 0431 void FrameDecoder::createAVFrame(AVFrame **avFrame, quint8 **frameBuffer, int width, int height, AVPixelFormat format) 0432 { 0433 *avFrame = av_frame_alloc(); 0434 0435 int numBytes = av_image_get_buffer_size(format, width + 1, height + 1, 16); 0436 *frameBuffer = reinterpret_cast<quint8 *>(av_malloc(numBytes)); 0437 av_image_fill_arrays((*avFrame)->data, (*avFrame)->linesize, *frameBuffer, format, width, height, 1); 0438 }