// File indexing completed on 2025-01-05 04:01:21

0001 /*
0002  * SPDX-FileCopyrightText: 2019-2023 Mattia Basaglia <dev@dragon.best>
0003  *
0004  * SPDX-License-Identifier: GPL-3.0-or-later
0005  */
0006 
0007 #include "video_format.hpp"
0008 
#include <array>
#include <cstdarg>
#include <cstdio>
#include <cstring>
#include <mutex>
#include <set>
#include <string>
0013 
0014 extern "C" {
0015 #include <libavcodec/avcodec.h>
0016 #include <libavformat/avformat.h>
0017 #include <libavformat/avio.h>
0018 #include <libswscale/swscale.h>
0019 }
0020 
0021 #include "app/qstring_exception.hpp"
0022 #include "app/log/log.hpp"
0023 #include "model/assets/composition.hpp"
0024 
0025 namespace glaxnimate::av {
0026 
0027 template<int max_size, class Callback, class... Args>
0028 QString to_str(Callback callback, Args... args)
0029 {
0030     char buf[max_size] = {0};
0031     return callback(buf, args...);
0032 }
0033 
0034 QString err2str(int errnum)
0035 {
0036     return to_str<AV_ERROR_MAX_STRING_SIZE>(&av_make_error_string, AV_ERROR_MAX_STRING_SIZE, errnum);
0037 }
0038 
// Exception type used throughout this backend; carries a translated QString message.
class Error: public app::QStringException<>{ using Ctor::Ctor; };
0040 
0041 
/// RAII guard that invokes a C-style cleanup callback on scope exit,
/// e.g. `CGuard guard(&avformat_free_context, oc);`.
template<class Callback, class Object>
class CGuard
{
public:
    CGuard(Callback callback, Object object)
    : callback(callback), object(object) {}

    // Copying (or moving) would run the cleanup more than once; forbid it.
    CGuard(const CGuard&) = delete;
    CGuard& operator=(const CGuard&) = delete;

    ~CGuard()
    {
        callback(object);
    }

private:
    Callback callback; ///< cleanup function, invoked exactly once
    Object object;     ///< argument passed to the cleanup function
};
0058 
0059 
0060 // a wrapper around a single output AVStream
0061 struct OutputStream
0062 {
0063     OutputStream(
0064         AVFormatContext *oc,
0065         AVCodecID codec_id
0066     )
0067     {
0068         format_context = oc;
0069 
0070         // find the encoder
0071         codec = (AVCodec*)avcodec_find_encoder(codec_id);
0072         if ( !codec )
0073             throw av::Error(i18n("Could not find encoder for '%1'", avcodec_get_name(codec_id)));
0074 
0075         stream = avformat_new_stream(oc, nullptr);
0076         if (!stream)
0077             throw av::Error(i18n("Could not allocate stream"));
0078 
0079         stream->id = oc->nb_streams-1;
0080         codec_context = avcodec_alloc_context3(codec);
0081         if ( !codec_context )
0082             throw av::Error(i18n("Could not alloc an encoding context"));
0083 
0084         // Some formats want stream headers to be separate.
0085         if (oc->oformat->flags & AVFMT_GLOBALHEADER)
0086             codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
0087     }
0088 
0089     ~OutputStream()
0090     {
0091         avcodec_free_context(&codec_context);
0092         av_frame_free(&frame);
0093         av_frame_free(&tmp_frame);
0094         sws_freeContext(sws_context);
0095     }
0096 
0097     int read_packets()
0098     {
0099         int ret = 0;
0100 
0101         while ( ret >= 0 )
0102         {
0103             AVPacket pkt;
0104             memset(&pkt, 0, sizeof(AVPacket));
0105 
0106             ret = avcodec_receive_packet(codec_context, &pkt);
0107             if ( ret == AVERROR(EAGAIN) || ret == AVERROR_EOF )
0108                 break;
0109             else if (ret < 0)
0110                 throw av::Error(i18n("Error encoding a frame: %1", av::err2str(ret)));
0111 
0112             // rescale output packet timestamp values from codec to stream timebase
0113             av_packet_rescale_ts(&pkt, codec_context->time_base, stream->time_base);
0114             pkt.stream_index = stream->index;
0115 
0116             // Write the compressed frame to the media file.
0117             ret = av_interleaved_write_frame(format_context, &pkt);
0118             av_packet_unref(&pkt);
0119             if (ret < 0)
0120                 throw av::Error(i18n("Error while writing output packet: %1", av::err2str(ret)));
0121         }
0122 
0123         return ret;
0124     }
0125 
0126     int write_frame(AVFrame *frame)
0127     {
0128         // send the frame to the encoder
0129         int ret = avcodec_send_frame(codec_context, frame);
0130         if ( ret < 0 )
0131             throw av::Error(i18n("Error sending a frame to the encoder: %1", av::err2str(ret)));
0132 
0133         return read_packets();
0134     }
0135 
0136 
0137     int flush_frames()
0138     {
0139         return write_frame(nullptr);
0140     }
0141 
0142     AVStream *stream = nullptr;
0143     AVCodecContext *codec_context = nullptr;
0144 
0145     // pts of the next frame that will be generated
0146     int64_t next_pts = 0;
0147 
0148     AVFrame *frame = nullptr;
0149     AVFrame *tmp_frame = nullptr;
0150 
0151     SwsContext *sws_context = nullptr;
0152     AVFormatContext *format_context = nullptr;
0153     AVCodec *codec = nullptr;
0154 };
0155 
0156 class DictWrapper
0157 {
0158 public:
0159     class Item
0160     {
0161     public:
0162         Item(AVDictionary** av_dict, QByteArray key)
0163         :  av_dict(av_dict), key(std::move(key)), value(nullptr)
0164         {}
0165 
0166         operator const char*() const
0167         {
0168             return get();
0169         }
0170 
0171         const char* get() const
0172         {
0173             if ( value == nullptr )
0174             {
0175                 if ( auto entry = av_dict_get(*av_dict, key.data(), nullptr, 0) )
0176                     value = entry->value;
0177             }
0178             return value;
0179         }
0180 
0181         void set(const char* text)
0182         {
0183             int ret = av_dict_set(av_dict, key.data(), text, 0);
0184             if ( ret >= 0 )
0185                 value = nullptr;
0186             else
0187                 throw Error(i18n("Could not set dict key `%1`: %2", QString(key), err2str(ret)));
0188         }
0189 
0190         void set(const QString& s)
0191         {
0192             set(s.toUtf8().data());
0193         }
0194 
0195         void set(int64_t v)
0196         {
0197             int ret = av_dict_set_int(av_dict, key.data(), v, 0);
0198             if ( ret >= 0 )
0199                 value = nullptr;
0200             else
0201                 throw Error(i18n("Could not set dict key `%1`: %2", QString(key), err2str(ret)));
0202         }
0203 
0204         Item& operator=(const char* text)
0205         {
0206             set(text);
0207             return *this;
0208         }
0209 
0210         Item& operator=(const QString& text)
0211         {
0212             set(text);
0213             return *this;
0214         }
0215 
0216         Item& operator=(int64_t v)
0217         {
0218             set(v);
0219             return *this;
0220         }
0221 
0222     private:
0223         AVDictionary** av_dict;
0224         QByteArray key;
0225         mutable const char* value;
0226     };
0227 
0228     DictWrapper(AVDictionary** av_dict)
0229     : av_dict(av_dict)
0230     {}
0231 
0232     Item operator[](const QString& key)
0233     {
0234         return Item(av_dict, key.toUtf8());
0235     }
0236 
0237     int size() const
0238     {
0239         return av_dict_count(*av_dict);
0240     }
0241 
0242     void erase(const QString& key)
0243     {
0244         int ret = av_dict_set(av_dict, key.toUtf8().data(), nullptr, 0);
0245         if ( ret < 0 )
0246             throw Error(i18n("Could not erase dict key `%1`: %2", key, err2str(ret)));
0247     }
0248 
0249 private:
0250     AVDictionary** av_dict;
0251 };
0252 
0253 class Dict : public DictWrapper
0254 {
0255 public:
0256     Dict() : DictWrapper(&local_dict) {}
0257 
0258     Dict(Dict&& other) : Dict()
0259     {
0260         std::swap(local_dict, other.local_dict);
0261     }
0262 
0263     Dict(const Dict& other) : Dict()
0264     {
0265         int ret = av_dict_copy(&local_dict, other.local_dict, 0);
0266         if ( ret < 0 )
0267             throw Error(i18n("Could not copy dict: %1", err2str(ret)));
0268     }
0269 
0270     Dict& operator=(Dict&& other)
0271     {
0272         std::swap(local_dict, other.local_dict);
0273         return *this;
0274     }
0275 
0276     Dict& operator=(const Dict& other)
0277     {
0278         int ret = av_dict_copy(&local_dict, other.local_dict, 0);
0279         if ( ret < 0 )
0280             throw Error(i18n("Could not copy dict `%1`: %2", err2str(ret)));
0281         return *this;
0282     }
0283 
0284     ~Dict()
0285     {
0286         av_dict_free(&local_dict);
0287     }
0288 
0289     AVDictionary** dict()
0290     {
0291         return &local_dict;
0292     }
0293 
0294 private:
0295     AVDictionary* local_dict = nullptr;
0296 };
0297 
0298 
0299 class Video
0300 {
0301 public:
0302     static AVFrame *alloc_picture(AVPixelFormat pix_fmt, int width, int height)
0303     {
0304         AVFrame *picture;
0305         int ret;
0306 
0307         picture = av_frame_alloc();
0308         if (!picture)
0309             return nullptr;
0310 
0311         picture->format = pix_fmt;
0312         picture->width  = width;
0313         picture->height = height;
0314 
0315         // allocate the buffers for the frame data
0316         ret = av_frame_get_buffer(picture, 0);
0317         if (ret < 0)
0318             throw av::Error(i18n("Could not allocate frame data."));
0319 
0320         return picture;
0321     }
0322 
0323     static std::pair<AVPixelFormat, QImage::Format> image_format(QImage::Format format)
0324     {
0325         switch ( format )
0326         {
0327             case QImage::Format_Invalid:
0328             default:
0329                 return {AV_PIX_FMT_NONE, QImage::Format_Invalid};
0330             case QImage::Format_Mono:
0331             case QImage::Format_MonoLSB:
0332                 return {AV_PIX_FMT_MONOBLACK, format};
0333             case QImage::Format_Indexed8:
0334                 return {AV_PIX_FMT_ARGB, QImage::Format_RGB32};
0335             case QImage::Format_RGB32:
0336                 return {AV_PIX_FMT_0RGB, format};
0337             case QImage::Format_ARGB32:
0338                 return {AV_PIX_FMT_ARGB, format};
0339             case QImage::Format_ARGB32_Premultiplied:
0340                 return {AV_PIX_FMT_ARGB, format};
0341             case QImage::Format_RGB16:
0342                 return {AV_PIX_FMT_RGB565LE, format};
0343             case QImage::Format_RGB555:
0344                 return {AV_PIX_FMT_RGB555LE, format};
0345             case QImage::Format_RGB888:
0346                 return {AV_PIX_FMT_RGB24, format};
0347             case QImage::Format_RGBX8888:
0348                 return {AV_PIX_FMT_RGB0, format};
0349             case QImage::Format_RGBA8888:
0350             case QImage::Format_RGBA8888_Premultiplied:
0351                 return {AV_PIX_FMT_RGBA, format};
0352             case QImage::Format_Alpha8:
0353             case QImage::Format_Grayscale8:
0354                 return {AV_PIX_FMT_GRAY8, format};
0355             case QImage::Format_RGBA64_Premultiplied:
0356             case QImage::Format_ARGB8555_Premultiplied:
0357             case QImage::Format_ARGB8565_Premultiplied:
0358             case QImage::Format_ARGB6666_Premultiplied:
0359             case QImage::Format_ARGB4444_Premultiplied:
0360             case QImage::Format_A2RGB30_Premultiplied:
0361             case QImage::Format_A2BGR30_Premultiplied:
0362                 return {AV_PIX_FMT_ARGB, QImage::Format_ARGB32};
0363             case QImage::Format_RGB30:
0364             case QImage::Format_RGB444:
0365             case QImage::Format_RGB666:
0366             case QImage::Format_BGR30:
0367             case QImage::Format_RGBX64:
0368             case QImage::Format_RGBA64:
0369                 return {AV_PIX_FMT_RGB24, QImage::Format_RGB888};
0370         }
0371     }
0372 
0373     static AVPixelFormat best_pixel_format(const AVPixelFormat* pix_fmts)
0374     {
0375         static const std::array<AVPixelFormat, 8> preferred = {
0376             // RGBA and similar (no conversion)
0377             AV_PIX_FMT_ARGB, AV_PIX_FMT_RGBA, AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA,
0378             // YUV + Alpha
0379             AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA420P,
0380             // RGB
0381             AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
0382 
0383         };
0384 
0385         if ( !pix_fmts )
0386             return AV_PIX_FMT_NONE;
0387 
0388         std::set<AVPixelFormat> supported;
0389         for ( auto it = pix_fmts; it && *it != AV_PIX_FMT_NONE; ++it )
0390             supported.insert(*it);
0391 
0392         for ( auto pref : preferred )
0393             if ( supported.count(pref) )
0394                 return pref;
0395 
0396         return *pix_fmts;
0397     }
0398 
0399     Video(AVFormatContext *oc, Dict options, AVCodecID codec_id, int64_t bit_rate, int width, int height, int fps)
0400         : ost(oc, codec_id)
0401     {
0402         if ( ost.codec->type != AVMEDIA_TYPE_VIDEO )
0403             throw Error(i18n("No video codec"));
0404 
0405         ost.codec_context->codec_id = codec_id;
0406 
0407         ost.codec_context->bit_rate = bit_rate;
0408 
0409         // Resolution must be a multiple of two
0410         ost.codec_context->width = width;
0411         if ( ost.codec_context->width % 2 )
0412             ost.codec_context->width -= 1;
0413         ost.codec_context->height = height;
0414         if ( ost.codec_context->height % 2 )
0415             ost.codec_context->height -= 1;
0416 
0417         // timebase: This is the fundamental unit of time (in seconds) in terms
0418         // of which frame timestamps are represented. For fixed-fps content,
0419         // timebase should be 1/framerate and timestamp increments should be
0420         // identical to 1.
0421         ost.stream->time_base = AVRational{ 1, fps };
0422         ost.codec_context->time_base = ost.stream->time_base;
0423 
0424         // Q_EMIT one intra frame every twelve frames at most
0425         ost.codec_context->gop_size = 12;
0426 
0427         // get_format() for some reason returns an invalid value
0428         ost.codec_context->pix_fmt = best_pixel_format(ost.codec->pix_fmts);
0429 
0430         if ( ost.codec_context->pix_fmt == AV_PIX_FMT_NONE )
0431                 throw av::Error(i18n("Could not determine pixel format"));
0432 
0433 //         // just for testing, we also add B-frames
0434 //         if ( ost.codec_context->codec_id == AV_CODEC_ID_MPEG2VIDEO )
0435 //             ost.codec_context->max_b_frames = 2;
0436 
0437         int ret;
0438         // open the codec
0439         ret = avcodec_open2(ost.codec_context, ost.codec, options.dict());
0440         if (ret < 0)
0441             throw av::Error(i18n("Could not open video codec: %1", av::err2str(ret)));
0442 
0443         // allocate and init a re-usable frame
0444         ost.frame = alloc_picture(ost.codec_context->pix_fmt, ost.codec_context->width, ost.codec_context->height);
0445         if (!ost.frame)
0446             throw av::Error(i18n("Could not allocate video frame"));
0447 
0448         ost.tmp_frame = nullptr;
0449 
0450         /* copy the stream parameters to the muxer */
0451         ret = avcodec_parameters_from_context(ost.stream->codecpar, ost.codec_context);
0452         if (ret < 0)
0453             throw av::Error(i18n("Could not copy the stream parameters"));
0454     }
0455 
0456     static void fill_image(AVFrame *pict, const QImage& image)
0457     {
0458         for ( int y = 0; y < image.height(); y++)
0459         {
0460             auto line = image.constScanLine(y);
0461             for ( int x = 0; x < image.bytesPerLine(); x++ )
0462             {
0463                 pict->data[0][y * pict->linesize[0] + x] = line[x];
0464             }
0465         }
0466     }
0467 
0468     AVFrame *get_video_frame(QImage image)
0469     {
0470         // when we pass a frame to the encoder, it may keep a reference to it
0471         // internally; make sure we do not overwrite it here
0472         if ( av_frame_make_writable(ost.frame) < 0 )
0473             throw av::Error(i18n("Error while creating video frame"));
0474 
0475         auto format = image_format(image.format());
0476         if ( format.first == AV_PIX_FMT_NONE )
0477         {
0478             image = QImage(ost.codec_context->width, ost.codec_context->height, QImage::Format_RGB888);
0479             format.first = AV_PIX_FMT_RGB24;
0480         }
0481         else if ( format.second != image.format() )
0482         {
0483             image = image.convertToFormat(format.second);
0484         }
0485 
0486         if ( ost.codec_context->pix_fmt != format.first || image.width() != ost.codec_context->width || image.height() != ost.codec_context->height )
0487         {
0488             if (!ost.sws_context)
0489             {
0490                 ost.sws_context = sws_getContext(
0491                     image.width(), image.height(), format.first,
0492                     ost.codec_context->width, ost.codec_context->height, ost.codec_context->pix_fmt,
0493                     SWS_BICUBIC,
0494                     nullptr, nullptr, nullptr
0495                 );
0496                 if (!ost.sws_context)
0497                     throw av::Error(i18n("Could not initialize the conversion context"));
0498             }
0499             if ( !ost.tmp_frame )
0500             {
0501                 ost.tmp_frame = alloc_picture(format.first, image.width(), image.height());
0502                 if (!ost.tmp_frame)
0503                     throw av::Error(i18n("Could not allocate temporary picture"));
0504             }
0505             fill_image(ost.tmp_frame, image);
0506             sws_scale(ost.sws_context, (const uint8_t * const *) ost.tmp_frame->data,
0507                     ost.tmp_frame->linesize, 0, ost.codec_context->height, ost.frame->data,
0508                     ost.frame->linesize);
0509         }
0510         else
0511         {
0512             fill_image(ost.frame, image);
0513         }
0514 
0515         ost.frame->pts = ost.next_pts++;
0516 
0517         return ost.frame;
0518     }
0519 
0520     void write_video_frame(const QImage& image)
0521     {
0522         ost.write_frame(get_video_frame(image));
0523     }
0524 
0525     void flush()
0526     {
0527         ost.flush_frames();
0528     }
0529 
0530 private:
0531     OutputStream ost;
0532 };
0533 
0534 
0535 class Logger
0536 {
0537 private:
0538     struct LogData
0539     {
0540         std::mutex mutex;
0541         io::video::VideoFormat* format = nullptr;
0542         int log_level;
0543 
0544         static LogData& instance()
0545         {
0546             static LogData instance;
0547             return instance;
0548         }
0549 
0550         static void static_callback(void *, int level, const char *fmt, va_list vl)
0551         {
0552             instance().callback(level, fmt, vl);
0553         }
0554 
0555         void setup(io::video::VideoFormat* format, int log_level)
0556         {
0557             auto guard = std::lock_guard(mutex);
0558             this->format = format;
0559             this->log_level = log_level;
0560             av_log_set_callback(&LogData::static_callback);
0561         }
0562 
0563         void teardown()
0564         {
0565             auto guard = std::lock_guard(mutex);
0566             av_log_set_callback(&av_log_default_callback);
0567             format = nullptr;
0568         }
0569 
0570         void callback(int level, const char *fmt, va_list vl)
0571         {
0572             auto guard = std::lock_guard(mutex);
0573             if ( level > log_level )
0574                 return;
0575             char buffer[1024];
0576             std::vsprintf(buffer, fmt, vl);
0577             QString msg(buffer);
0578             if ( msg.endsWith('\n') )
0579                 msg.remove(msg.size()-1, 1);
0580 
0581             if ( level > AV_LOG_WARNING )
0582             {
0583                 format->information(msg);
0584             }
0585             else if ( level == AV_LOG_WARNING )
0586             {
0587                 app::log::Log("libav").log(msg);
0588                 format->warning(msg);
0589             }
0590             else
0591             {
0592                 app::log::Log("libav").log(msg, app::log::Error);
0593                 format->error(msg);
0594             }
0595         }
0596     };
0597 
0598 public:
0599     Logger(io::video::VideoFormat* format)
0600         : Logger(format, av_log_get_level())
0601     {}
0602 
0603     Logger(io::video::VideoFormat* format, int log_level)
0604     {
0605         level = av_log_get_level();
0606         av_log_set_level(log_level);
0607         LogData::instance().setup(format, log_level);
0608     }
0609 
0610     ~Logger()
0611     {
0612         LogData::instance().teardown();
0613         av_log_set_level(level);
0614     }
0615 
0616 private:
0617     int level;
0618 };
0619 
0620 class DeviceIo
0621 {
0622 public:
0623     DeviceIo(QIODevice* device, int block_size = 4*1024)
0624     {
0625         buffer = (unsigned char*)av_malloc(block_size);
0626         context_ = avio_alloc_context(
0627             buffer,
0628             block_size,
0629             1,
0630             device,
0631             &DeviceIo::read_packet,
0632             &DeviceIo::write_packet,
0633             &DeviceIo::seek
0634         );
0635     }
0636 
0637     ~DeviceIo()
0638     {
0639         avio_context_free(&context_);
0640         av_free(buffer);
0641     }
0642 
0643     AVIOContext* context() const noexcept
0644     {
0645         return context_;
0646     }
0647 
0648 private:
0649     static int read_packet(void *opaque, uint8_t *buf, int buf_size)
0650     {
0651         QIODevice* device = (QIODevice*)opaque;
0652         return device->read((char*)buf, buf_size);
0653     }
0654 
0655     static int write_packet(void *opaque, uint8_t *buf, int buf_size)
0656     {
0657         QIODevice* device = (QIODevice*)opaque;
0658         return device->write((char*)buf, buf_size);
0659     }
0660 
0661     static int64_t seek(void *opaque, int64_t offset, int whence)
0662     {
0663         QIODevice* device = (QIODevice*)opaque;
0664 
0665         switch ( whence )
0666         {
0667             case SEEK_SET:
0668                 return device->seek(offset);
0669             case SEEK_CUR:
0670                 return device->seek(offset + device->pos());
0671             case SEEK_END:
0672                 device->readAll();
0673                 return device->seek(offset + device->pos());
0674         }
0675         return 0;
0676     }
0677 
0678     AVIOContext* context_;
0679     unsigned char * buffer;
0680 };
0681 
0682 } // namespace glaxnimate::av
0683 
// Registers VideoFormat with the global IO format registry at static-init time.
glaxnimate::io::Autoreg<glaxnimate::io::video::VideoFormat> glaxnimate::io::video::VideoFormat::autoreg;
0685 
0686 
// Cached list of supported output extensions, lazily filled by get_formats().
static QStringList out_ext;
0688 
0689 static bool format_skip(const AVOutputFormat* format)
0690 {
0691     static std::set<std::string> blacklisted = {
0692         "webp", "gif", "ico"
0693     };
0694 
0695     return
0696         blacklisted.count(format->name) ||
0697         format->video_codec == AV_CODEC_ID_NONE ||
0698         format->flags & (AVFMT_NOFILE|AVFMT_NEEDNUMBER)
0699     ;
0700 }
0701 
0702 static void get_formats()
0703 {
0704     out_ext.push_back("mp4");
0705 
0706     void* opaque = nullptr;
0707     while ( auto format = av_muxer_iterate(&opaque) )
0708     {
0709         if ( format_skip(format) )
0710             continue;
0711 
0712         out_ext += QString(format->extensions).split(',',
0713 #if QT_VERSION >= QT_VERSION_CHECK(5, 14, 0)
0714             Qt::SkipEmptyParts
0715 #else
0716             QString::SkipEmptyParts
0717 #endif
0718         );
0719     }
0720 }
0721 
0722 QStringList glaxnimate::io::video::VideoFormat::extensions() const
0723 {
0724     if ( out_ext.empty() )
0725         get_formats();
0726     return out_ext;
0727 }
0728 
0729 
// Render the composition frame by frame and encode it into `dev` with
// libavformat/libavcodec. Returns false (after reporting via error()) on
// failure. Settings read: "verbose" (bool), "width"/"height" (int, 0 means
// use the composition size), "background" (QColor), and any "ffmpeg:<key>"
// entry, which is forwarded as a codec option.
bool glaxnimate::io::video::VideoFormat::on_save(QIODevice& dev, const QString& name, model::Composition* comp, const QVariantMap& settings)
{
    try
    {
        // Redirect libav logging into this object's messages while exporting
        av::Logger logger(this, settings["verbose"].toBool() ? AV_LOG_INFO : AV_LOG_WARNING);

        auto filename = name.toUtf8();

        // allocate the output media context
        AVFormatContext *oc;

        // the filename is only used to guess the container format;
        // actual output goes through the custom AVIO context below
        avformat_alloc_output_context2(&oc, nullptr, nullptr, filename.data());

        if ( !oc )
        {
            warning(i18n("Could not deduce output format from file extension: using MPEG."));
            avformat_alloc_output_context2(&oc, nullptr, "mpeg", filename.data());
            if ( !oc )
            {
                error(i18n("Could not find output format"));
                return false;
            }
        }

        // see https://libav.org/documentation/doxygen/master/group__metadata__api.html
        av::DictWrapper metadata(&oc->metadata);

        auto document = comp->document();

        metadata["title"] = comp->name.get();

        if ( !document->info().author.isEmpty() )
            metadata["artist"] = document->info().author;

        if ( !document->info().description.isEmpty() )
            metadata["comment"] = document->info().description;

        // copy user-defined document metadata verbatim
        for ( auto it = document->metadata().begin(); it != document->metadata().end(); ++it )
            metadata[it.key()] = it->toString();

        // free the format context when leaving this scope (on any path)
        av::CGuard guard(&avformat_free_context, oc);

        // Add the audio and video streams using the given (or default)
        // format codecs and initialize the codecs.
        AVCodecID codec_id = oc->oformat->video_codec;
        if ( codec_id == AV_CODEC_ID_NONE )
        {
            error(i18n("No video codec"));
            return false;
        }

        // Options
        av::Dict opt;
        opt["crf"] = 23;
        if ( codec_id == AV_CODEC_ID_H264 )
        {
            opt["profile"] = "high";
            opt["preset"] = "veryslow";
            opt["tune"] = "animation";
        }

        // forward user-supplied "ffmpeg:*" settings as raw codec options
        for ( auto it = settings.begin(); it != settings.end(); ++it )
        {
            if ( it.key().startsWith("ffmpeg:") )
            {
                auto value = it->toString();
                opt[it.key().mid(7)] = value;
            }
        }

        // Now that all the parameters are set, we can open the audio and
        // video codecs and allocate the necessary encode buffers.
        int width = settings["width"].toInt();
        if ( width == 0 )
            width = comp->width.get();
        int height = settings["height"].toInt();
        if ( height == 0 )
            height = comp->height.get();
        int fps = qRound(comp->fps.get());
        av::Video video(oc, opt, codec_id, 7000000, width, height, fps);

        // log format info
        av_dump_format(oc, 0, filename.constData(), 1);

        // open the output file, if needed
        // NOTE: `io` is declared after `guard`, so it outlives the oc free —
        // oc->pb points into `io` until then
        av::DeviceIo io(&dev);
        oc->pb = io.context();

        // Write the stream header, if any
        int ret = avformat_write_header(oc, opt.dict());
        if ( ret < 0 )
        {
            error(i18n("Error occurred when opening output file: %1", av::err2str(ret)));
            return false;
        }

        // render and encode every frame in [first_frame, last_frame)
        auto first_frame = comp->animation->first_frame.get();
        auto last_frame = comp->animation->last_frame.get();
        QColor background = settings["background"].value<QColor>();
        Q_EMIT progress_max_changed(last_frame - first_frame);
        for ( auto i = first_frame; i < last_frame; i++ )
        {
            video.write_video_frame(comp->render_image(i, {width, height}, background));
            Q_EMIT progress(i - first_frame);
        }

        video.flush();

        // Write the trailer, if any. The trailer must be written before you
        // close the CodecContexts open when you wrote the header; otherwise
        // av_write_trailer() may try to use memory that was freed on
        // av_codec_close().
        av_write_trailer(oc);

        return true;
    }
    catch ( const av::Error& e )
    {
        error(e.message());
        return false;
    }
}
0852 
0853 std::unique_ptr<app::settings::SettingsGroup> glaxnimate::io::video::VideoFormat::save_settings(model::Composition* comp) const
0854 {
0855     return std::make_unique<app::settings::SettingsGroup>(app::settings::SettingList{
0856         //                      slug            label             description                                           default             min max
0857         app::settings::Setting{"background",    i18n("Background"), i18n("Background color"),                               QColor(0, 0, 0, 0)},
0858         app::settings::Setting{"width",         i18n("Width"),      i18n("If not 0, it will overwrite the size"),           comp->width.get(),  0, 99999},
0859         app::settings::Setting{"height",        i18n("Height"),     i18n("If not 0, it will overwrite the size"),           comp->height.get(), 0, 99999},
0860         app::settings::Setting{"verbose",       i18n("Verbose"),    i18n("Show verbose information on the conversion"),     false},
0861     });
0862 }
0863 
0864 QString glaxnimate::io::video::VideoFormat::library_version()
0865 {
0866     return QStringList{
0867         LIBAVUTIL_IDENT,
0868         LIBAVFORMAT_IDENT,
0869         LIBAVCODEC_IDENT,
0870         LIBSWSCALE_IDENT
0871     }.join(", ");
0872 }