/*
    SPDX-FileCopyrightText: 2023 Aleix Pol Gonzalez <aleixpol@kde.org>
    SPDX-FileCopyrightText: 2023 Marco Martin <mart@kde.org>
    SPDX-FileCopyrightText: 2023 Arjen Hiemstra <ahiemstra@heimr.nl>

    SPDX-License-Identifier: LGPL-2.1-only OR LGPL-3.0-only OR LicenseRef-KDE-Accepted-LGPL
*/

#include "encoder_p.h"

#include <mutex>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/avutil.h>
#include <libavutil/hwcontext.h>
#include <libavutil/hwcontext_drm.h>
#include <libavutil/imgutils.h>
}

#include <libdrm/drm_fourcc.h>

#include "pipewireproduce_p.h"
#include "vaapiutils_p.h"

#include "logging_record.h"

#undef av_err2str
// The av_err2str macro provided by libav fails to compile on GCC because it returns a pointer to a temporary array that does not outlive the expression
char str[AV_ERROR_MAX_STRING_SIZE];
char *av_err2str(int errnum)
{
    return av_make_error_string(str, AV_ERROR_MAX_STRING_SIZE, errnum);
}

static AVPixelFormat convertQImageFormatToAVPixelFormat(QImage::Format format)
{
    // Listing the formats handled by SpaToQImageFormat
    switch (format) {
    case QImage::Format_RGB888:
        return AV_PIX_FMT_RGB24;
    case QImage::Format_BGR888:
        return AV_PIX_FMT_BGR24;
    case QImage::Format_RGBX8888:
    case QImage::Format_RGBA8888_Premultiplied:
        return AV_PIX_FMT_RGBA;
    case QImage::Format_RGB32:
    case QImage::Format_ARGB32:
        return AV_PIX_FMT_RGB32;
    default:
        qDebug() << "Unexpected pixel format" << format;
        return AV_PIX_FMT_RGB32;
    }
}

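// Map a quality percentage (0-100) onto FFmpeg's lambda scale, where a lower lambda value means higher per-frame quality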
static int percentageToFrameQuality(quint8 quality)
{
    return std::max(1, int(FF_LAMBDA_MAX - (quality / 100.0) * FF_LAMBDA_MAX));
}

Encoder::Encoder(PipeWireProduce *produce)
    : QObject(nullptr)
    , m_produce(produce)
{
}

Encoder::~Encoder()
{
    if (m_avFilterGraph) {
        avfilter_graph_free(&m_avFilterGraph);
    }

    if (m_avCodecContext) {
        avcodec_close(m_avCodecContext);
        av_free(m_avCodecContext);
    }
}

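// Pull all frames currently available from the filter graph and queue them on the encoder,
// discarding filtered frames once the number queued for encoding would reach maximumFrames.
// Returns the number of frames filtered and the number of frames queued.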
std::pair<int, int> Encoder::encodeFrame(int maximumFrames)
{
    auto frame = av_frame_alloc();
    if (!frame) {
        qFatal("Failed to allocate memory");
    }

    int filtered = 0;
    int queued = 0;

    for (;;) {
        if (auto result = av_buffersink_get_frame(m_outputFilter, frame); result < 0) {
            if (result != AVERROR_EOF && result != AVERROR(EAGAIN)) {
                qCWarning(PIPEWIRERECORD_LOGGING) << "Failed receiving filtered frame:" << av_err2str(result);
            }
            break;
        }

        filtered++;

        if (queued + 1 < maximumFrames) {
            auto ret = -1;
            {
                std::lock_guard guard(m_avCodecMutex);
                ret = avcodec_send_frame(m_avCodecContext, frame);
            }
            if (ret < 0) {
                if (ret != AVERROR_EOF && ret != AVERROR(EAGAIN)) {
                    qCWarning(PIPEWIRERECORD_LOGGING) << "Error sending a frame for encoding:" << av_err2str(ret);
                }
                break;
            }
            queued++;
        } else {
            qCWarning(PIPEWIRERECORD_LOGGING) << "Encode queue is full, discarding filtered frame" << frame->pts;
        }
        av_frame_unref(frame);
    }

    av_frame_free(&frame);

    return std::make_pair(filtered, queued);
}

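// Fetch every packet the encoder currently has ready and hand each one to PipeWireProduce::processPacket.
// Returns the number of packets received.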
int Encoder::receivePacket()
{
    auto packet = av_packet_alloc();
    if (!packet) {
        qFatal("Failed to allocate memory");
    }

    int received = 0;

    for (;;) {
        auto ret = -1;
        {
            std::lock_guard guard(m_avCodecMutex);
            ret = avcodec_receive_packet(m_avCodecContext, packet);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF && ret != AVERROR(EAGAIN)) {
                qCWarning(PIPEWIRERECORD_LOGGING) << "Error encoding a frame: " << av_err2str(ret);
            }
            av_packet_unref(packet);
            break;
        }

        received++;

        m_produce->processPacket(packet);
        av_packet_unref(packet);
    }

    av_packet_free(&packet);

    return received;
}

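// Send a null frame to put the encoder into draining mode so the remaining packets can be flushed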
void Encoder::finish()
{
    std::lock_guard guard(m_avCodecMutex);
    avcodec_send_frame(m_avCodecContext, nullptr);
}

AVCodecContext *Encoder::avCodecContext() const
{
    return m_avCodecContext;
}

void Encoder::setQuality(std::optional<quint8> quality)
{
    m_quality = quality;
    if (m_avCodecContext) {
        m_avCodecContext->global_quality = percentageToAbsoluteQuality(quality);
    }
}

SoftwareEncoder::SoftwareEncoder(PipeWireProduce *produce)
    : Encoder(produce)
{
}

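// Download or convert the incoming PipeWire frame into a QImage, wrap it in an AVFrame and feed it to the filter graph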
void SoftwareEncoder::filterFrame(const PipeWireFrame &frame)
{
    auto size = m_produce->m_stream->size();

    QImage image;
    if (frame.dmabuf) {
        image = QImage(m_produce->m_stream->size(), QImage::Format_RGBA8888_Premultiplied);
        if (!m_dmaBufHandler.downloadFrame(image, frame)) {
            m_produce->m_stream->renegotiateModifierFailed(frame.format, frame.dmabuf->modifier);
            return;
        }
    } else if (frame.dataFrame) {
        image = frame.dataFrame->toImage();
    } else {
        return;
    }

    AVFrame *avFrame = av_frame_alloc();
    if (!avFrame) {
        qFatal("Failed to allocate memory");
    }
    avFrame->format = convertQImageFormatToAVPixelFormat(image.format());
    avFrame->width = size.width();
    avFrame->height = size.height();
    if (m_quality) {
        avFrame->quality = percentageToFrameQuality(m_quality.value());
    }

    av_frame_get_buffer(avFrame, 32);

    const std::uint8_t *buffers[] = {image.constBits(), nullptr};
    const int strides[] = {static_cast<int>(image.bytesPerLine()), 0, 0, 0};

    av_image_copy(avFrame->data, avFrame->linesize, buffers, strides, static_cast<AVPixelFormat>(avFrame->format), size.width(), size.height());

    if (frame.presentationTimestamp) {
        avFrame->pts = m_produce->framePts(frame.presentationTimestamp);
    }

    if (auto result = av_buffersrc_add_frame(m_inputFilter, avFrame); result < 0) {
        qCWarning(PIPEWIRERECORD_LOGGING) << "Failed to submit frame for filtering" << av_err2str(result);
    }

    // av_buffersrc_add_frame() takes over the frame's data references but not the AVFrame itself, so free it here
    av_frame_free(&avFrame);
}

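// Build the software filter graph: a "buffer" source receiving RGBA frames, a format filter converting to yuv420p and a "buffersink" output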
bool SoftwareEncoder::createFilterGraph(const QSize &size)
{
    m_avFilterGraph = avfilter_graph_alloc();
    if (!m_avFilterGraph) {
        qFatal("Failed to allocate memory");
    }

    int ret = avfilter_graph_create_filter(&m_inputFilter,
                                           avfilter_get_by_name("buffer"),
                                           "in",
                                           "width=1:height=1:pix_fmt=rgba:time_base=1/1",
                                           nullptr,
                                           m_avFilterGraph);
    if (ret < 0) {
        qCWarning(PIPEWIRERECORD_LOGGING) << "Failed to create the buffer filter";
        return false;
    }

    auto parameters = av_buffersrc_parameters_alloc();
    if (!parameters) {
        qFatal("Failed to allocate memory");
    }

    parameters->format = AV_PIX_FMT_RGBA;
    parameters->width = size.width();
    parameters->height = size.height();
    parameters->time_base = {1, 1000};

    av_buffersrc_parameters_set(m_inputFilter, parameters);
    av_free(parameters);
    parameters = nullptr;

    ret = avfilter_graph_create_filter(&m_outputFilter, avfilter_get_by_name("buffersink"), "out", nullptr, nullptr, m_avFilterGraph);
    if (ret < 0) {
        qCWarning(PIPEWIRERECORD_LOGGING) << "Could not create buffer output filter";
        return false;
    }

    auto inputs = avfilter_inout_alloc();
    if (!inputs) {
        qFatal("Failed to allocate memory");
    }
    inputs->name = av_strdup("in");
    inputs->filter_ctx = m_inputFilter;
    inputs->pad_idx = 0;
    inputs->next = nullptr;

    auto outputs = avfilter_inout_alloc();
    if (!outputs) {
        qFatal("Failed to allocate memory");
    }
    outputs->name = av_strdup("out");
    outputs->filter_ctx = m_outputFilter;
    outputs->pad_idx = 0;
    outputs->next = nullptr;

    ret = avfilter_graph_parse(m_avFilterGraph, "format=pix_fmts=yuv420p", outputs, inputs, NULL);
    if (ret < 0) {
        qCWarning(PIPEWIRERECORD_LOGGING) << "Failed creating filter graph";
        return false;
    }

    ret = avfilter_graph_config(m_avFilterGraph, nullptr);
    if (ret < 0) {
        qCWarning(PIPEWIRERECORD_LOGGING) << "Failed configuring filter graph";
        return false;
    }

    return true;
}

HardwareEncoder::HardwareEncoder(PipeWireProduce *produce)
    : Encoder(produce)
{
}

HardwareEncoder::~HardwareEncoder()
{
    // Both contexts are AVBufferRef-backed, so release them with av_buffer_unref rather than av_free
    if (m_drmFramesContext) {
        av_buffer_unref(&m_drmFramesContext);
    }

    if (m_drmContext) {
        av_buffer_unref(&m_drmContext);
    }
}

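// Wrap the dmabuf planes of an incoming frame in an AVDRMFrameDescriptor and feed it to the filter graph without copying the pixel data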
void HardwareEncoder::filterFrame(const PipeWireFrame &frame)
{
    if (!frame.dmabuf) {
        return;
    }

    auto attribs = frame.dmabuf.value();

    if (!m_supportsHardwareModifiers && attribs.modifier != 0) {
        m_produce->m_stream->renegotiateModifierFailed(frame.format, attribs.modifier);
        return;
    }

    auto drmFrame = av_frame_alloc();
    if (!drmFrame) {
        qFatal("Failed to allocate memory");
    }
    drmFrame->format = AV_PIX_FMT_DRM_PRIME;
    drmFrame->width = attribs.width;
    drmFrame->height = attribs.height;
    if (m_quality) {
        drmFrame->quality = percentageToFrameQuality(m_quality.value());
    }

    // Allocate the descriptor with av_mallocz so that av_buffer_default_free, which uses av_free, can release it
    auto frameDesc = static_cast<AVDRMFrameDescriptor *>(av_mallocz(sizeof(AVDRMFrameDescriptor)));
    if (!frameDesc) {
        qFatal("Failed to allocate memory");
    }
    frameDesc->nb_layers = 1;
    frameDesc->layers[0].nb_planes = attribs.planes.count();
    frameDesc->layers[0].format = attribs.format;
    for (int i = 0; i < attribs.planes.count(); ++i) {
        const auto &plane = attribs.planes[i];
        frameDesc->layers[0].planes[i].object_index = 0;
        frameDesc->layers[0].planes[i].offset = plane.offset;
        frameDesc->layers[0].planes[i].pitch = plane.stride;
    }
    frameDesc->nb_objects = 1;
    frameDesc->objects[0].fd = attribs.planes[0].fd;
    frameDesc->objects[0].format_modifier = attribs.modifier;
    frameDesc->objects[0].size = attribs.width * attribs.height * 4;

    drmFrame->data[0] = reinterpret_cast<uint8_t *>(frameDesc);
    drmFrame->buf[0] = av_buffer_create(reinterpret_cast<uint8_t *>(frameDesc), sizeof(*frameDesc), av_buffer_default_free, nullptr, 0);
    if (frame.presentationTimestamp) {
        drmFrame->pts = m_produce->framePts(frame.presentationTimestamp);
    }

    if (auto result = av_buffersrc_add_frame(m_inputFilter, drmFrame); result < 0) {
        qCDebug(PIPEWIRERECORD_LOGGING) << "Failed sending frame for encoding" << av_err2str(result);
    }

    // av_frame_free() also drops the references held by the frame, so it is safe on both the success and failure paths
    av_frame_free(&drmFrame);
}

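// Verify that VAAPI hardware encoding is available and supports the requested size; returns the device path reported by VaapiUtils, or an empty QByteArray otherwise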
QByteArray HardwareEncoder::checkVaapi(const QSize &size)
{
    VaapiUtils utils;
    if (utils.devicePath().isEmpty()) {
        qCWarning(PIPEWIRERECORD_LOGGING) << "Hardware encoding is not supported on this device.";
        return QByteArray{};
    }

    auto minSize = utils.minimumSize();
    if (size.width() < minSize.width() || size.height() < minSize.height()) {
        qCWarning(PIPEWIRERECORD_LOGGING) << "Requested size" << size << "less than minimum supported hardware size" << minSize;
        return QByteArray{};
    }

    auto maxSize = utils.maximumSize();
    if (size.width() > maxSize.width() || size.height() > maxSize.height()) {
        qCWarning(PIPEWIRERECORD_LOGGING) << "Requested size" << size << "exceeds maximum supported hardware size" << maxSize;
        return QByteArray{};
    }

    m_supportsHardwareModifiers = utils.supportsHardwareModifiers();

    return utils.devicePath();
}

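// Create the DRM hardware device context and frames context used to import dmabuf frames for hardware encoding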
bool HardwareEncoder::createDrmContext(const QSize &size)
{
    auto path = checkVaapi(size);
    if (path.isEmpty()) {
        return false;
    }

    int err = av_hwdevice_ctx_create(&m_drmContext, AV_HWDEVICE_TYPE_DRM, path.data(), NULL, AV_HWFRAME_MAP_READ);
    if (err < 0) {
        qCWarning(PIPEWIRERECORD_LOGGING) << "Failed to create DRM device. Error" << av_err2str(err);
        return false;
    }

    m_drmFramesContext = av_hwframe_ctx_alloc(m_drmContext);
    if (!m_drmFramesContext) {
        qCWarning(PIPEWIRERECORD_LOGGING) << "Failed to create DRM frames context";
        return false;
    }

    auto framesContext = reinterpret_cast<AVHWFramesContext *>(m_drmFramesContext->data);
    framesContext->format = AV_PIX_FMT_DRM_PRIME;
    framesContext->sw_format = AV_PIX_FMT_0BGR;
    framesContext->width = size.width();
    framesContext->height = size.height();

    if (auto result = av_hwframe_ctx_init(m_drmFramesContext); result < 0) {
        qCWarning(PIPEWIRERECORD_LOGGING) << "Failed initializing DRM frames context" << av_err2str(result);
        av_buffer_unref(&m_drmFramesContext);
        return false;
    }

    return true;
}

#include "moc_encoder_p.cpp"