/*
    KWin - the KDE window manager
    This file is part of the KDE project.

    SPDX-FileCopyrightText: 2021 Xaver Hugl <xaver.hugl@gmail.com>

    SPDX-License-Identifier: GPL-2.0-or-later
*/

#include "drm_pipeline.h"

#include <errno.h>

#include "core/session.h"
#include "drm_backend.h"
#include "drm_buffer.h"
#include "drm_buffer_gbm.h"
#include "drm_connector.h"
#include "drm_crtc.h"
#include "drm_egl_backend.h"
#include "drm_gpu.h"
#include "drm_layer.h"
#include "drm_logging.h"
#include "drm_output.h"
#include "drm_plane.h"

#include <drm_fourcc.h>
#include <gbm.h>

namespace KWin
{

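// Formats assumed to be usable when the legacy (non-atomic) API is used and no
// plane format list can be queried.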
static const QMap<uint32_t, QVector<uint64_t>> legacyFormats = {{DRM_FORMAT_XRGB8888, {}}};
static const QMap<uint32_t, QVector<uint64_t>> legacyCursorFormats = {{DRM_FORMAT_ARGB8888, {}}};

DrmPipeline::DrmPipeline(DrmConnector *conn)
    : m_connector(conn)
{
}

DrmPipeline::~DrmPipeline()
{
    if (m_pageflipPending && m_current.crtc) {
        pageFlipped({});
    }
}

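// Tests whether the current layer buffer can be scanned out directly.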
bool DrmPipeline::testScanout()
{
    // TODO: only test the modeset requirement at most once per scanout cycle
    if (gpu()->needsModeset()) {
        return false;
    }
    if (gpu()->atomicModeSetting()) {
        return commitPipelines({this}, CommitMode::Test) == Error::None;
    } else {
        if (m_pending.layer->currentBuffer()->buffer()->size() != m_pending.mode->size()) {
            // scaling isn't supported with the legacy API
            return false;
        }
        // no other way to test than to do it.
        // As we only have a maximum of one test per scanout cycle, this is fine
        return presentLegacy() == Error::None;
    }
}

DrmPipeline::Error DrmPipeline::present()
{
    Q_ASSERT(m_pending.crtc);
    if (gpu()->atomicModeSetting()) {
        return commitPipelines({this}, CommitMode::Commit);
    } else {
        if (m_pending.layer->hasDirectScanoutBuffer()) {
            // already presented
            return Error::None;
        }
        return presentLegacy();
    }
}

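// Flags that the next present requires a modeset and delegates to DrmGpu::maybeModeset().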
bool DrmPipeline::maybeModeset()
{
    m_modesetPresentPending = true;
    return gpu()->maybeModeset();
}

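// Dispatches to the atomic or legacy commit path, depending on what the GPU supports.
// unusedObjects are CRTCs and planes that are no longer assigned to any pipeline and get disabled.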
DrmPipeline::Error DrmPipeline::commitPipelines(const QVector<DrmPipeline *> &pipelines, CommitMode mode, const QVector<DrmObject *> &unusedObjects)
{
    Q_ASSERT(!pipelines.isEmpty());
    if (pipelines[0]->gpu()->atomicModeSetting()) {
        return commitPipelinesAtomic(pipelines, mode, unusedObjects);
    } else {
        return commitPipelinesLegacy(pipelines, mode);
    }
}

DrmPipeline::Error DrmPipeline::commitPipelinesAtomic(const QVector<DrmPipeline *> &pipelines, CommitMode mode, const QVector<DrmObject *> &unusedObjects)
{
    if (mode == CommitMode::Test) {
        // if there's a modeset pending, the tests on top of that state
        // also have to allow modesets or they'll always fail
        const bool wantsModeset = std::any_of(pipelines.begin(), pipelines.end(), [](DrmPipeline *pipeline) {
            return pipeline->needsModeset();
        });
        if (wantsModeset) {
            mode = CommitMode::TestAllowModeset;
        }
    }
    const auto &failed = [&pipelines, &unusedObjects]() {
        for (const auto &pipeline : pipelines) {
            pipeline->printDebugInfo();
            pipeline->atomicCommitFailed();
        }
        for (const auto &obj : unusedObjects) {
            obj->printProps(DrmObject::PrintMode::OnlyChanged);
            obj->rollbackPending();
        }
    };

    DrmUniquePtr<drmModeAtomicReq> req{drmModeAtomicAlloc()};
    if (!req) {
        qCCritical(KWIN_DRM) << "Failed to allocate drmModeAtomicReq!" << strerror(errno);
        return Error::OutofMemory;
    }
    for (const auto &pipeline : pipelines) {
        pipeline->checkHardwareRotation();
        if (pipeline->activePending()) {
            if (!pipeline->m_pending.layer->checkTestBuffer()) {
                qCWarning(KWIN_DRM) << "Checking test buffer failed for" << mode;
                failed();
                return Error::TestBufferFailed;
            }
            if (!pipeline->prepareAtomicPresentation()) {
                failed();
                return Error::InvalidArguments;
            }
            if (mode == CommitMode::TestAllowModeset || mode == CommitMode::CommitModeset) {
                pipeline->prepareAtomicModeset();
            }
        } else {
            pipeline->prepareAtomicDisable();
        }
        if (!pipeline->populateAtomicValues(req.get())) {
            failed();
            return errnoToError();
        }
    }
    for (const auto &unused : unusedObjects) {
        unused->disable();
        if (!unused->atomicPopulate(req.get())) {
            qCWarning(KWIN_DRM) << "Populating atomic values failed for unused resource" << unused;
            failed();
            return errnoToError();
        }
    }
    const auto gpu = pipelines[0]->gpu();
    switch (mode) {
    case CommitMode::TestAllowModeset: {
        bool withModeset = drmModeAtomicCommit(gpu->fd(), req.get(), DRM_MODE_ATOMIC_ALLOW_MODESET | DRM_MODE_ATOMIC_TEST_ONLY, nullptr) == 0;
        if (!withModeset) {
            qCDebug(KWIN_DRM) << "Atomic modeset test failed!" << strerror(errno);
            failed();
            return errnoToError();
        }
        bool withoutModeset = drmModeAtomicCommit(gpu->fd(), req.get(), DRM_MODE_ATOMIC_TEST_ONLY, nullptr) == 0;
        for (const auto &pipeline : pipelines) {
            pipeline->m_pending.needsModeset = !withoutModeset;
        }
        std::for_each(pipelines.begin(), pipelines.end(), std::mem_fn(&DrmPipeline::atomicTestSuccessful));
        std::for_each(unusedObjects.begin(), unusedObjects.end(), std::mem_fn(&DrmObject::commitPending));
        return Error::None;
    }
    case CommitMode::CommitModeset: {
        // The kernel fails commits with DRM_MODE_PAGE_FLIP_EVENT when a crtc is disabled in the commit
        // and already was disabled before, to work around some quirks in old userspace.
        // Instead of using DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_ATOMIC_NONBLOCK, do the modeset in a blocking
        // fashion without page flip events and directly call the pageFlipped method afterwards
        bool commit = drmModeAtomicCommit(gpu->fd(), req.get(), DRM_MODE_ATOMIC_ALLOW_MODESET, nullptr) == 0;
        if (!commit) {
            qCCritical(KWIN_DRM) << "Atomic modeset commit failed!" << strerror(errno);
            failed();
            return errnoToError();
        }
        std::for_each(pipelines.begin(), pipelines.end(), std::mem_fn(&DrmPipeline::atomicModesetSuccessful));
        for (const auto &obj : unusedObjects) {
            obj->commitPending();
            obj->commit();
            if (auto crtc = dynamic_cast<DrmCrtc *>(obj)) {
                crtc->flipBuffer();
            } else if (auto plane = dynamic_cast<DrmPlane *>(obj)) {
                plane->flipBuffer();
            }
        }
        return Error::None;
    }
    case CommitMode::Test: {
        bool test = drmModeAtomicCommit(pipelines[0]->gpu()->fd(), req.get(), DRM_MODE_ATOMIC_TEST_ONLY, nullptr) == 0;
        if (!test) {
            qCDebug(KWIN_DRM) << "Atomic test failed!" << strerror(errno);
            failed();
            return errnoToError();
        }
        std::for_each(pipelines.begin(), pipelines.end(), std::mem_fn(&DrmPipeline::atomicTestSuccessful));
        Q_ASSERT(unusedObjects.isEmpty());
        return Error::None;
    }
    case CommitMode::Commit: {
        bool commit = drmModeAtomicCommit(pipelines[0]->gpu()->fd(), req.get(), DRM_MODE_ATOMIC_NONBLOCK | DRM_MODE_PAGE_FLIP_EVENT, gpu) == 0;
        if (!commit) {
            qCCritical(KWIN_DRM) << "Atomic commit failed!" << strerror(errno);
            failed();
            return errnoToError();
        }
        std::for_each(pipelines.begin(), pipelines.end(), std::mem_fn(&DrmPipeline::atomicCommitSuccessful));
        Q_ASSERT(unusedObjects.isEmpty());
        return Error::None;
    }
    default:
        Q_UNREACHABLE();
    }
}

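// Returns the size with width and height swapped for 90/270 degree rotations.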
static QSize orientateSize(const QSize &size, DrmPlane::Transformations transforms)
{
    if (transforms & (DrmPlane::Transformation::Rotate90 | DrmPlane::Transformation::Rotate270)) {
        return size.transposed();
    } else {
        return size;
    }
}

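// Scales the buffer to the mode size while keeping its aspect ratio and centers it,
// i.e. letterboxes or pillarboxes the image.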
static QRect centerBuffer(const QSize &bufferSize, const QSize &modeSize)
{
    const double widthScale = bufferSize.width() / double(modeSize.width());
    const double heightScale = bufferSize.height() / double(modeSize.height());
    if (widthScale > heightScale) {
        const QSize size = bufferSize / widthScale;
        const uint32_t yOffset = (modeSize.height() - size.height()) / 2;
        return QRect(QPoint(0, yOffset), size);
    } else {
        const QSize size = bufferSize / heightScale;
        const uint32_t xOffset = (modeSize.width() - size.width()) / 2;
        return QRect(QPoint(xOffset, 0), size);
    }
}

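// Sets the per-frame state on the pending properties: content type, VRR, gamma and CTM blobs,
// primary plane geometry and buffer, and the cursor plane. Returns false if a required
// property is not available.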
bool DrmPipeline::prepareAtomicPresentation()
{
    if (const auto contentType = m_connector->getProp(DrmConnector::PropertyIndex::ContentType)) {
        contentType->setEnum(m_pending.contentType);
    }

    m_pending.crtc->setPending(DrmCrtc::PropertyIndex::VrrEnabled, m_pending.syncMode == RenderLoopPrivate::SyncMode::Adaptive || m_pending.syncMode == RenderLoopPrivate::SyncMode::AdaptiveAsync);
    if (const auto gamma = m_pending.crtc->getProp(DrmCrtc::PropertyIndex::Gamma_LUT)) {
        gamma->setPending(m_pending.gamma ? m_pending.gamma->blobId() : 0);
    } else if (m_pending.gamma) {
        return false;
    }
    if (const auto ctm = m_pending.crtc->getProp(DrmCrtc::PropertyIndex::CTM)) {
        ctm->setPending(m_pending.ctm ? m_pending.ctm->blobId() : 0);
    } else if (m_pending.ctm) {
        return false;
    }

    const auto fb = m_pending.layer->currentBuffer().get();
    m_pending.crtc->primaryPlane()->set(QPoint(0, 0), fb->buffer()->size(), centerBuffer(orientateSize(fb->buffer()->size(), m_pending.bufferOrientation), m_pending.mode->size()));
    m_pending.crtc->primaryPlane()->setBuffer(fb);

    if (m_pending.crtc->cursorPlane()) {
        const auto layer = cursorLayer();
        m_pending.crtc->cursorPlane()->set(QPoint(0, 0), gpu()->cursorSize(), QRect(layer->position(), gpu()->cursorSize()));
        m_pending.crtc->cursorPlane()->setBuffer(layer->isVisible() ? layer->currentBuffer().get() : nullptr);
        m_pending.crtc->cursorPlane()->setPending(DrmPlane::PropertyIndex::CrtcId, layer->isVisible() ? m_pending.crtc->id() : 0);
    }
    return true;
}

void DrmPipeline::prepareAtomicDisable()
{
    m_connector->disable();
    if (m_pending.crtc) {
        m_pending.crtc->disable();
        m_pending.crtc->primaryPlane()->disable();
        if (auto cursor = m_pending.crtc->cursorPlane()) {
            cursor->disable();
        }
    }
}

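// Sets the properties that only change with a modeset: connector routing, color range,
// link status, overscan/underscan, max bpc, HDR metadata, scaling mode, the mode blob,
// CRTC activation and plane rotation.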
void DrmPipeline::prepareAtomicModeset()
{
    m_connector->setPending(DrmConnector::PropertyIndex::CrtcId, m_pending.crtc->id());
    if (const auto &prop = m_connector->getProp(DrmConnector::PropertyIndex::Broadcast_RGB)) {
        prop->setEnum(m_pending.rgbRange);
    }
    if (const auto &prop = m_connector->getProp(DrmConnector::PropertyIndex::LinkStatus)) {
        prop->setEnum(DrmConnector::LinkStatus::Good);
    }
    if (const auto overscan = m_connector->getProp(DrmConnector::PropertyIndex::Overscan)) {
        overscan->setPending(m_pending.overscan);
    } else if (const auto underscan = m_connector->getProp(DrmConnector::PropertyIndex::Underscan)) {
        const uint32_t hborder = calculateUnderscan();
        underscan->setEnum(m_pending.overscan != 0 ? DrmConnector::UnderscanOptions::On : DrmConnector::UnderscanOptions::Off);
        m_connector->getProp(DrmConnector::PropertyIndex::Underscan_vborder)->setPending(m_pending.overscan);
        m_connector->getProp(DrmConnector::PropertyIndex::Underscan_hborder)->setPending(hborder);
    }
    if (const auto bpc = m_connector->getProp(DrmConnector::PropertyIndex::MaxBpc)) {
        uint64_t preferred = 8;
        if (auto backend = dynamic_cast<EglGbmBackend *>(gpu()->platform()->renderBackend()); backend && backend->prefer10bpc()) {
            preferred = 10;
        }
        bpc->setPending(std::min(bpc->maxValue(), preferred));
    }
    if (const auto hdr = m_connector->getProp(DrmConnector::PropertyIndex::HdrMetadata)) {
        hdr->setPending(0);
    }
    if (const auto scaling = m_connector->getProp(DrmConnector::PropertyIndex::ScalingMode); scaling && scaling->hasEnum(DrmConnector::ScalingMode::None)) {
        scaling->setEnum(DrmConnector::ScalingMode::None);
    }

    m_pending.crtc->setPending(DrmCrtc::PropertyIndex::Active, 1);
    m_pending.crtc->setPending(DrmCrtc::PropertyIndex::ModeId, m_pending.mode->blobId());

    m_pending.crtc->primaryPlane()->setPending(DrmPlane::PropertyIndex::CrtcId, m_pending.crtc->id());
    if (const auto rotation = m_pending.crtc->primaryPlane()->getProp(DrmPlane::PropertyIndex::Rotation)) {
        rotation->setEnum(m_pending.bufferOrientation);
    }
    if (m_pending.crtc->cursorPlane()) {
        if (const auto rotation = m_pending.crtc->cursorPlane()->getProp(DrmPlane::PropertyIndex::Rotation)) {
            rotation->setEnum(DrmPlane::Transformation::Rotate0);
        }
    }
}

bool DrmPipeline::populateAtomicValues(drmModeAtomicReq *req)
{
    if (!m_connector->atomicPopulate(req)) {
        return false;
    }
    if (m_pending.crtc) {
        if (!m_pending.crtc->atomicPopulate(req)) {
            return false;
        }
        if (!m_pending.crtc->primaryPlane()->atomicPopulate(req)) {
            return false;
        }
        if (m_pending.crtc->cursorPlane() && !m_pending.crtc->cursorPlane()->atomicPopulate(req)) {
            return false;
        }
    }
    return true;
}

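// Falls back to Rotate0 if the primary plane can't apply the requested buffer transformation.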
void DrmPipeline::checkHardwareRotation()
{
    if (m_pending.crtc && m_pending.crtc->primaryPlane()) {
        const bool supported = (m_pending.bufferOrientation & m_pending.crtc->primaryPlane()->supportedTransformations());
        if (!supported) {
            m_pending.bufferOrientation = DrmPlane::Transformation::Rotate0;
        }
    } else {
        m_pending.bufferOrientation = DrmPlane::Transformation::Rotate0;
    }
}

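// Derives the horizontal underscan border from the vertical one via the mode's aspect ratio,
// clamped to 128.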
uint32_t DrmPipeline::calculateUnderscan()
{
    const auto size = m_pending.mode->size();
    const float aspectRatio = size.width() / static_cast<float>(size.height());
    uint32_t hborder = m_pending.overscan * aspectRatio;
    if (hborder > 128) {
        // overscan only goes from 0-100 so we cut off the 101-128 value range of underscan_vborder
        hborder = 128;
        m_pending.overscan = 128 / aspectRatio;
    }
    return hborder;
}

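// Maps the errno left behind by a failed drm call to a pipeline Error.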
DrmPipeline::Error DrmPipeline::errnoToError()
{
    switch (errno) {
    case EINVAL:
        return Error::InvalidArguments;
    case EBUSY:
        return Error::FramePending;
    case ENOMEM:
        return Error::OutofMemory;
    case EACCES:
        return Error::NoPermission;
    default:
        return Error::Unknown;
    }
}

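// The following helpers roll back or commit the pending property state of the connector,
// CRTC and planes after a failed or successful test/commit.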
void DrmPipeline::atomicCommitFailed()
{
    m_connector->rollbackPending();
    if (m_pending.crtc) {
        m_pending.crtc->rollbackPending();
        m_pending.crtc->primaryPlane()->rollbackPending();
        if (m_pending.crtc->cursorPlane()) {
            m_pending.crtc->cursorPlane()->rollbackPending();
        }
    }
}

void DrmPipeline::atomicTestSuccessful()
{
    m_connector->commitPending();
    if (m_pending.crtc) {
        m_pending.crtc->commitPending();
        m_pending.crtc->primaryPlane()->commitPending();
        if (m_pending.crtc->cursorPlane()) {
            m_pending.crtc->cursorPlane()->commitPending();
        }
    }
}

void DrmPipeline::atomicCommitSuccessful()
{
    atomicTestSuccessful();
    m_pending.needsModeset = false;
    if (activePending()) {
        m_pageflipPending = true;
    }
    m_connector->commit();
    if (m_pending.crtc) {
        m_pending.crtc->commit();
        m_pending.crtc->primaryPlane()->setNext(m_pending.layer->currentBuffer());
        m_pending.crtc->primaryPlane()->commit();
        if (m_pending.crtc->cursorPlane()) {
            m_pending.crtc->cursorPlane()->setNext(cursorLayer()->currentBuffer());
            m_pending.crtc->cursorPlane()->commit();
        }
    }
    m_current = m_next = m_pending;
}

void DrmPipeline::atomicModesetSuccessful()
{
    atomicCommitSuccessful();
    if (activePending()) {
        pageFlipped(std::chrono::steady_clock::now().time_since_epoch());
    }
}

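// Updates the cursor hotspot and tests whether the new cursor state can be applied;
// reverts the pending state if it can't.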
bool DrmPipeline::setCursor(const QPoint &hotspot)
{
    bool result;
    m_pending.cursorHotspot = hotspot;
    // explicitly check for the cursor plane and not for AMS, as we might not always have one
    if (m_pending.crtc->cursorPlane()) {
        result = commitPipelines({this}, CommitMode::Test) == Error::None;
        if (result && m_output) {
            m_output->renderLoop()->scheduleRepaint();
        }
    } else {
        result = setCursorLegacy();
    }
    if (result) {
        m_next = m_pending;
    } else {
        m_pending = m_next;
    }
    return result;
}

bool DrmPipeline::moveCursor()
{
    bool result;
    // explicitly check for the cursor plane and not for AMS, as we might not always have one
    if (m_pending.crtc->cursorPlane()) {
        result = commitPipelines({this}, CommitMode::Test) == Error::None;
    } else {
        result = moveCursorLegacy();
    }
    if (result) {
        m_next = m_pending;
        if (m_output) {
            m_output->renderLoop()->scheduleRepaint();
        }
    } else {
        m_pending = m_next;
    }
    return result;
}

void DrmPipeline::applyPendingChanges()
{
    m_next = m_pending;
}

QSize DrmPipeline::bufferSize() const
{
    const auto modeSize = m_pending.mode->size();
    if (m_pending.bufferOrientation & (DrmPlane::Transformation::Rotate90 | DrmPlane::Transformation::Rotate270)) {
        return modeSize.transposed();
    }
    return modeSize;
}

DrmConnector *DrmPipeline::connector() const
{
    return m_connector;
}

DrmGpu *DrmPipeline::gpu() const
{
    return m_connector->gpu();
}

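// Called once the last commit has been presented: flips the buffers of the CRTC and its
// planes and notifies the output.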
void DrmPipeline::pageFlipped(std::chrono::nanoseconds timestamp)
{
    m_current.crtc->flipBuffer();
    if (m_current.crtc->primaryPlane()) {
        m_current.crtc->primaryPlane()->flipBuffer();
    }
    if (m_current.crtc->cursorPlane()) {
        m_current.crtc->cursorPlane()->flipBuffer();
    }
    m_pageflipPending = false;
    if (m_output) {
        m_output->pageFlipped(timestamp);
    }
}

void DrmPipeline::setOutput(DrmOutput *output)
{
    m_output = output;
}

DrmOutput *DrmPipeline::output() const
{
    return m_output;
}

QMap<uint32_t, QVector<uint64_t>> DrmPipeline::formats() const
{
    return m_pending.formats;
}

QMap<uint32_t, QVector<uint64_t>> DrmPipeline::cursorFormats() const
{
    if (m_pending.crtc && m_pending.crtc->cursorPlane()) {
        return m_pending.crtc->cursorPlane()->formats();
    } else {
        return legacyCursorFormats;
    }
}

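// If the current buffer uses an explicit modifier, clears the modifier list for its format;
// returns true if anything was cleared.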
bool DrmPipeline::pruneModifier()
{
    if (!m_pending.layer->currentBuffer()
        || m_pending.layer->currentBuffer()->buffer()->modifier() == DRM_FORMAT_MOD_NONE
        || m_pending.layer->currentBuffer()->buffer()->modifier() == DRM_FORMAT_MOD_INVALID) {
        return false;
    }
    auto &modifiers = m_pending.formats[m_pending.layer->currentBuffer()->buffer()->format()];
    if (modifiers.empty()) {
        return false;
    } else {
        modifiers.clear();
        return true;
    }
}

bool DrmPipeline::needsModeset() const
{
    return m_pending.needsModeset;
}

bool DrmPipeline::activePending() const
{
    return m_pending.crtc && m_pending.mode && m_pending.active;
}

void DrmPipeline::revertPendingChanges()
{
    m_pending = m_next;
}

bool DrmPipeline::pageflipPending() const
{
    return m_pageflipPending;
}

bool DrmPipeline::modesetPresentPending() const
{
    return m_modesetPresentPending;
}

void DrmPipeline::resetModesetPresentPending()
{
    m_modesetPresentPending = false;
}

DrmCrtc *DrmPipeline::currentCrtc() const
{
    return m_current.crtc;
}

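// Converts the color transformation into a lookup table sized for the CRTC and, with
// atomic modesetting, uploads it as a property blob for the GAMMA_LUT property.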
DrmGammaRamp::DrmGammaRamp(DrmCrtc *crtc, const std::shared_ptr<ColorTransformation> &transformation)
    : m_gpu(crtc->gpu())
    , m_lut(transformation, crtc->gammaRampSize())
{
    if (crtc->gpu()->atomicModeSetting()) {
        QVector<drm_color_lut> atomicLut(m_lut.size());
        for (uint32_t i = 0; i < m_lut.size(); i++) {
            atomicLut[i].red = m_lut.red()[i];
            atomicLut[i].green = m_lut.green()[i];
            atomicLut[i].blue = m_lut.blue()[i];
        }
        if (drmModeCreatePropertyBlob(crtc->gpu()->fd(), atomicLut.data(), sizeof(drm_color_lut) * m_lut.size(), &m_blobId) != 0) {
            qCWarning(KWIN_DRM) << "Failed to create gamma blob!" << strerror(errno);
        }
    }
}

DrmGammaRamp::~DrmGammaRamp()
{
    if (m_blobId != 0) {
        drmModeDestroyPropertyBlob(m_gpu->fd(), m_blobId);
    }
}

uint32_t DrmGammaRamp::blobId() const
{
    return m_blobId;
}

const ColorLUT &DrmGammaRamp::lut() const
{
    return m_lut;
}

void DrmPipeline::printFlags(uint32_t flags)
{
    if (flags == 0) {
        qCDebug(KWIN_DRM) << "Flags: none";
    } else {
        qCDebug(KWIN_DRM) << "Flags:";
        if (flags & DRM_MODE_PAGE_FLIP_EVENT) {
            qCDebug(KWIN_DRM) << "\t DRM_MODE_PAGE_FLIP_EVENT";
        }
        if (flags & DRM_MODE_ATOMIC_ALLOW_MODESET) {
            qCDebug(KWIN_DRM) << "\t DRM_MODE_ATOMIC_ALLOW_MODESET";
        }
        if (flags & DRM_MODE_PAGE_FLIP_ASYNC) {
            qCDebug(KWIN_DRM) << "\t DRM_MODE_PAGE_FLIP_ASYNC";
        }
    }
}

void DrmPipeline::printDebugInfo() const
{
    qCDebug(KWIN_DRM) << "Drm objects:";
    m_connector->printProps(DrmObject::PrintMode::All);
    if (m_pending.crtc) {
        m_pending.crtc->printProps(DrmObject::PrintMode::All);
        if (m_pending.crtc->primaryPlane()) {
            m_pending.crtc->primaryPlane()->printProps(DrmObject::PrintMode::All);
        }
        if (m_pending.crtc->cursorPlane()) {
            m_pending.crtc->cursorPlane()->printProps(DrmObject::PrintMode::All);
        }
    }
}

DrmCrtc *DrmPipeline::crtc() const
{
    return m_pending.crtc;
}

std::shared_ptr<DrmConnectorMode> DrmPipeline::mode() const
{
    return m_pending.mode;
}

bool DrmPipeline::active() const
{
    return m_pending.active;
}

bool DrmPipeline::enabled() const
{
    return m_pending.enabled;
}

DrmPipelineLayer *DrmPipeline::primaryLayer() const
{
    return m_pending.layer.get();
}

DrmOverlayLayer *DrmPipeline::cursorLayer() const
{
    return m_pending.cursorLayer.get();
}

DrmPlane::Transformations DrmPipeline::renderOrientation() const
{
    return m_pending.renderOrientation;
}

DrmPlane::Transformations DrmPipeline::bufferOrientation() const
{
    return m_pending.bufferOrientation;
}

RenderLoopPrivate::SyncMode DrmPipeline::syncMode() const
{
    return m_pending.syncMode;
}

uint32_t DrmPipeline::overscan() const
{
    return m_pending.overscan;
}

Output::RgbRange DrmPipeline::rgbRange() const
{
    return m_pending.rgbRange;
}

DrmConnector::DrmContentType DrmPipeline::contentType() const
{
    return m_pending.contentType;
}

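// Assigns a new CRTC; recomputes the gamma ramp if the LUT size differs from the previous
// CRTC's and refreshes the supported format list from the new primary plane.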
void DrmPipeline::setCrtc(DrmCrtc *crtc)
{
    if (crtc && m_pending.crtc && crtc->gammaRampSize() != m_pending.crtc->gammaRampSize() && m_pending.colorTransformation) {
        m_pending.gamma = std::make_shared<DrmGammaRamp>(crtc, m_pending.colorTransformation);
    }
    m_pending.crtc = crtc;
    if (crtc) {
        m_pending.formats = crtc->primaryPlane() ? crtc->primaryPlane()->formats() : legacyFormats;
    } else {
        m_pending.formats = {};
    }
}

void DrmPipeline::setMode(const std::shared_ptr<DrmConnectorMode> &mode)
{
    m_pending.mode = mode;
}

void DrmPipeline::setActive(bool active)
{
    m_pending.active = active;
}

void DrmPipeline::setEnable(bool enable)
{
    m_pending.enabled = enable;
}

void DrmPipeline::setLayers(const std::shared_ptr<DrmPipelineLayer> &primaryLayer, const std::shared_ptr<DrmOverlayLayer> &cursorLayer)
{
    m_pending.layer = primaryLayer;
    m_pending.cursorLayer = cursorLayer;
}

void DrmPipeline::setRenderOrientation(DrmPlane::Transformations orientation)
{
    m_pending.renderOrientation = orientation;
}

void DrmPipeline::setBufferOrientation(DrmPlane::Transformations orientation)
{
    m_pending.bufferOrientation = orientation;
}

void DrmPipeline::setSyncMode(RenderLoopPrivate::SyncMode mode)
{
    m_pending.syncMode = mode;
}

void DrmPipeline::setOverscan(uint32_t overscan)
{
    m_pending.overscan = overscan;
}

void DrmPipeline::setRgbRange(Output::RgbRange range)
{
    m_pending.rgbRange = range;
}

void DrmPipeline::setGammaRamp(const std::shared_ptr<ColorTransformation> &transformation)
{
    m_pending.colorTransformation = transformation;
    m_pending.gamma = std::make_shared<DrmGammaRamp>(m_pending.crtc, transformation);
}

void DrmPipeline::setCTM(const QMatrix3x3 &ctm)
{
    if (ctm.isIdentity()) {
        m_pending.ctm.reset();
    } else {
        m_pending.ctm = std::make_shared<DrmCTM>(gpu(), ctm);
    }
}

void DrmPipeline::setContentType(DrmConnector::DrmContentType type)
{
    m_pending.contentType = type;
}

static uint64_t doubleToFixed(double value)
{
    // ctm values are in S31.32 sign-magnitude format
    uint64_t ret = std::abs(value) * (1ul << 32);
    if (value < 0) {
        ret |= 1ul << 63;
    }
    return ret;
}

DrmCTM::DrmCTM(DrmGpu *gpu, const QMatrix3x3 &ctm)
    : m_gpu(gpu)
{
    drm_color_ctm blob = {
        .matrix = {
            doubleToFixed(ctm(0, 0)), doubleToFixed(ctm(1, 0)), doubleToFixed(ctm(2, 0)),
            doubleToFixed(ctm(0, 1)), doubleToFixed(ctm(1, 1)), doubleToFixed(ctm(2, 1)),
            doubleToFixed(ctm(0, 2)), doubleToFixed(ctm(1, 2)), doubleToFixed(ctm(2, 2))},
    };
    drmModeCreatePropertyBlob(m_gpu->fd(), &blob, sizeof(drm_color_ctm), &m_blobId);
}

DrmCTM::~DrmCTM()
{
    if (m_blobId) {
        drmModeDestroyPropertyBlob(m_gpu->fd(), m_blobId);
    }
}

uint32_t DrmCTM::blobId() const
{
    return m_blobId;
}
}