/*
    SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
    SPDX-FileCopyrightText: 2020 Harald Sitter <sitter@kde.org>
*/

#include "transfer.h"

#include <future>

TransferSegment::TransferSegment(const off_t fileSize)
    : buf(segmentSizeForFileSize(fileSize))
{
}

off_t TransferSegment::segmentSizeForFileSize(const off_t fileSize_)
{
    const off_t fileSize = qMax<off_t>(0, fileSize_);

    // read() internally splits our read requests into multiple server
    // requests and then assembles the responses into our buffer.
    // The larger the chunks we request, the better the performance.
    // At the same time we'll want a semblance of progress reporting
    // and also not eat too much RAM. It's a balancing act :|
    off_t segmentSize = c_minSegmentSize;
    // The segment size is largely arbitrary and trades some throughput for
    // lower memory use.
    // It only goes up to a maximum because bigger transfer blobs directly
    // translate to more RAM use. Mind that the effective RAM use will
    // be (segmentSize * (segments + 1)). The +1 is because smbc internally will also
    // allocate up to a full segment for one read() call.
    //
    // Unfortunately we have no way of knowing what size smbc will use for the
    // network requests, so we can't use a multiple of that. Which means we'll
    // almost never reach peak performance.
    //
    // TODO: perhaps it would actually make sense to read at a multiple of
    // the target drive's block size?
    const off_t idealSegmentSize = qMin<off_t>(fileSize / 50, c_maxSegmentSize);
    segmentSize = qMax<off_t>(segmentSize, idealSegmentSize);
    // If the segment size is larger than the file size, it appears we can
    // actually degrade performance, so pick the smaller of the two.
    if (fileSize != 0) {
        segmentSize = qMin<off_t>(segmentSize, fileSize);
    }
    return segmentSize;
}

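// A worked example of the heuristic above, using hypothetical constants:
// assuming c_minSegmentSize were 64 KiB and c_maxSegmentSize were 4 MiB
// (the actual values are defined in transfer.h), we would get:
//   fileSize unknown (0) -> fileSize / 50 = 0        -> 64 KiB (minimum wins)
//   fileSize =   1 MiB   -> fileSize / 50 = ~20 KiB  -> 64 KiB (minimum wins)
//   fileSize =  10 MiB   -> fileSize / 50 = ~205 KiB -> ~205 KiB
//   fileSize =   1 GiB   -> fileSize / 50 = ~20 MiB  -> 4 MiB (maximum wins)
// Per the RAM note above, a 1 GiB transfer would then hold roughly
// 4 MiB * (segments + 1) in memory at any given time.
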
TransferRingBuffer::TransferRingBuffer(const off_t fileSize)
{
    for (size_t i = 0; i < m_capacity; ++i) {
        m_buffer[i] = std::make_unique<TransferSegment>(fileSize);
    }
}

// Blocks until at least one filled segment is available (head != tail),
// then returns the segment at the tail without advancing it; the caller
// releases the slot via unpop(). Returns nullptr once the producer has
// called done() and the buffer has drained.
TransferSegment *TransferRingBuffer::pop()
{
    std::unique_lock<std::mutex> lock(m_mutex);

    while (head == tail) {
        if (!m_done) {
            m_cond.wait(lock);
        } else {
            return nullptr;
        }
    }

    auto segment = m_buffer[tail].get();
    m_cond.notify_all();
    return segment;
}

// Releases the segment previously returned by pop(): advances the tail
// and wakes a producer that may be waiting in push() for a free slot.
void TransferRingBuffer::unpop()
{
    std::unique_lock<std::mutex> lock(m_mutex);
    tail = (tail + 1) % m_capacity;
    m_cond.notify_all();
}

TransferSegment *TransferRingBuffer::nextFree()
{
    // This does not require synchronization. As soon as we pushed the
    // previous item we gained exclusive ownership of the new head item.
    m_cond.notify_all();
    return m_buffer[head].get();
}

// Publishes the segment obtained from nextFree(): advances the head and
// wakes a reader that may be waiting in pop() for data.
void TransferRingBuffer::push()
{
    const auto newHead = (head + 1) % m_capacity;
    std::unique_lock<std::mutex> lock(m_mutex);
    while (newHead == tail) {
        // Do not move onto the item the reading thread is still on.
        m_cond.wait(lock);
    }
    head = newHead;
    m_cond.notify_all();
}

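// Note the resulting invariant: head == tail means "empty", so at most
// m_capacity - 1 segments can be in flight at once and one slot is always
// kept free as a separator. For a hypothetical capacity of 4:
//   head=0 tail=0          empty, pop() blocks
//   push x3 -> head=3      full, push() blocks
//   unpop   -> tail=1      one slot free, push() resumes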
// Called by the producer after its final push(); wakes a reader blocked
// in pop() so it can observe m_done and return nullptr.
void TransferRingBuffer::done()
{
    std::unique_lock<std::mutex> lock(m_mutex);
    m_done = true;
    m_cond.notify_all();
}
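
// A minimal usage sketch of the ring buffer as a whole, assuming one
// producer thread and one consumer thread (the only arrangement the
// synchronization above supports). moreDataAvailable(), readFromServer()
// and writeToFile() are hypothetical stand-ins for the actual smbc/KIO
// transfer code:
//
//   TransferRingBuffer ring(fileSize);
//   std::thread producer([&ring] {
//       while (moreDataAvailable()) { // hypothetical
//           TransferSegment *segment = ring.nextFree();
//           readFromServer(segment); // hypothetical
//           ring.push();
//       }
//       ring.done(); // lets pop() return nullptr once the buffer drains
//   });
//   while (TransferSegment *segment = ring.pop()) {
//       writeToFile(segment); // hypothetical
//       ring.unpop(); // hand the slot back to the producer
//   }
//   producer.join();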