/*
 *    This file is part of the KDE project.
 *
 *    SPDX-FileCopyrightText: 2010 Michael Pyne <mpyne@kde.org>
 *    SPDX-License-Identifier: LGPL-2.0-only
 *
 *    This library includes "MurmurHash" code from Austin Appleby, which is
 *    placed in the public domain. See http://sites.google.com/site/murmurhash/
 */

#include "kcoreaddons_debug.h"

#include "ksdcmemory_p.h"

#include <QByteArray>

//-----------------------------------------------------------------------------
// MurmurHashAligned, by Austin Appleby
// (Released to the public domain, or licensed under the MIT license where
// software may not be released to the public domain. See
// http://sites.google.com/site/murmurhash/)

// Same algorithm as MurmurHash, but only does aligned reads - should be safer
// on certain platforms.
static unsigned int MurmurHashAligned(const void *key, int len, unsigned int seed)
{
    const unsigned int m = 0xc6a4a793;
    const int r = 16;

    const unsigned char *data = reinterpret_cast<const unsigned char *>(key);

    unsigned int h = seed ^ (len * m);

    int align = reinterpret_cast<quintptr>(data) & 3;

    if (align && len >= 4) {
        // Pre-load the temp registers

        unsigned int t = 0;
        unsigned int d = 0;

        switch (align) {
        case 1:
            t |= data[2] << 16;
            Q_FALLTHROUGH();
        case 2:
            t |= data[1] << 8;
            Q_FALLTHROUGH();
        case 3:
            t |= data[0];
        }

        t <<= (8 * align);

        data += 4 - align;
        len -= 4 - align;

        int sl = 8 * (4 - align);
        int sr = 8 * align;

        // Mix

        while (len >= 4) {
            d = *reinterpret_cast<const unsigned int *>(data);
            t = (t >> sr) | (d << sl);
            h += t;
            h *= m;
            h ^= h >> r;
            t = d;

            data += 4;
            len -= 4;
        }

        // Handle leftover data in temp registers

        int pack = len < align ? len : align;

        d = 0;

        switch (pack) {
        case 3:
            d |= data[2] << 16;
            Q_FALLTHROUGH();
        case 2:
            d |= data[1] << 8;
            Q_FALLTHROUGH();
        case 1:
            d |= data[0];
            Q_FALLTHROUGH();
        case 0:
            h += (t >> sr) | (d << sl);
            h *= m;
            h ^= h >> r;
        }

        data += pack;
        len -= pack;
    } else {
        while (len >= 4) {
            h += *reinterpret_cast<const unsigned int *>(data);
            h *= m;
            h ^= h >> r;

            data += 4;
            len -= 4;
        }
    }

    //----------
    // Handle tail bytes

    switch (len) {
    case 3:
        h += data[2] << 16;
        Q_FALLTHROUGH();
    case 2:
        h += data[1] << 8;
        Q_FALLTHROUGH();
    case 1:
        h += data[0];
        h *= m;
        h ^= h >> r;
    };

    h *= m;
    h ^= h >> 10;
    h *= m;
    h ^= h >> 17;

    return h;
}

/**
 * This is the hash function used for our data, intended to make the
 * placement of the QByteArrays in the hash as efficient as possible.
 */
quint32 SharedMemory::generateHash(const QByteArray &buffer)
{
    // The final constant is the "seed" for MurmurHash. Do *not* change it
    // without incrementing the cache version.
    return MurmurHashAligned(buffer.data(), buffer.size(), 0xF0F00F0F);
}

// Alignment concerns become a big deal when we're dealing with shared memory,
// since trying to access a structure sized at, say, 8 bytes at an address that
// is not evenly divisible by 8 is a crash-inducing error on some
// architectures. The compiler would normally take care of this, but with
// shared memory the compiler will not necessarily know the alignment expected,
// so make sure we account for this ourselves. To do so we need a way to find
// out the expected alignment. Enter ALIGNOF...
#ifndef ALIGNOF
#if defined(Q_CC_GNU) || defined(Q_CC_SUN)
#define ALIGNOF(x) (__alignof__(x)) // GCC provides what we want directly
#else

#include <stddef.h> // offsetof

template<class T>
struct __alignmentHack {
    char firstEntry;
    T obj;
    static const size_t size = offsetof(__alignmentHack, obj);
};
#define ALIGNOF(x) (__alignmentHack<x>::size)
#endif // Non gcc
#endif // ALIGNOF undefined
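
// For example, on a typical 64-bit ABI this yields ALIGNOF(char) == 1,
// ALIGNOF(quint32) == 4 and ALIGNOF(quint64) == 8: the padding the compiler
// inserts between firstEntry and obj in __alignmentHack<T> is exactly the
// amount needed to align T, and offsetof() exposes it.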

// Returns a pointer aligned to a boundary of the given size.
// size should be a power of 2. start is assumed to be the lowest
// permissible address, therefore the return value will be >= start.
template<class T>
T *alignTo(const void *start, uint size = ALIGNOF(T))
{
    quintptr mask = size - 1;

    // Cast to int-type to handle bit-twiddling
    quintptr basePointer = reinterpret_cast<quintptr>(start);

    // If (and only if) we are already aligned, adding mask into basePointer
    // will not increment any of the bits in ~mask and we get the right answer.
    basePointer = (basePointer + mask) & ~mask;

    return reinterpret_cast<T *>(basePointer);
}
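
// A worked example of the rounding trick above, assuming a 4-byte alignment
// (mask == 0x3):
//   (0x1005 + 0x3) & ~0x3 == 0x1008   -- rounded up to the next multiple of 4
//   (0x1008 + 0x3) & ~0x3 == 0x1008   -- already aligned, left unchanged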

/**
 * Returns a pointer to a const object of type T, assumed to be @p offset
 * *BYTES* greater than the base address. Note that in order to meet alignment
 * requirements for T, it is possible that the returned pointer points greater
 * than @p offset into @p base.
 */
template<class T>
const T *offsetAs(const void *const base, qint32 offset)
{
    const char *ptr = reinterpret_cast<const char *>(base);
    return alignTo<const T>(ptr + offset);
}

// Same as above, but for non-const objects
template<class T>
T *offsetAs(void *const base, qint32 offset)
{
    char *ptr = reinterpret_cast<char *>(base);
    return alignTo<T>(ptr + offset);
}
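
// For example, offsetAs<PageTableEntry>(base, 100) points at least 100 bytes
// past base, rounded up (if needed) so that the result satisfies
// PageTableEntry's alignment requirement.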

/**
 * @return the smallest integer greater than or equal to (@p a / @p b).
 * @param a Numerator, should be ≥ 0.
 * @param b Denominator, should be > 0.
 */
unsigned SharedMemory::intCeil(unsigned a, unsigned b)
{
    // The overflow check is unsigned and so is actually defined behavior.
    if (Q_UNLIKELY(b == 0 || ((a + b) < a))) {
        throw KSDCCorrupted();
    }

    return (a + b - 1) / b;
}
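
// For example, intCeil(10, 4) == 3 and intCeil(8, 4) == 2. The unsigned
// overflow check above throws KSDCCorrupted rather than returning a
// wrapped-around result when a + b would overflow.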

/**
 * @return number of set bits in @p value (see also "Hamming weight")
 */
static unsigned countSetBits(unsigned value)
{
    // K&R / Wegner's algorithm used. GCC supports __builtin_popcount but we
    // expect there to always be only 1 bit set so this should be perhaps a bit
    // faster 99.9% of the time.
    unsigned count = 0;
    for (count = 0; value != 0; count++) {
        value &= (value - 1); // Clears least-significant set bit.
    }
    return count;
}
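
// For example, value == 0b101100 takes exactly three iterations:
//   0b101100 & 0b101011 == 0b101000
//   0b101000 & 0b100111 == 0b100000
//   0b100000 & 0b011111 == 0         => count == 3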

/**
 * Converts the given average item size into an appropriate page size.
 */
unsigned SharedMemory::equivalentPageSize(unsigned itemSize)
{
    if (itemSize == 0) {
        return 4096; // Default average item size.
    }

    int log2OfSize = 0;
    while ((itemSize >>= 1) != 0) {
        log2OfSize++;
    }

    // Bound page size between 512 bytes and 256 KiB.
    // If this is adjusted, also alter validSizeMask in cachePageSize
    log2OfSize = qBound(9, log2OfSize, 18);

    return (1 << log2OfSize);
}
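
// For example, an average item size of 10000 bytes has floor(log2) == 13 and
// therefore maps to an 8192-byte page; anything below 512 bytes clamps to 512
// and anything above 256 KiB clamps to 256 KiB (2^9 through 2^18).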

// Returns pageSize in unsigned format.
unsigned SharedMemory::cachePageSize() const
{
    unsigned _pageSize = static_cast<unsigned>(pageSize.loadRelaxed());
    // bits 9-18 may be set.
    static const unsigned validSizeMask = 0x7FE00u;

    // Check for page sizes that are not a power-of-2, or are too low/high.
    if (Q_UNLIKELY(countSetBits(_pageSize) != 1 || (_pageSize & ~validSizeMask))) {
        throw KSDCCorrupted();
    }

    return _pageSize;
}
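
// Note that validSizeMask above is simply the OR of every permitted
// power-of-2 page size: (1 << 9) | (1 << 10) | ... | (1 << 18) == 0x7FE00.
// A stored page size with any other bit set (or with more than one bit set)
// indicates a corrupted shared header.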

/**
 * This is effectively the class ctor.  But since we're in shared memory,
 * there are a few rules:
 *
 * 1. To allow for some form of locking in the initial-setup case, we
 * use an atomic int, which will be initialized to 0 by mmap().  Then to
 * take the lock we atomically increment the 0 to 1.  If we end up calling
 * the QAtomicInt constructor we can mess that up, so we can't use a
 * constructor for this class either.
 * 2. Any member variable you add takes up space in shared memory as well,
 * so make sure you need it.
 */
bool SharedMemory::performInitialSetup(uint _cacheSize, uint _pageSize)
{
    if (_cacheSize < MINIMUM_CACHE_SIZE) {
        qCCritical(KCOREADDONS_DEBUG) << "Internal error: Attempted to create a cache sized < " << MINIMUM_CACHE_SIZE;
        return false;
    }

    if (_pageSize == 0) {
        qCCritical(KCOREADDONS_DEBUG) << "Internal error: Attempted to create a cache with 0-sized pages.";
        return false;
    }

    shmLock.type = findBestSharedLock();
    if (shmLock.type == LOCKTYPE_INVALID) {
        qCCritical(KCOREADDONS_DEBUG) << "Unable to find an appropriate lock to guard the shared cache. "
                                      << "This *should* be essentially impossible. :(";
        return false;
    }

    bool isProcessShared = false;
    std::unique_ptr<KSDCLock> tempLock(createLockFromId(shmLock.type, shmLock));

    if (!tempLock->initialize(isProcessShared)) {
        qCCritical(KCOREADDONS_DEBUG) << "Unable to initialize the lock for the cache!";
        return false;
    }

    if (!isProcessShared) {
        qCWarning(KCOREADDONS_DEBUG) << "Cache initialized, but does not support being"
                                     << "shared across processes.";
    }

    // These must be updated to make some of our auxiliary functions
    // work right since their values will be based on the cache size.
    cacheSize = _cacheSize;
    pageSize = _pageSize;
    version = PIXMAP_CACHE_VERSION;
    cacheTimestamp = static_cast<unsigned>(::time(nullptr));

    clearInternalTables();

    // Unlock the mini-lock, and introduce a total memory barrier to make
    // sure all changes have propagated even without a mutex.
    return ready.ref();
}

void SharedMemory::clearInternalTables()
{
    // Assumes we're already locked somehow.
    cacheAvail = pageTableSize();

    // Setup page tables to point nowhere
    PageTableEntry *table = pageTable();
    for (uint i = 0; i < pageTableSize(); ++i) {
        table[i].index = -1;
    }

    // Setup index tables to be accurate.
    IndexTableEntry *indices = indexTable();
    for (uint i = 0; i < indexTableSize(); ++i) {
        indices[i].firstPage = -1;
        indices[i].useCount = 0;
        indices[i].fileNameHash = 0;
        indices[i].totalItemSize = 0;
        indices[i].addTime = 0;
        indices[i].lastUsedTime = 0;
    }
}

const IndexTableEntry *SharedMemory::indexTable() const
{
    // Index Table goes immediately after this struct, at the first byte
    // where alignment constraints are met (accounted for by offsetAs).
    return offsetAs<IndexTableEntry>(this, sizeof(*this));
}

const PageTableEntry *SharedMemory::pageTable() const
{
    const IndexTableEntry *base = indexTable();
    base += indexTableSize();

    // Let's call wherever we end up the start of the page table...
    return alignTo<PageTableEntry>(base);
}

const void *SharedMemory::cachePages() const
{
    const PageTableEntry *tableStart = pageTable();
    tableStart += pageTableSize();

    // Let's call wherever we end up the start of the data...
    return alignTo<void>(tableStart, cachePageSize());
}

const void *SharedMemory::page(pageID at) const
{
    if (static_cast<uint>(at) >= pageTableSize()) {
        return nullptr;
    }

    // We must manually calculate this one since pageSize varies.
    const char *pageStart = reinterpret_cast<const char *>(cachePages());
    pageStart += (at * cachePageSize());

    return reinterpret_cast<const void *>(pageStart);
}

// The following are non-const versions of some of the methods defined
// above.  They use const_cast<> because I feel that is better than
// duplicating the code.  I suppose template member functions (?)
// may work, may investigate later.
IndexTableEntry *SharedMemory::indexTable()
{
    const SharedMemory *that = const_cast<const SharedMemory *>(this);
    return const_cast<IndexTableEntry *>(that->indexTable());
}

PageTableEntry *SharedMemory::pageTable()
{
    const SharedMemory *that = const_cast<const SharedMemory *>(this);
    return const_cast<PageTableEntry *>(that->pageTable());
}

void *SharedMemory::cachePages()
{
    const SharedMemory *that = const_cast<const SharedMemory *>(this);
    return const_cast<void *>(that->cachePages());
}

void *SharedMemory::page(pageID at)
{
    const SharedMemory *that = const_cast<const SharedMemory *>(this);
    return const_cast<void *>(that->page(at));
}

uint SharedMemory::pageTableSize() const
{
    return cacheSize / cachePageSize();
}

uint SharedMemory::indexTableSize() const
{
    // Assume 2 pages on average are needed -> the number of entries
    // would be half of the number of pages.
    return pageTableSize() / 2;
}
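
// For example, a 1 MiB cache using 4 KiB pages ends up with
// pageTableSize() == 256 and indexTableSize() == 128, i.e. one index entry
// for every two pages.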

/**
 * @return the index of the first page, for the set of contiguous
 * pages that can hold @p pagesNeeded PAGES.
 */
pageID SharedMemory::findEmptyPages(uint pagesNeeded) const
{
    if (Q_UNLIKELY(pagesNeeded > pageTableSize())) {
        return pageTableSize();
    }

    // Loop through the page table, find the first empty page, and just
    // make sure that there are enough free pages.
    const PageTableEntry *table = pageTable();
    uint contiguousPagesFound = 0;
    pageID base = 0;
    for (pageID i = 0; i < static_cast<int>(pageTableSize()); ++i) {
        if (table[i].index < 0) {
            if (contiguousPagesFound == 0) {
                base = i;
            }
            contiguousPagesFound++;
        } else {
            contiguousPagesFound = 0;
        }

        if (contiguousPagesFound == pagesNeeded) {
            return base;
        }
    }

    return pageTableSize();
}

// left < right?
bool SharedMemory::lruCompare(const IndexTableEntry &l, const IndexTableEntry &r)
{
    // Ensure invalid entries migrate to the end
    if (l.firstPage < 0 && r.firstPage >= 0) {
        return false;
    }
    if (l.firstPage >= 0 && r.firstPage < 0) {
        return true;
    }

    // Most recently used will have the highest absolute time =>
    // least recently used (lowest) should go first => use left < right
    return l.lastUsedTime < r.lastUsedTime;
}

// left < right?
bool SharedMemory::seldomUsedCompare(const IndexTableEntry &l, const IndexTableEntry &r)
{
    // Ensure invalid entries migrate to the end
    if (l.firstPage < 0 && r.firstPage >= 0) {
        return false;
    }
    if (l.firstPage >= 0 && r.firstPage < 0) {
        return true;
    }

    // Put lowest use count at start by using left < right
    return l.useCount < r.useCount;
}

// left < right?
bool SharedMemory::ageCompare(const IndexTableEntry &l, const IndexTableEntry &r)
{
    // Ensure invalid entries migrate to the end
    if (l.firstPage < 0 && r.firstPage >= 0) {
        return false;
    }
    if (l.firstPage >= 0 && r.firstPage < 0) {
        return true;
    }

    // Oldest entries die first -- they have the lowest absolute add time,
    // so just like the others use left < right
    return l.addTime < r.addTime;
}

void SharedMemory::defragment()
{
    if (cacheAvail * cachePageSize() == cacheSize) {
        return; // That was easy
    }

    qCDebug(KCOREADDONS_DEBUG) << "Defragmenting the shared cache";

    // Just do a linear scan, and anytime there is free space, swap it
    // with the pages to its right. In order to meet the precondition
    // we need to skip any used pages first.

    pageID currentPage = 0;
    pageID idLimit = static_cast<pageID>(pageTableSize());
    PageTableEntry *pages = pageTable();

    if (Q_UNLIKELY(!pages || idLimit <= 0)) {
        throw KSDCCorrupted();
    }

    // Skip used pages
    while (currentPage < idLimit && pages[currentPage].index >= 0) {
        ++currentPage;
    }

    pageID freeSpot = currentPage;

    // Main loop, starting from a free page, skip to the used pages and
    // move them back.
    while (currentPage < idLimit) {
        // Find the next used page
        while (currentPage < idLimit && pages[currentPage].index < 0) {
            ++currentPage;
        }

        if (currentPage >= idLimit) {
            break;
        }

        // Found an entry, move it.
        qint32 affectedIndex = pages[currentPage].index;
        if (Q_UNLIKELY(affectedIndex < 0 || affectedIndex >= idLimit || indexTable()[affectedIndex].firstPage != currentPage)) {
            throw KSDCCorrupted();
        }

        indexTable()[affectedIndex].firstPage = freeSpot;

        // Moving one page at a time guarantees we can use memcpy safely
        // (in other words, the source and destination will not overlap).
        while (currentPage < idLimit && pages[currentPage].index >= 0) {
            const void *const sourcePage = page(currentPage);
            void *const destinationPage = page(freeSpot);

            // We always move used pages into previously-found empty spots,
            // so check ordering as well for logic errors.
            if (Q_UNLIKELY(!sourcePage || !destinationPage || sourcePage < destinationPage)) {
                throw KSDCCorrupted();
            }

            ::memcpy(destinationPage, sourcePage, cachePageSize());
            pages[freeSpot].index = affectedIndex;
            pages[currentPage].index = -1;
            ++currentPage;
            ++freeSpot;

            // If we've just moved the very last page and it happened to
            // be at the very end of the cache then we're done.
            if (currentPage >= idLimit) {
                break;
            }

            // We're moving consecutive used pages whether they belong to
            // our affected entry or not, so detect if we've started moving
            // the data for a different entry and adjust if necessary.
            if (affectedIndex != pages[currentPage].index) {
                indexTable()[pages[currentPage].index].firstPage = freeSpot;
            }
            affectedIndex = pages[currentPage].index;
        }

        // At this point currentPage is on a page that is unused, and the
        // cycle repeats. However, currentPage is not the first unused
        // page, freeSpot is, so leave it alone.
    }
}

/**
 * Finds the index entry for a given key.
 * @param key UTF-8 encoded key to search for.
 * @return The index of the entry in the cache named by @p key. Returns
 *         <0 if no such entry is present.
 */
qint32 SharedMemory::findNamedEntry(const QByteArray &key) const
{
    uint keyHash = SharedMemory::generateHash(key);
    uint position = keyHash % indexTableSize();
    uint probeNumber = 1; // See insert() for description

    // Imagine 3 entries A, B, C in this logical probing chain. If B is
    // later removed then we can't find C either. So, we must keep
    // probing for up to MAX_PROBE_COUNT tries (or until we find the item,
    // obviously).
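    // The probe offsets form the triangular numbers: for probeNumber = 1, 2,
    // 3, 4, ... we test keyHash + 1, +3, +6, +10, ... (mod indexTableSize()),
    // mirroring the probing sequence described in insert().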
    while (indexTable()[position].fileNameHash != keyHash && probeNumber < MAX_PROBE_COUNT) {
        position = (keyHash + (probeNumber + probeNumber * probeNumber) / 2) % indexTableSize();
        probeNumber++;
    }

    if (indexTable()[position].fileNameHash == keyHash) {
        pageID firstPage = indexTable()[position].firstPage;
        if (firstPage < 0 || static_cast<uint>(firstPage) >= pageTableSize()) {
            return -1;
        }

        const void *resultPage = page(firstPage);
        if (Q_UNLIKELY(!resultPage)) {
            throw KSDCCorrupted();
        }

        const char *utf8FileName = reinterpret_cast<const char *>(resultPage);
        if (qstrncmp(utf8FileName, key.constData(), cachePageSize()) == 0) {
            return position;
        }
    }

    return -1; // Not found, or a different one found.
}

// Function to use with std::unique_ptr in removeUsedPages below...
void SharedMemory::deleteTable(IndexTableEntry *table)
{
    delete[] table;
}

/**
 * Removes the requested number of pages.
 *
 * @param numberNeeded the number of pages required to fulfill a current request.
 *        This number should be > 0 and <= the number of pages in the cache.
 * @return The identifier of the beginning of a consecutive block of pages able
 *         to fill the request. Returns a value >= pageTableSize() if no such
 *         request can be filled.
 * @internal
 */
uint SharedMemory::removeUsedPages(uint numberNeeded)
{
    if (numberNeeded == 0) {
        qCCritical(KCOREADDONS_DEBUG) << "Internal error: Asked to remove exactly 0 pages for some reason.";
        throw KSDCCorrupted();
    }

    if (numberNeeded > pageTableSize()) {
        qCCritical(KCOREADDONS_DEBUG) << "Internal error: Requested more space than exists in the cache.";
        qCCritical(KCOREADDONS_DEBUG) << numberNeeded << "requested, " << pageTableSize() << "is the total possible.";
        throw KSDCCorrupted();
    }

    // If the cache free space is large enough we will defragment first
    // instead since it's likely we're highly fragmented.
    // Otherwise, we will (eventually) simply remove entries per the
    // eviction order set for the cache until there is enough room
    // available to hold the number of pages we need.

    qCDebug(KCOREADDONS_DEBUG) << "Removing old entries to free up" << numberNeeded << "pages," << cacheAvail << "are already theoretically available.";

    if (cacheAvail > 3 * numberNeeded) {
        defragment();
        uint result = findEmptyPages(numberNeeded);

        if (result < pageTableSize()) {
            return result;
        } else {
            qCCritical(KCOREADDONS_DEBUG) << "Just defragmented a locked cache, but still there"
                                          << "isn't enough room for the current request.";
        }
    }

    // At this point we know we'll have to free some space up, so sort our
    // list of entries by whatever the current criteria are and start
    // killing expired entries.
    std::unique_ptr<IndexTableEntry, decltype(deleteTable) *> tablePtr(new IndexTableEntry[indexTableSize()], deleteTable);

    if (!tablePtr) {
        qCCritical(KCOREADDONS_DEBUG) << "Unable to allocate temporary memory for sorting the cache!";
        clearInternalTables();
        throw KSDCCorrupted();
    }

    // We use tablePtr to ensure the data is destroyed, but do the access
    // via a helper pointer to allow for array ops.
    IndexTableEntry *table = tablePtr.get();

    ::memcpy(table, indexTable(), sizeof(IndexTableEntry) * indexTableSize());

    // Our entry ID is simply its index into the
    // index table, which std::sort will rearrange all willy-nilly, so first
    // we'll save the *real* entry ID into firstPage (which is useless in
    // our copy of the index table). On the other hand if the entry is not
    // used then we note that with -1.
    for (uint i = 0; i < indexTableSize(); ++i) {
        table[i].firstPage = table[i].useCount > 0 ? static_cast<pageID>(i) : -1;
    }

    // Declare the comparison function that we'll use to pass to std::sort,
    // based on our cache eviction policy.
    bool (*compareFunction)(const IndexTableEntry &, const IndexTableEntry &);
    switch (evictionPolicy.loadRelaxed()) {
    case KSharedDataCache::EvictLeastOftenUsed:
    case KSharedDataCache::NoEvictionPreference:
    default:
        compareFunction = seldomUsedCompare;
        break;

    case KSharedDataCache::EvictLeastRecentlyUsed:
        compareFunction = lruCompare;
        break;

    case KSharedDataCache::EvictOldest:
        compareFunction = ageCompare;
        break;
    }

    std::sort(table, table + indexTableSize(), compareFunction);

    // Least recently used entries will be in the front.
    // Start killing until we have room.

    // Note on removeEntry: It expects an index into the index table,
    // but our sorted list is all jumbled. But we stored the real index
    // in the firstPage member.
    // Remove entries until we've removed at least the required number
    // of pages.
    uint i = 0;
    while (i < indexTableSize() && numberNeeded > cacheAvail) {
        int curIndex = table[i++].firstPage; // Really an index, not a page

        // Removed everything, still no luck (or curIndex is set but too high).
        if (curIndex < 0 || static_cast<uint>(curIndex) >= indexTableSize()) {
            qCCritical(KCOREADDONS_DEBUG) << "Trying to remove index" << curIndex << "out-of-bounds for index table of size" << indexTableSize();
            throw KSDCCorrupted();
        }

        qCDebug(KCOREADDONS_DEBUG) << "Removing entry of" << indexTable()[curIndex].totalItemSize << "size";
        removeEntry(curIndex);
    }

    // At this point let's see if we have freed up enough data by
    // defragmenting first and seeing if we can find that free space.
    defragment();

    pageID result = pageTableSize();
    while (i < indexTableSize() && (static_cast<uint>(result = findEmptyPages(numberNeeded))) >= pageTableSize()) {
        int curIndex = table[i++].firstPage;

        if (curIndex < 0) {
            // One last shot.
            defragment();
            return findEmptyPages(numberNeeded);
        }

        if (Q_UNLIKELY(static_cast<uint>(curIndex) >= indexTableSize())) {
            throw KSDCCorrupted();
        }

        removeEntry(curIndex);
    }

    // Whew.
    return result;
}

// Returns the total size required for a given cache size.
uint SharedMemory::totalSize(uint cacheSize, uint effectivePageSize)
{
    uint numberPages = intCeil(cacheSize, effectivePageSize);
    uint indexTableSize = numberPages / 2;

    // Knowing the number of pages, we can determine what addresses we'd be
    // using (properly aligned), and from there determine how much memory
    // we'd use.
    IndexTableEntry *indexTableStart = offsetAs<IndexTableEntry>(static_cast<void *>(nullptr), sizeof(SharedMemory));

    indexTableStart += indexTableSize;

    PageTableEntry *pageTableStart = reinterpret_cast<PageTableEntry *>(indexTableStart);
    pageTableStart = alignTo<PageTableEntry>(pageTableStart);
    pageTableStart += numberPages;

    // The weird part, we must manually adjust the pointer based on the page size.
    char *cacheStart = reinterpret_cast<char *>(pageTableStart);
    cacheStart += (numberPages * effectivePageSize);

    // ALIGNOF gives pointer alignment
    cacheStart = alignTo<char>(cacheStart, ALIGNOF(void *));

    // We've traversed the header, index, page table, and cache.
    // Wherever we're at now is the size of the enchilada.
    return static_cast<uint>(reinterpret_cast<quintptr>(cacheStart));
}
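
// For example, with cacheSize == 1 MiB and effectivePageSize == 4096 the walk
// above covers the SharedMemory header, 128 IndexTableEntry records, 256
// PageTableEntry records and 256 * 4096 bytes of page data, plus whatever
// alignment padding each region needs; the final pointer value is the total.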

uint SharedMemory::fileNameHash(const QByteArray &utf8FileName) const
{
    return generateHash(utf8FileName) % indexTableSize();
}

void SharedMemory::clear()
{
    clearInternalTables();
}

// Must be called while the lock is already held!
void SharedMemory::removeEntry(uint index)
{
    if (index >= indexTableSize() || cacheAvail > pageTableSize()) {
        throw KSDCCorrupted();
    }

    PageTableEntry *pageTableEntries = pageTable();
    IndexTableEntry *entriesIndex = indexTable();

    // Update page table first
    pageID firstPage = entriesIndex[index].firstPage;
    if (firstPage < 0 || static_cast<quint32>(firstPage) >= pageTableSize()) {
        qCDebug(KCOREADDONS_DEBUG) << "Trying to remove an entry which is already invalid. This "
                                   << "cache is likely corrupt.";
        throw KSDCCorrupted();
    }

    if (index != static_cast<uint>(pageTableEntries[firstPage].index)) {
        qCCritical(KCOREADDONS_DEBUG) << "Removing entry" << index << "but the matching data"
                                      << "doesn't link back -- cache is corrupt, clearing.";
        throw KSDCCorrupted();
    }

    uint entriesToRemove = intCeil(entriesIndex[index].totalItemSize, cachePageSize());
    uint savedCacheSize = cacheAvail;
    for (uint i = firstPage; i < pageTableSize() && static_cast<uint>(pageTableEntries[i].index) == index; ++i) {
        pageTableEntries[i].index = -1;
        cacheAvail++;
    }

    if ((cacheAvail - savedCacheSize) != entriesToRemove) {
        qCCritical(KCOREADDONS_DEBUG) << "We somehow did not remove" << entriesToRemove << "when removing entry" << index << ", instead we removed"
                                      << (cacheAvail - savedCacheSize);
        throw KSDCCorrupted();
    }

// For debugging
#ifdef NDEBUG
    void *const startOfData = page(firstPage);
    if (startOfData) {
        QByteArray str((const char *)startOfData);
        str.prepend(" REMOVED: ");
        str.prepend(QByteArray::number(index));
        str.prepend("ENTRY ");

        ::memcpy(startOfData, str.constData(), str.size() + 1);
    }
#endif

    // Update the index
    entriesIndex[index].fileNameHash = 0;
    entriesIndex[index].totalItemSize = 0;
    entriesIndex[index].useCount = 0;
    entriesIndex[index].lastUsedTime = 0;
    entriesIndex[index].addTime = 0;
    entriesIndex[index].firstPage = -1;
}