/*------------------------------------------------------------------------
  Junction: Concurrent data structures in C++
  Copyright (c) 2016 Jeff Preshing
  Distributed under the Simplified BSD License.
  Original location: https://github.com/preshing/junction
  This software is distributed WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  See the LICENSE file for more information.
------------------------------------------------------------------------*/

#ifndef QSBR_H
#define QSBR_H

#include <cstring>       // memcpy

#include <QAtomicInt>
#include <QVector>
#include <QMutex>
#include <QMutexLocker>

#include <kis_assert.h>  // KIS_ASSERT
#include <kis_lockless_stack.h>

// Invokes a pointer-to-member function on an object.
#define CALL_MEMBER(obj, pmf) ((obj).*(pmf))

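/**
 * QSBR implements deferred (quiescent-state-based) reclamation for the
 * lock-free map: destruction requests are queued as Actions and executed
 * by update()/flush() only while no thread holds raw-pointer access,
 * i.e. while m_rawPointerUsers is zero. Readers bracket raw-pointer use
 * with lockRawPointerAccess()/unlockRawPointerAccess().
 */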
class QSBR
{
private:
    // A type-erased deferred call: a free-function thunk plus a small
    // inline copy of its parameter block (no heap allocation).
    struct Action {
        void (*func)(void*);
        quint64 param[4]; // Size limit found experimentally. Verified by assert below.

        Action() = default;

        Action(void (*f)(void*), void* p, quint64 paramSize) : func(f)
        {
            KIS_ASSERT(paramSize <= sizeof(param)); // Verify size limit.
            memcpy(&param, p, paramSize);
        }

        void operator()()
        {
            func(&param);
        }
    };

    QAtomicInt m_rawPointerUsers;                       // number of threads currently using raw pointers
    KisLocklessStack<Action> m_pendingActions;          // ordinary deferred deletions
    KisLocklessStack<Action> m_migrationReclaimActions; // deletions queued during a map migration
    // Drains the given pool and executes its actions when it is safe to
    // do so, i.e. when no thread holds raw-pointer access.
    void releasePoolSafely(KisLocklessStack<Action> *pool, bool force = false) {
        KisLocklessStack<Action> tmp;
        tmp.mergeFrom(*pool);
        if (tmp.isEmpty()) return;

        if (force || tmp.size() > 4096) {
            // Forced flush or a large backlog: busy-wait until all
            // raw-pointer users have finished, then run everything.
            while (m_rawPointerUsers.loadAcquire());

            Action action;
            while (tmp.pop(action)) {
                action();
            }
        } else {
            if (!m_rawPointerUsers.loadAcquire()) {
                Action action;
                while (tmp.pop(action)) {
                    action();
                }
            } else {
                // push elements back to the source
                pool->mergeFrom(tmp);
            }
        }
    }
public:

    // Defers calling target->*pmf() until reclamation is safe. The
    // closure (member-function pointer plus target pointer) is copied by
    // value into the Action's inline parameter block.
    template <class T>
    void enqueue(void (T::*pmf)(), T* target, bool migration = false)
    {
        struct Closure {
            void (T::*pmf)();
            T* target;

            static void thunk(void* param)
            {
                Closure* self = (Closure*) param;
                CALL_MEMBER(*self->target, self->pmf)();
            }
        };

        Closure closure = {pmf, target};

        if (migration) {
            m_migrationReclaimActions.push(Action(Closure::thunk, &closure, sizeof(closure)));
        } else {
            m_pendingActions.push(Action(Closure::thunk, &closure, sizeof(closure)));
        }
    }
    // Runs queued actions if no raw-pointer users are active (or the
    // backlog has grown large enough to force a wait).
    void update()
    {
        releasePoolSafely(&m_pendingActions);
        releasePoolSafely(&m_migrationReclaimActions);
    }

    // Runs all queued actions, waiting for raw-pointer users to finish.
    void flush()
    {
        releasePoolSafely(&m_pendingActions, true);
        releasePoolSafely(&m_migrationReclaimActions, true);
    }

    // Readers call these around any access to raw pointers that may be
    // scheduled for deferred deletion.
    void lockRawPointerAccess()
    {
        m_rawPointerUsers.ref();
    }

    void unlockRawPointerAccess()
    {
        m_rawPointerUsers.deref();
    }

    // Sanity/debug check: true while at least one raw-pointer user is active.
    bool sanityRawPointerAccessLocked() const {
        return m_rawPointerUsers.loadAcquire();
    }
};
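
/*
 * Usage sketch (not part of the original file; the map, Node type and
 * deleteSelf() member below are hypothetical, only the QSBR calls are real):
 *
 *   QSBR qsbr;
 *
 *   // reader thread
 *   qsbr.lockRawPointerAccess();
 *   Node *node = map.lookupRawPointer(key); // hypothetical lookup
 *   use(node);
 *   qsbr.unlockRawPointerAccess();
 *
 *   // writer thread: defer destruction instead of deleting immediately
 *   qsbr.enqueue(&Node::deleteSelf, detachedNode);
 *
 *   // garbage-collection point: runs deferred deletions once no reader
 *   // holds raw-pointer access
 *   qsbr.update();
 */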

#endif // QSBR_H