/*
 *  This file is part of the KDE libraries
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007 Apple Computer, Inc.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *  Copyright (C) 2007 Maksim Orlovich <maksim@kde.org>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include "collector.h"

#include <wtf/FastMalloc.h>
#include "internal.h"
#include "list.h"
#include "value.h"

#include <setjmp.h>
#include <limits.h>
#include <algorithm>

#if PLATFORM(DARWIN)

#include <pthread.h>
#include <mach/mach_port.h>
#include <mach/mach_init.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>

#elif PLATFORM(WIN_OS) || defined(WTF_COMPILER_CYGWIN)

#include <windows.h>
#include <winnt.h>

#elif PLATFORM(UNIX)

#include <stdlib.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <pthread.h> //krazy:exclude=includes (yes, it's duplicate, but in a different #if branch)
#include <unistd.h>

#if PLATFORM(SOLARIS_OS)
#include <thread.h>
#include <signal.h>
using std::memset;
#endif

#if HAVE_PTHREAD_NP_H
#include <pthread_np.h>
#endif

#endif

#define DEBUG_COLLECTOR 0

#if HAVE_VALGRIND_MEMCHECK_H && !defined(NDEBUG)
#include <valgrind/memcheck.h>
#if defined(VALGRIND_MAKE_MEM_DEFINED)
#define VG_DEFINED(p) VALGRIND_MAKE_MEM_DEFINED(&p, sizeof(void*))
#else
#define VG_DEFINED(p)
#endif
#else
#define VG_DEFINED(p)
#endif

using std::max;

namespace KJS
{

// tunable parameters
const size_t SPARE_EMPTY_BLOCKS = 2;            // number of empty blocks to keep around after a sweep
const size_t MIN_ARRAY_SIZE = 14;               // initial BlockList capacity
const size_t GROWTH_FACTOR = 2;                 // BlockList growth multiplier
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 4000; // minimum new-allocation cost before collect() is considered

// A wee bit like a WTF::Vector, but perfectly POD, so it can be used in global context
// w/o worries.
struct BlockList {
    CollectorBlock **m_data;
    size_t m_used;
    size_t m_capacity;

    CollectorBlock *operator[](size_t pos)
    {
        return m_data[pos];
    }

    size_t used() const
    {
        return m_used;
    }

    void append(CollectorBlock *block)
    {
        if (m_used == m_capacity) {
            // Guard against overflow of the capacity computation below
            static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock *) / GROWTH_FACTOR;
            if (m_capacity > maxNumBlocks) {
                CRASH();
            }
            m_capacity = max(MIN_ARRAY_SIZE, m_capacity * GROWTH_FACTOR);
            m_data = static_cast<CollectorBlock **>(fastRealloc(m_data, m_capacity * sizeof(CollectorBlock *)));
        }
        m_data[m_used] = block;
        ++m_used;
    }

    void remove(CollectorBlock *block)
    {
        size_t c;
        for (c = 0; c < m_used; ++c)
            if (m_data[c] == block) {
                break;
            }

        if (c == m_used) {
            return;
        }

        // Move things up, and shrink..
        --m_used;
        for (; c < m_used; ++c) {
            m_data[c] = m_data[c + 1];
        }
    }
};

struct CollectorHeap {
    // This contains the list of both normal and oversize blocks
    BlockList allBlocks;

    // Just the normal blocks
    BlockList blocks;

    size_t firstBlockWithPossibleSpace;

    // The oversize block handling is a bit tricky, since we do not wish to slow down
    // the usual paths. Hence, we do the following:
    // 1) We set the marked bits for any extension portions of the block.
    //    This way the stack GC doesn't have to do anything special if a pointer
    //    happens to go that way.
    // 2) We keep track of an allBlocks list to help the stack GC test things.
    //
    // The oversize heap itself represents blocks as follows:
    // 1) free: marked = false, allocd = false, trailer = false, zeroIfFree = 0
    // 2) alloc'd, head: marked = depends, allocd = true, trailer = false, zeroIfFree is uncertain
    // 3) alloc'd, trailer: marked = true (see above), allocd = true, trailer = true, zeroIfFree is uncertain
    //
    // For now, we don't use a freelist, so performance may be quite bad if
    // this is used heavily; this is just because the cells do not have
    // back and forward links, which we would need since we can pick a non-first cell
    // ### actually, it may be possible to shuffle the list. Not now, though
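    //
    // As an illustration (hypothetical placement), a 3-cell oversize allocation
    // starting at cell 5 of a block ends up as:
    //   cell 5 (head):    allocd = 1, trailer = 0, marked only while reachable
    //   cell 6 (trailer): allocd = 1, trailer = 1, marked = 1 (always)
    //   cell 7 (trailer): allocd = 1, trailer = 1, marked = 1 (always)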
    BlockList oversizeBlocks;

    size_t numLiveObjects;
    size_t numLiveObjectsAtLastCollect;
    size_t extraCost;
};

static CollectorHeap heap;

bool Collector::memoryFull = false;

static CollectorBlock *allocateBlock()
{
#if PLATFORM(DARWIN)
    vm_address_t address = 0;
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif PLATFORM(WIN_OS) || defined(WTF_COMPILER_CYGWIN)
    // windows virtual address granularity is naturally 64k
    LPVOID address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif HAVE_FUNC_POSIX_MEMALIGN
    void *address;
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
    memset(reinterpret_cast<void *>(address), 0, BLOCK_SIZE);
#else
    static size_t pagesize = getpagesize();

    size_t extra = 0;
    if (BLOCK_SIZE > pagesize) {
        extra = BLOCK_SIZE - pagesize;
    }

    void *mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

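    // mmap() returns page-aligned memory, but the collector needs blocks aligned
    // on BLOCK_SIZE boundaries (so that masking a cell pointer with
    // BLOCK_OFFSET_MASK finds its block). A page-aligned address can be off by
    // at most BLOCK_SIZE - pagesize, which is exactly the 'extra' we mapped
    // above; trim the misaligned head and the leftover tail below.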
    size_t adjust = 0;
    if ((address & BLOCK_OFFSET_MASK) != 0) {
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);
    }

    if (adjust > 0) {
        munmap(reinterpret_cast<void *>(address), adjust);
    }

    if (adjust < extra) {
        munmap(reinterpret_cast<void *>(address + adjust + BLOCK_SIZE), extra - adjust);
    }

    address += adjust;
    memset(reinterpret_cast<void *>(address), 0, BLOCK_SIZE);
#endif
    CollectorBlock *ptr = reinterpret_cast<CollectorBlock *>(address);
    heap.allBlocks.append(ptr); // Register with the heap
    return ptr;
}

static void freeBlock(CollectorBlock *block)
{
    // Unregister the block first
    heap.allBlocks.remove(block);

#if PLATFORM(DARWIN)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif PLATFORM(WIN_OS) || defined(WTF_COMPILER_CYGWIN)
    VirtualFree(block, BLOCK_SIZE, MEM_RELEASE);
#elif HAVE_FUNC_POSIX_MEMALIGN
    free(block);
#else
    munmap(block, BLOCK_SIZE);
#endif
}

void Collector::recordExtraCost(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    heap.extraCost += cost;
}

static void *allocOversize(size_t s)
{
    size_t cellsNeeded = (s + (CELL_SIZE - 1)) / CELL_SIZE;

    // printf("allocOversize, size:%d, cellsNeeded:%d\n", s, cellsNeeded);

    // Even the oversize path must fit an allocation into a single block,
    // so we cannot deal with things close to 64K in size
    assert(cellsNeeded <= CELLS_PER_BLOCK);

    // Look through the blocks, see if one has enough, and where.
    CollectorBlock *sufficientBlock = nullptr;
    size_t startOffset = -1;
    for (size_t b = 0; b < heap.oversizeBlocks.used() && !sufficientBlock; ++b) {
        CollectorBlock *candidate = heap.oversizeBlocks[b];
        if (cellsNeeded <= CELLS_PER_BLOCK - candidate->usedCells) {
            // There is a chance it will fit -- see if we can find a free run long enough..
            for (size_t i = 0; i < CELLS_PER_BLOCK; i++) {
                if (i % 32 == 0 && candidate->allocd.bits[i / 32] == 0xFFFFFFFF) {
                    // Can skip this and 31 other cells
                    i += 31;
                    continue;
                }

                if (candidate->allocd.get(i)) {
                    continue;
                }

                // This cell is free -- are there enough free cells after it?
                startOffset = i;
                size_t last = i + cellsNeeded - 1;

                if (last >= CELLS_PER_BLOCK) { // No way it will fit
                    break;
                }

                ++i;
                while (i <= last && !candidate->allocd.get(i)) {
                    ++i;
                }

                // Did we get through enough?
                if (i == last + 1) {
                    sufficientBlock = candidate;
                    break;
                }

                // Not enough room -- the cell at i is allocated, so the outer
                // loop's increment resumes scanning at i + 1

            } // for each cell per block.
        } // if enough room in block
    } // for each block

    if (!sufficientBlock) {
        sufficientBlock = allocateBlock();
        startOffset     = 0;
        heap.oversizeBlocks.append(sufficientBlock);
    }

    sufficientBlock->usedCells += cellsNeeded;

    // Set proper bits for trailers and the head
    sufficientBlock->allocd.set(startOffset);
    for (size_t t = startOffset + 1; t < startOffset + cellsNeeded; ++t) {
        sufficientBlock->trailer.set(t);
        sufficientBlock->marked.set(t);
        sufficientBlock->allocd.set(t);
    }

    void *result = sufficientBlock->cells + startOffset;
    memset(result, 0, s);
    heap.numLiveObjects = heap.numLiveObjects + 1;
    return result;
}

void *Collector::allocate(size_t s)
{
    assert(JSLock::lockCount() > 0);

    // collect if needed
    size_t numLiveObjects = heap.numLiveObjects;
    size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
    size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
    size_t newCost = numNewObjects + heap.extraCost;

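    // Collect only once enough new cost has accumulated: at least
    // ALLOCATIONS_PER_COLLECTION, and at least as much as the number of
    // objects that survived the last collection, so mostly-static heaps
    // are not re-scanned over and over.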
    if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect) {
        collect();
        numLiveObjects = heap.numLiveObjects;
    }

    if (s > CELL_SIZE) {
        return allocOversize(s);
    }

    // slab allocator

    size_t usedBlocks = heap.blocks.used();

    size_t i = heap.firstBlockWithPossibleSpace;
    CollectorBlock *targetBlock;
    size_t targetBlockUsedCells;
    if (i != usedBlocks) {
        targetBlock = heap.blocks[i];
        targetBlockUsedCells = targetBlock->usedCells;
        assert(targetBlockUsedCells <= CELLS_PER_BLOCK);
        while (targetBlockUsedCells == CELLS_PER_BLOCK) {
            if (++i == usedBlocks) {
                goto allocateNewBlock;
            }
            targetBlock = heap.blocks[i];
            targetBlockUsedCells = targetBlock->usedCells;
            assert(targetBlockUsedCells <= CELLS_PER_BLOCK);
        }
        heap.firstBlockWithPossibleSpace = i;
    } else {
    allocateNewBlock:
        // didn't find one, need to allocate a new block
        targetBlock = allocateBlock();
        targetBlock->freeList = targetBlock->cells;
        targetBlockUsedCells = 0;
        heap.blocks.append(targetBlock);
        heap.firstBlockWithPossibleSpace = usedBlocks; // previous usedBlocks -> new one's index
    }

    // find a free spot in the block and detach it from the free list
    CollectorCell *newCell = targetBlock->freeList;

    // "next" field is a byte offset -- 0 means next cell, so a zeroed block is already initialized
    // could avoid the casts by using a cell offset, but this avoids a relatively-slow multiply
    targetBlock->freeList = reinterpret_cast<CollectorCell *>(reinterpret_cast<char *>(newCell + 1) + newCell->u.freeCell.next);
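    // (in a freshly zeroed block newCell->u.freeCell.next is 0, so the line above
    // yields newCell + 1: cells are handed out in address order until the sweep
    // threads freed cells back in with non-zero offsets)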

    targetBlock->usedCells = targetBlockUsedCells + 1;
    heap.numLiveObjects = numLiveObjects + 1;

    return newCell;
}

#if USE(MULTIPLE_THREADS)

struct Collector::Thread {
    Thread(pthread_t pthread, mach_port_t mthread) : posixThread(pthread), machThread(mthread) {}
    Thread *next;
    pthread_t posixThread;
    mach_port_t machThread;
};

pthread_key_t registeredThreadKey;
pthread_once_t registeredThreadKeyOnce = PTHREAD_ONCE_INIT;

Collector::Thread *registeredThreads;

static void destroyRegisteredThread(void *data)
{
    Collector::Thread *thread = (Collector::Thread *)data;

    if (registeredThreads == thread) {
        registeredThreads = registeredThreads->next;
    } else {
        Collector::Thread *last = registeredThreads;
        for (Collector::Thread *t = registeredThreads->next; t != NULL; t = t->next) {
            if (t == thread) {
                last->next = t->next;
                break;
            }
            last = t;
        }
    }

    delete thread;
}

static void initializeRegisteredThreadKey()
{
    pthread_key_create(&registeredThreadKey, destroyRegisteredThread);
}

void Collector::registerThread()
{
    pthread_once(&registeredThreadKeyOnce, initializeRegisteredThreadKey);

    if (!pthread_getspecific(registeredThreadKey)) {
        pthread_t pthread = pthread_self();
        WTF::fastMallocRegisterThread(pthread);
        Collector::Thread *thread = new Collector::Thread(pthread, pthread_mach_thread_np(pthread));
        thread->next = registeredThreads;
        registeredThreads = thread;
        pthread_setspecific(registeredThreadKey, thread);
    }
}

#endif

#define IS_POINTER_ALIGNED(p) (((intptr_t)(p) & (sizeof(char *) - 1)) == 0)

// cell size needs to be a power of two for this to be valid
#define IS_CELL_ALIGNED(p) (((intptr_t)(p) & CELL_MASK) == 0)
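// (illustration: assuming hypothetical 64-byte cells, CELL_MASK would be 0x3F,
// so only pointers to the first byte of a cell pass; pointers into the middle
// of a cell are ignored by the conservative scan)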

void Collector::markStackObjectsConservatively(void *start, void *end)
{
    if (start > end) {
        void *tmp = start;
        start = end;
        end = tmp;
    }

    assert(((char *)end - (char *)start) < 0x1000000);
    assert(IS_POINTER_ALIGNED(start));
    assert(IS_POINTER_ALIGNED(end));

    char **p = (char **)start;
    char **e = (char **)end;

    // We use allBlocks here since we want to mark oversize cells as well.
    // Their trailers will have the mark bit set already, to avoid trouble.
    size_t usedBlocks = heap.allBlocks.used();
    CollectorBlock **blocks = heap.allBlocks.m_data;

    const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);

    while (p != e) {
        char *x = *p++;
        VG_DEFINED(x);
        if (IS_CELL_ALIGNED(x) && x) {
            uintptr_t offset = reinterpret_cast<uintptr_t>(x) & BLOCK_OFFSET_MASK;
            CollectorBlock *blockAddr = reinterpret_cast<CollectorBlock *>(x - offset);
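            // Blocks are BLOCK_SIZE-aligned, so clearing the low bits of a
            // candidate pointer gives the address its containing block would
            // have; it only counts as a reference if that address really is one
            // of our blocks and the offset falls within the block's cell array.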
            for (size_t block = 0; block < usedBlocks; block++) {
                if ((blocks[block] == blockAddr) && (offset <= lastCellOffset)) {
                    if (((CollectorCell *)x)->u.freeCell.zeroIfFree != nullptr) {
                        JSCell *imp = reinterpret_cast<JSCell *>(x);
                        if (!JSValue::marked(imp)) {
                            JSValue::mark(imp);
                        }
                    }
                } // if valid block
            } // for each block
        } // if cell-aligned
    } // for each pointer
}

static inline void *currentThreadStackBase()
{
#if PLATFORM(DARWIN)
    pthread_t thread = pthread_self();
    void *stackBase = pthread_get_stackaddr_np(thread);
#elif (PLATFORM(WIN_OS) || defined(WTF_COMPILER_CYGWIN))
    // tested with mingw32, mingw64, msvc2008, cygwin
    NT_TIB *pTib = (NT_TIB *)NtCurrentTeb();
    void *stackBase = (void *)pTib->StackBase;
#elif PLATFORM(SOLARIS_OS)
    stack_t s;
    thr_stksegment(&s);
    return s.ss_sp;
    // NOTREACHED
    void *stackBase = nullptr;
#elif PLATFORM(UNIX)
    static void *stackBase = nullptr;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == nullptr || thread != stackThread) {
#if defined(__OpenBSD__)
        stack_t ss;
        pthread_stackseg_np(thread, &ss);
        stackBase = (void*)((size_t) ss.ss_sp - ss.ss_size);
#else
        pthread_attr_t sattr;
#if HAVE_PTHREAD_NP_H || defined(__NetBSD__)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        // also on NetBSD 3 and 4, raphael.langerhorst@kdemail.net
        // HIGHLY RECOMMENDED by manpage to allocate storage, avoids
        // crashing in JS immediately in FreeBSD.
        pthread_attr_init(&sattr);
        pthread_attr_get_np(thread, &sattr);
#else
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
#endif // picking the _np function to use --- Linux or BSD

        size_t stackSize;
        pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        stackBase = (char *)stackBase + stackSize;      // a matter of interpretation, apparently...
        pthread_attr_destroy(&sattr);
#endif  // OpenBSD
        assert(stackBase);
        stackThread = thread;
    }
#else
#error Need a way to get the stack base on this platform
#endif
    return stackBase;
}

void Collector::markCurrentThreadConservatively()
{
    // setjmp forces volatile registers onto the stack
    jmp_buf registers;
#if defined(WTF_COMPILER_MSVC)
#pragma warning(push)
#pragma warning(disable: 4611)
#endif
    setjmp(registers);
#if defined(WTF_COMPILER_MSVC)
#pragma warning(pop)
#endif

    void *dummy;
    void *stackPointer = &dummy;
    void *stackBase = currentThreadStackBase();
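    // The local above approximates the top of this thread's stack;
    // markStackObjectsConservatively() swaps its bounds if start > end, so the
    // direction of stack growth does not matter here.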

    markStackObjectsConservatively(stackPointer, stackBase);
}

#if USE(MULTIPLE_THREADS)

typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

void Collector::markOtherThreadConservatively(Thread *thread)
{
    thread_suspend(thread->machThread);

#if PLATFORM(X86)
    i386_thread_state_t regs;
    unsigned user_count = sizeof(regs) / sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif PLATFORM(X86_64)
    x86_thread_state64_t  regs;
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif PLATFORM(PPC)
    ppc_thread_state_t  regs;
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif PLATFORM(PPC64)
    ppc_thread_state64_t  regs;
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#else
#error Unknown Architecture
#endif
    // get the thread register state
    thread_get_state(thread->machThread, flavor, (thread_state_t)&regs, &user_count);

    // scan the registers
    markStackObjectsConservatively((void *)&regs, (void *)((char *)&regs + (user_count * sizeof(usword_t))));

    // scan the stack
#if PLATFORM(X86) && __DARWIN_UNIX03
    markStackObjectsConservatively((void *)regs.__esp, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(X86)
    markStackObjectsConservatively((void *)regs.esp, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(X86_64) && __DARWIN_UNIX03
    markStackObjectsConservatively((void *)regs.__rsp, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(X86_64)
    markStackObjectsConservatively((void *)regs.rsp, pthread_get_stackaddr_np(thread->posixThread));
#elif (PLATFORM(PPC) || PLATFORM(PPC64)) && __DARWIN_UNIX03
    markStackObjectsConservatively((void *)regs.__r1, pthread_get_stackaddr_np(thread->posixThread));
#elif PLATFORM(PPC) || PLATFORM(PPC64)
    markStackObjectsConservatively((void *)regs.r1, pthread_get_stackaddr_np(thread->posixThread));
#else
#error Unknown Architecture
#endif

    thread_resume(thread->machThread);
}

#endif

void Collector::markStackObjectsConservatively()
{
    markCurrentThreadConservatively();

#if USE(MULTIPLE_THREADS)
    for (Thread *thread = registeredThreads; thread != NULL; thread = thread->next) {
        if (thread->posixThread != pthread_self()) {
            markOtherThreadConservatively(thread);
        }
    }
#endif
}

typedef HashCountedSet<JSCell *> ProtectCounts;
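// (a counted set rather than a plain set: protect() increments a cell's count and
// unprotect() decrements it, so nested protect/unprotect pairs balance out)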

static ProtectCounts &protectedValues()
{
    static ProtectCounts pv;
    return pv;
}

void Collector::protect(JSValue *k)
{
    assert(k);
    assert(JSLock::lockCount() > 0);

    if (JSImmediate::isImmediate(k)) {
        return;
    }

    protectedValues().add(k->asCell());
}

void Collector::unprotect(JSValue *k)
{
    assert(k);
    assert(JSLock::lockCount() > 0);

    if (JSImmediate::isImmediate(k)) {
        return;
    }

    protectedValues().remove(k->asCell());
}

void Collector::markProtectedObjects()
{
    ProtectCounts &pv = protectedValues();
    ProtectCounts::iterator end = pv.end();
    for (ProtectCounts::iterator it = pv.begin(); it != end; ++it) {
        JSCell *val = it->first;
        if (!val->marked()) {
            val->mark();
        }
    }
}

bool Collector::collect()
{
    assert(JSLock::lockCount() > 0);

#if USE(MULTIPLE_THREADS)
    bool currentThreadIsMainThread = pthread_main_np();
#else
    bool currentThreadIsMainThread = true;
#endif

    Interpreter::markSourceCachedObjects();

    if (Interpreter::s_hook) {
        Interpreter *scr = Interpreter::s_hook;
        do {
            scr->mark(currentThreadIsMainThread);
            scr = scr->next;
        } while (scr != Interpreter::s_hook);
    }

    // MARK: first mark all referenced objects recursively starting out from the set of root objects

    markStackObjectsConservatively();
    markProtectedObjects();
    List::markProtectedLists();
#if USE(MULTIPLE_THREADS)
    if (!currentThreadIsMainThread) {
        markMainThreadOnlyObjects();
    }
#endif

    // SWEEP: delete everything that is not marked (garbage) and unmark everything else

    // Note: if a cell has zeroIfFree == 0, it is either free,
    // or in the middle of being constructed and has not yet
    // had its vtable filled. Hence, such cells should not be cleaned up

    size_t emptyBlocks = 0;
    size_t numLiveObjects = heap.numLiveObjects;

    for (size_t block = 0; block < heap.blocks.used(); block++) {
        CollectorBlock *curBlock = heap.blocks[block];

        size_t usedCells = curBlock->usedCells;
        CollectorCell *freeList = curBlock->freeList;

        if (usedCells == CELLS_PER_BLOCK) {
            // special case with a block where all cells are used -- testing indicates this happens often
            for (size_t i = 0; i < CELLS_PER_BLOCK; i++) {
                CollectorCell *cell = curBlock->cells + i;
                JSCell *imp = reinterpret_cast<JSCell *>(cell);
                if (!curBlock->marked.get(i) && currentThreadIsMainThread) {
                    // special case for allocated but uninitialized object
                    // (We don't need this check earlier because nothing prior to this point assumes the object has a valid vptr.)
                    if (cell->u.freeCell.zeroIfFree == nullptr) {
                        continue;
                    }
                    imp->~JSCell();
                    --usedCells;
                    --numLiveObjects;

                    // put cell on the free list
                    cell->u.freeCell.zeroIfFree = nullptr;
                    cell->u.freeCell.next = reinterpret_cast<char *>(freeList) - reinterpret_cast<char *>(cell + 1);
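                    // (offset stored relative to cell + 1, matching the decoding
                    // in Collector::allocate())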
                    freeList = cell;
                }
            }
        } else {
            size_t minimumCellsToProcess = usedCells;
            for (size_t i = 0; (i < minimumCellsToProcess) & (i < CELLS_PER_BLOCK); i++) {
                CollectorCell *cell = curBlock->cells + i;
                if (cell->u.freeCell.zeroIfFree == nullptr) {
                    ++minimumCellsToProcess;
                } else {
                    JSCell *imp = reinterpret_cast<JSCell *>(cell);
                    if (!curBlock->marked.get(i) && currentThreadIsMainThread) {
                        imp->~JSCell();
                        --usedCells;
                        --numLiveObjects;

                        // put cell on the free list
                        cell->u.freeCell.zeroIfFree = nullptr;
                        cell->u.freeCell.next = reinterpret_cast<char *>(freeList) - reinterpret_cast<char *>(cell + 1);
                        freeList = cell;
                    }
                }
            }
        }

        curBlock->marked.clearAll();
        curBlock->usedCells = usedCells;
        curBlock->freeList = freeList;

        if (usedCells == 0) {
            emptyBlocks++;
            if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
#if !DEBUG_COLLECTOR
                freeBlock(curBlock);
#endif
                // swap with the last block so we compact as we go
                heap.blocks.m_data[block] = heap.blocks[heap.blocks.used() - 1];
                heap.blocks.m_used--;
                block--; // Don't move forward a step in this case
            }
        }
    }

    if (heap.numLiveObjects != numLiveObjects) {
        heap.firstBlockWithPossibleSpace = 0;
    }

    // Now sweep oversize blocks.
    emptyBlocks = 0;
    for (size_t ob = 0; ob < heap.oversizeBlocks.used(); ++ob) {
        CollectorBlock *curBlock = heap.oversizeBlocks[ob];
        CollectorCell *freeList = curBlock->freeList;
        size_t usedCells = curBlock->usedCells;

        // Go through the cells
        for (size_t i = 0; i < CELLS_PER_BLOCK; i++) {
            if (i % 32 == 0 && curBlock->allocd.bits[i / 32] == 0) {
                // Nothing used around here, skip this and the next 31 cells
                i += 31;
                continue;
            }

            CollectorCell *cell = curBlock->cells + i;
            if (cell->u.freeCell.zeroIfFree == nullptr) {
                continue;
            }

            if (!curBlock->allocd.get(i)) {
                continue;
            }

            JSCell *imp = reinterpret_cast<JSCell *>(cell);

            // Live and trailer cells will all have the mark set,
            // so we only have to collect with it clear --
            // and we already took care of those that are
            // already free (or being initialized) above
            if (!curBlock->marked.get(i)) {
                // Free this cell...
                imp->~JSCell();
                --numLiveObjects;
                --usedCells;

                // Mark the cell and its trailers as available
                cell->u.freeCell.zeroIfFree = nullptr;
                curBlock->allocd.clear(i);

                ++i; // Go to the potential trailer..
                while (i < CELLS_PER_BLOCK && curBlock->trailer.get(i)) { // check the bound before reading the bitmap
                    --usedCells;
                    curBlock->allocd.clear(i);
                    curBlock->trailer.clear(i);
                    curBlock->marked.clear(i);
                    curBlock->cells[i].u.freeCell.zeroIfFree = nullptr;
                    ++i;
                }
                --i; // Last item we processed.
            } else {
                // If this is not a trailer cell, clear the mark
                if (!curBlock->trailer.get(i)) {
                    curBlock->marked.clear(i);
                }
            }
        } // each cell
        curBlock->usedCells = usedCells;
        curBlock->freeList = freeList;
        if (usedCells == 0) {
            emptyBlocks++;
            if (emptyBlocks > SPARE_EMPTY_BLOCKS) {
                freeBlock(curBlock);

                // swap with the last block so we compact as we go
                heap.oversizeBlocks.m_data[ob] = heap.oversizeBlocks[heap.oversizeBlocks.used() - 1];
                heap.oversizeBlocks.m_used--;
                ob--; // Don't move forward a step in this case
            }
        }
    } // each oversize block

    bool deleted = heap.numLiveObjects != numLiveObjects;

    heap.numLiveObjects = numLiveObjects;
    heap.numLiveObjectsAtLastCollect = numLiveObjects;
    heap.extraCost = 0;

    bool newMemoryFull = (numLiveObjects >= KJS_MEM_LIMIT);
    if (newMemoryFull && newMemoryFull != memoryFull) {
        reportOutOfMemoryToAllInterpreters();
    }
    memoryFull = newMemoryFull;

    return deleted;
}

size_t Collector::size()
{
    return heap.numLiveObjects;
}

#ifdef KJS_DEBUG_MEM
void Collector::finalCheck()
{
}
#endif

size_t Collector::numInterpreters()
{
    size_t count = 0;
    if (Interpreter::s_hook) {
        Interpreter *scr = Interpreter::s_hook;
        do {
            ++count;
            scr = scr->next;
        } while (scr != Interpreter::s_hook);
    }
    return count;
}

size_t Collector::numProtectedObjects()
{
    return protectedValues().size();
}

static const char *typeName(JSCell *val)
{
    const char *name = "???";
    switch (val->type()) {
    case UnspecifiedType:
        break;
    case UndefinedType:
        name = "undefined";
        break;
    case NullType:
        name = "null";
        break;
    case BooleanType:
        name = "boolean";
        break;
    case StringType:
        name = "string";
        break;
    case NumberType:
        name = "number";
        break;
    case ObjectType: {
        const ClassInfo *info = static_cast<JSObject *>(val)->classInfo();
        name = info ? info->className : "Object";
        break;
    }
    case GetterSetterType:
        name = "gettersetter";
        break;
    }
    return name;
}

HashCountedSet<const char *> *Collector::rootObjectTypeCounts()
{
    HashCountedSet<const char *> *counts = new HashCountedSet<const char *>;

    ProtectCounts &pv = protectedValues();
    ProtectCounts::iterator end = pv.end();
    for (ProtectCounts::iterator it = pv.begin(); it != end; ++it) {
        counts->add(typeName(it->first));
    }

    return counts;
}

void Collector::reportOutOfMemoryToAllInterpreters()
{
    if (!Interpreter::s_hook) {
        return;
    }

    Interpreter *interpreter = Interpreter::s_hook;
    do {
        ExecState *exec = interpreter->execState();

        exec->setException(Error::create(exec, GeneralError, "Out of memory"));

        interpreter = interpreter->next;
    } while (interpreter != Interpreter::s_hook);
}

} // namespace KJS