/*
 * Copyright (C) 1998-2000 Netscape Communications Corporation.
 *
 * Other contributors:
 *   Nick Blievers <nickb@adacel.com.au>
 *   Jeff Hostetler <jeff@nerdone.com>
 *   Tom Rini <trini@kernel.crashing.org>
 *   Raffaele Sena <raff@netwinder.org>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Alternatively, the contents of this file may be used under the terms
 * of either the Mozilla Public License Version 1.1, found at
 * http://www.mozilla.org/MPL/ (the "MPL") or the GNU General Public
 * License Version 2.0, found at http://www.fsf.org/copyleft/gpl.html
 * (the "GPL"), in which case the provisions of the MPL or the GPL are
 * applicable instead of those above. If you wish to allow use of your
 * version of this file only under the terms of one of those two
 * licenses (the MPL or the GPL) and not to allow others to use your
 * version of this file under the LGPL, indicate your decision by
 * deleting the provisions above and replace them with the notice and
 * other provisions required by the MPL or the GPL, as the case may be.
 * If you do not delete the provisions above, a recipient may use your
 * version of this file under any of the LGPL, the MPL or the GPL.
 */

/*
 * Lifetime-based fast allocation, inspired by much prior art, including
 * "Fast Allocation and Deallocation of Memory Based on Object Lifetimes"
 * David R. Hanson, Software -- Practice and Experience, Vol. 20(1).
 */

#include "arena.h"

#include <stdlib.h>
#include <string.h>

#if HAVE_GETPAGESIZE
#include <unistd.h>
#define POOL_SIZE qMax(8192u, 2 * (unsigned) getpagesize())
#else
#define POOL_SIZE 8192
#endif

//#define DEBUG_ARENA_MALLOC
#ifdef DEBUG_ARENA_MALLOC
#include <assert.h>
#include <stdio.h>
#endif

namespace khtml
{

#ifdef DEBUG_ARENA_MALLOC
static int i = 0;
#endif

#define FREELIST_MAX 50
#define LARGE_ALLOCATION_CEIL(pool) (pool)->arenasize * 256
#define MAX_DISCRETE_ALLOCATION(pool) (pool)->arenasize * 64
static Arena *arena_freelist = nullptr;
static int freelist_count = 0;

#define ARENA_DEFAULT_ALIGN sizeof(double)
#define BIT(n) ((unsigned int)1 << (n))
#define BITMASK(n) (BIT(n) - 1)
#define CEILING_LOG2(_log2, _n)           \
    unsigned int j_ = (unsigned int)(_n); \
    (_log2) = 0;                          \
    if ((j_) & ((j_) - 1))                \
        (_log2) += 1;                     \
    if ((j_) >> 16)                       \
        (_log2) += 16, (j_) >>= 16;       \
    if ((j_) >> 8)                        \
        (_log2) += 8, (j_) >>= 8;         \
    if ((j_) >> 4)                        \
        (_log2) += 4, (j_) >>= 4;         \
    if ((j_) >> 2)                        \
        (_log2) += 2, (j_) >>= 2;         \
    if ((j_) >> 1)                        \
        (_log2) += 1;

int CeilingLog2(unsigned int i)
{
    int log2;
    CEILING_LOG2(log2, i);
    return log2;
}

void InitArenaPool(ArenaPool *pool, const char * /*name*/,
                   unsigned int /*size*/, unsigned int align)
{
    unsigned int size = POOL_SIZE;
    if (align == 0) {
        align = ARENA_DEFAULT_ALIGN;
    }
    pool->mask = BITMASK(CeilingLog2(align));
    pool->first.next = nullptr;
    pool->first.base = pool->first.avail = pool->first.limit =
        (uword)ARENA_ALIGN(pool, &pool->first + 1);
    pool->current = &pool->first;
    pool->arenasize = size;
    pool->largealloc = LARGE_ALLOCATION_CEIL(pool);
    pool->cumul = freelist_count * size;
}
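
/*
 * Worked example of the alignment setup above (illustrative only): with the
 * default align == ARENA_DEFAULT_ALIGN == sizeof(double), typically 8 bytes,
 * CeilingLog2(8) is 3, so pool->mask == BITMASK(3) == 0x7 and ARENA_ALIGN
 * (provided by arena.h) rounds sizes and addresses up to a multiple of 8:
 *
 *   CeilingLog2(8);          // 3
 *   BITMASK(CeilingLog2(8)); // 0x7
 *   // assuming the usual (n + mask) & ~mask rounding in arena.h,
 *   // ARENA_ALIGN(pool, 13) then yields 16
 */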

/*
** ArenaAllocate() -- allocate space from an arena pool
**
** Description: ArenaAllocate() allocates space from an arena
** pool.
**
** First try to satisfy the request from arenas starting at
** pool->current.
**
** If there is not enough space in the arena pool->current, try
** to claim an arena, on a first fit basis, from the global
** freelist (arena_freelist).
**
** If no arena in arena_freelist is suitable, then try to
** allocate a new arena from the heap.
**
** Returns: pointer to allocated space or NULL
**
*/
void *ArenaAllocate(ArenaPool *pool, unsigned int nb)
{
    Arena *a;
    char *rp;     /* returned pointer */

#ifdef DEBUG_ARENA_MALLOC
    assert((nb & pool->mask) == 0);
#endif

    nb = (uword)ARENA_ALIGN(pool, nb); /* force alignment */

    /* attempt to allocate from arenas at pool->current */
    {
        a = pool->current;
        do {
            if (a->avail + nb <= a->limit) {
                pool->current = a;
                rp = (char *)a->avail;
                a->avail += nb;
                VALGRIND_MEMPOOL_ALLOC(a->base, rp, nb);
                return rp;
            }
        } while (nullptr != (a = a->next));
    }

    /* attempt to allocate from arena_freelist */
    {
        Arena *p; /* previous pointer, for unlinking from freelist */

        for (a = p = arena_freelist; a != nullptr; p = a, a = a->next) {
            if (a->base + nb <= a->limit) {
                if (p == arena_freelist) {
                    arena_freelist = a->next;
                } else {
                    p->next = a->next;
                }
                a->avail = a->base;
                rp = (char *)a->avail;
                a->avail += nb;
                VALGRIND_MEMPOOL_ALLOC(a->base, rp, nb);
                /* the newly allocated arena is linked after pool->current
                 * and becomes pool->current */
                a->next = pool->current->next;
                pool->current->next = a;
                pool->current = a;
                if (nullptr == pool->first.next) {
                    pool->first.next = a;
                }
                freelist_count--;
                return (rp);
            }
        }
    }

    /* attempt to allocate from the heap */
    {
        unsigned int sz;
#if HAVE_MMAP
        if (pool->cumul > pool->largealloc) {
            // High memory pressure. Switch to a fractional allocation strategy
            // so that malloc gets a chance to successfully trim us down when it's over.
            sz = qMin(pool->cumul / 12, MAX_DISCRETE_ALLOCATION(pool));
#ifdef DEBUG_ARENA_MALLOC
            printf("allocating %d bytes (fractional strategy)\n", sz);
#endif
        } else
#endif
            sz = pool->arenasize > nb ? pool->arenasize : nb;
        sz += sizeof *a + pool->mask;  /* header and alignment slop */
        pool->cumul += sz;
#ifdef DEBUG_ARENA_MALLOC
        i++;
        printf("Malloc: %d\n", i);
#endif
        a = (Arena *)malloc(sz);
        if (a) {
            a->limit = (uword)a + sz;
            a->base = a->avail = (uword)ARENA_ALIGN(pool, a + 1);
            VALGRIND_CREATE_MEMPOOL(a->base, 0, 0);
            rp = (char *)a->avail;
            a->avail += nb;
            VALGRIND_MEMPOOL_ALLOC(a->base, rp, nb);

            /* the newly allocated arena is linked after pool->current
             * and becomes pool->current */
            a->next = pool->current->next;
            pool->current->next = a;
            pool->current = a;
            if (!pool->first.next) {
                pool->first.next = a;
            }
            return (rp);
        }
    }

    /* we got to here, and there's no memory to allocate */
    return (nullptr);
} /* --- end ArenaAllocate() --- */
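
/*
 * Minimal usage sketch (illustrative only, not an actual KHTML call site;
 * ArenaPool and ARENA_ALIGN come from arena.h, and "example" is just a
 * placeholder pool name):
 *
 *   ArenaPool pool;
 *   InitArenaPool(&pool, "example", 0, sizeof(double));
 *
 *   // Callers are expected to pass an already-aligned size; the debug
 *   // build asserts (nb & pool->mask) == 0.
 *   void *p = ArenaAllocate(&pool, ARENA_ALIGN(&pool, 100));
 *
 *   FreeArenaPool(&pool);   // recycle the pool's arenas onto arena_freelist
 *   FinishArenaPool(&pool); // or: return them to the heap for good
 *   ArenaFinish();          // eventually, drop the global freelist too
 */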

/*
 * Free tail arenas linked after head, which may not be the true list head.
 * Reset pool->current to point to head in case it pointed at a tail arena.
 */
static void FreeArenaList(ArenaPool *pool, Arena *head, bool reallyFree)
{
    Arena **ap, *a;

    ap = &head->next;
    a = *ap;
    if (!a) {
        return;
    }

#ifdef DEBUG_ARENA_MALLOC
    printf("****** Freeing arena pool. Total allocated memory: %d\n", pool->cumul);

    do {
        assert(a->base <= a->avail && a->avail <= a->limit);
        a->avail = a->base;
        CLEAR_UNUSED(a);
    } while ((a = a->next) != 0);
    a = *ap;
#endif

    if (freelist_count >= FREELIST_MAX) {
        reallyFree = true;
    }

    if (reallyFree) {
        do {
            *ap = a->next;
            VALGRIND_DESTROY_MEMPOOL(a->base);
            CLEAR_ARENA(a);
#ifdef DEBUG_ARENA_MALLOC
            if (a) {
                i--;
                printf("Free: %d\n", i);
            }
#endif
            free(a); a = nullptr;
        } while ((a = *ap) != nullptr);
    } else {
        /* Insert as much of the arena chain as we can hold at the front of the freelist. */
        do {
            ap = &(*ap)->next;
            freelist_count++;
        } while (*ap && freelist_count < FREELIST_MAX);

        /* Get rid of excess */
        if (*ap) {
            Arena *xa, *n;
            for (xa = *ap; xa; xa = n) {
                VALGRIND_DESTROY_MEMPOOL(xa->base);
                n = xa->next;
#ifdef DEBUG_ARENA_MALLOC
                i--;
                printf("Free: %d\n", i);
#endif
                CLEAR_ARENA(xa);
                free(xa);
            }
        }
        *ap = arena_freelist;
        arena_freelist = a;
        head->next = nullptr;
    }
    pool->current = head;
}

/*
 * Roll the pool back to |mark|: rewind the arena that contains the mark
 * and release every arena linked after it.
 */
void ArenaRelease(ArenaPool *pool, char *mark)
{
    Arena *a;

    for (a = pool->first.next; a; a = a->next) {
        if (UPTRDIFF(mark, a->base) < UPTRDIFF(a->avail, a->base)) {
            a->avail = (uword)ARENA_ALIGN(pool, mark);
            FreeArenaList(pool, a, false);
            return;
        }
    }
}

/* Recycle the pool's arenas onto the global freelist; the pool remains usable. */
void FreeArenaPool(ArenaPool *pool)
{
    FreeArenaList(pool, &pool->first, false);
}

/* Return all of the pool's arenas to the heap. */
void FinishArenaPool(ArenaPool *pool)
{
    FreeArenaList(pool, &pool->first, true);
}

/* Free every arena held on the global freelist. */
void ArenaFinish()
{
    Arena *a, *next;
#ifdef DEBUG_ARENA_MALLOC
    printf("releasing global Arena freelist\n");
#endif
    for (a = arena_freelist; a; a = next) {
        next = a->next;
        free(a); a = nullptr;
    }
    freelist_count = 0;
    arena_freelist = nullptr;
}

} // namespace