/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */
# ifndef WIN32_LEAN_AND_MEAN
#   define WIN32_LEAN_AND_MEAN 1
# endif

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
void ** const GC_objfreelist_ptr = GC_objfreelist;
void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    void ** const GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif

STATIC void * GC_generic_or_special_malloc(size_t lb, int knd)
{
    switch(knd) {
#     ifdef STUBBORN_ALLOC
        case STUBBORN: return(GC_malloc_stubborn((size_t)lb));
#     endif
        case PTRFREE: return(GC_malloc_atomic((size_t)lb));
        case NORMAL: return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE: return(GC_malloc_uncollectable((size_t)lb));
#     ifdef ATOMIC_UNCOLLECTABLE
        case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#     endif /* ATOMIC_UNCOLLECTABLE */
        default: return(GC_generic_malloc(lb, knd));
    }
}

/* Change the size of the block pointed to by p to contain at least  */
/* lb bytes.  The object may be (and quite likely will be) moved.    */
/* The kind (e.g. atomic) is the same as that of the old.            */
/* Shrinking of large blocks is not implemented well.                */
GC_API void * GC_CALL GC_realloc(void * p, size_t lb)
{
    size_t sz;          /* Current size in bytes        */
    size_t orig_sz;     /* Original sz in bytes         */

    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    obj_kind = hhdr -> hb_obj_kind;

    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
        sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
        descr = GC_obj_kinds[obj_kind].ok_descriptor;
        if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
        hhdr -> hb_descr = descr;
#       ifdef MARK_BIT_PER_OBJ
          GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
#       else
          GC_ASSERT(hhdr -> hb_large_block &&
                    hhdr -> hb_map[ANY_INDEX] == 1);
#       endif
        if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
        /* Extra area is already cleared by GC_alloc_large_and_clear. */

    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
              if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            /* Clear unneeded part of object to avoid bogus pointer */
            /* tracing.  Safe for stubborn objects.                 */
            BZERO(((ptr_t)p) + lb, orig_sz - lb);

            GC_generic_or_special_malloc((word)lb, obj_kind);

            if (result == 0) return(0);
                /* Could also return original object.  But this   */
                /* gives the client warning of imminent disaster. */
            BCOPY(p, result, lb);

        GC_generic_or_special_malloc((word)lb, obj_kind);

        if (result == 0) return(0);
        BCOPY(p, result, sz);
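
/* A minimal usage sketch (illustrative only, excluded from the build):  */
/* growing a collector-managed array with GC_realloc works just like     */
/* realloc(); old contents are preserved, and the object may move.  The  */
/* helper below is hypothetical, not part of the GC API.                 */
#if 0
  static int push_int(int **arr_p, size_t *cap_p, size_t len, int v)
  {
    if (len == *cap_p) {
      size_t new_cap = (*cap_p == 0 ? 16 : 2 * *cap_p);
      int *bigger = GC_realloc(*arr_p, new_cap * sizeof(int));

      if (bigger == NULL) return 0;     /* old object is still valid */
      *arr_p = bigger;
      *cap_p = new_cap;
    }
    (*arr_p)[len] = v;
    return 1;
  }
#endif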

# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC
    /* As with malloc, avoid two levels of extra calls here. */
#   define GC_debug_realloc_replacement(p, lb) \
        GC_debug_realloc(p, lb, GC_DBG_RA "unknown", 0)

    void * realloc(void * p, size_t lb)
    {
        return(REDIRECT_REALLOC(p, lb));
    }

#   undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */

/* Allocate memory such that only pointers to near the         */
/* beginning of the object are considered.                     */
/* We avoid holding allocation lock while we clear memory.     */
GC_INNER void * GC_generic_malloc_ignore_off_page(size_t lb, int k)
{
        return(GC_generic_malloc((word)lb, k));
    lg = ROUNDED_UP_GRANULES(lb);
    lb_rounded = GRANULES_TO_BYTES(lg);
        return((*GC_get_oom_fn())(lb));
    n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
    init = GC_obj_kinds[k].ok_init;
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE);
    if (GC_debugging_started) {
        BZERO(result, n_blocks * HBLKSIZE);
        /* Clear any memory that might be used for GC descriptors  */
        /* before we release the lock.                             */
        ((word *)result)[0] = 0;
        ((word *)result)[1] = 0;
        ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
        ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
    GC_bytes_allocd += lb_rounded;
        GC_oom_func oom_fn = GC_oom_fn;
        return((*oom_fn)(lb));
    if (init && !GC_debugging_started) {
        BZERO(result, n_blocks * HBLKSIZE);

GC_API void * GC_CALL GC_malloc_ignore_off_page(size_t lb)
{
    return((void *)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

GC_API void * GC_CALL GC_malloc_atomic_ignore_off_page(size_t lb)
{
    return((void *)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
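
/* An illustrative sketch (excluded from the build): these variants are  */
/* meant for large objects where the client promises to keep a pointer   */
/* to (or near) the beginning of the object live; interior pointers      */
/* alone are not guaranteed to keep the object from being collected.     */
#if 0
  static char *big_buf;         /* base pointer kept reachable */

  static void make_big_buf(void)
  {
    big_buf = GC_malloc_ignore_off_page(8 * 1024 * 1024);
    /* Pointers like big_buf + i by themselves would not necessarily */
    /* keep the object alive; big_buf itself must stay reachable.    */
  }
#endif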

/* Increment GC_bytes_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                        */
GC_API void GC_CALL GC_incr_bytes_allocd(size_t n)
{
    GC_bytes_allocd += n;
}

/* The same for GC_bytes_freed.                                         */
GC_API void GC_CALL GC_incr_bytes_freed(size_t n)
{
    GC_bytes_freed += n;
}
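
/* An illustrative sketch (excluded from the build; my_cache_* are       */
/* hypothetical client helpers): a client that recycles objects through  */
/* its own cache can keep the collector's allocation statistics, and     */
/* hence its collection triggering, roughly accurate by reporting the    */
/* traffic with these two entry points.                                  */
#if 0
  static void * cached_alloc(size_t n)
  {
    void *p = my_cache_pop(n);          /* client-managed cache           */

    if (p == NULL) return GC_malloc(n); /* GC_malloc counts this itself   */
    GC_incr_bytes_allocd(n);            /* report bytes reused from cache */
    return p;
  }

  static void cached_release(void *p, size_t n)
  {
    my_cache_push(p, n);
    GC_incr_bytes_freed(n);             /* report bytes returned to cache */
  }
#endif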

# ifdef PARALLEL_MARK
    STATIC volatile signed_word GC_bytes_allocd_tmp = 0;
                        /* Number of bytes of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                   */
# endif /* PARALLEL_MARK */

/* Return a list of 1 or more objects of the indicated size, linked     */
/* through the first word in the object.  This has the advantage that   */
/* it acquires the allocation lock only once, and may greatly reduce    */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would  */
/* keep its own free list in thread-local storage, and call             */
/* GC_malloc_many or friends to replenish it.  (We do not round up      */
/* object sizes, since a call indicates the intention to consume many   */
/* objects of exactly this size.)                                        */
/* We assume that the size is a multiple of GRANULE_BYTES.               */
/* We return the free-list by assigning it to *result, since it is      */
/* not safe to return, e.g. a linked list of pointer-free objects,      */
/* since the collector would not retain the entire list if it were      */
/* invoked just as we were returning.                                    */
/* Note that the client should usually clear the link field.            */
GC_API void GC_CALL GC_generic_malloc_many(size_t lb, int k, void **result)
{
    size_t lw;                  /* Length in words.     */
    size_t lg;                  /* Length in granules.  */
    signed_word my_bytes_allocd = 0;
    struct obj_kind * ok = &(GC_obj_kinds[k]);

    GC_ASSERT(lb != 0 && (lb & (GRANULE_BYTES-1)) == 0);
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if (0 != op) obj_link(op) = 0;
    lw = BYTES_TO_WORDS(lb);
    lg = BYTES_TO_GRANULES(lb);
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    if (!GC_is_initialized) GC_init();
    /* Do our share of marking work */
    if (GC_incremental && !GC_dont_gc) {
        GC_collect_a_little_inner(1);
    /* First see if we can reclaim a page of objects waiting to be     */
    /* reclaimed.                                                      */
    struct hblk ** rlh = ok -> ok_reclaim_list;
    while ((hbp = *rlh) != 0) {
        *rlh = hhdr -> hb_next;
        GC_ASSERT(hhdr -> hb_sz == lb);
        hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#       ifdef PARALLEL_MARK
          signed_word my_bytes_allocd_tmp = GC_bytes_allocd_tmp;

          GC_ASSERT(my_bytes_allocd_tmp >= 0);
          /* We only decrement it while holding the GC lock.   */
          /* Thus we can't accidentally adjust it down in more */
          /* than one thread simultaneously.                   */
          if (my_bytes_allocd_tmp != 0) {
            (void)AO_fetch_and_add(
                        (volatile AO_t *)(&GC_bytes_allocd_tmp),
                        (AO_t)(-my_bytes_allocd_tmp));
            GC_bytes_allocd += my_bytes_allocd_tmp;
          }
          GC_acquire_mark_lock();
          ++ GC_fl_builder_count;
          GC_release_mark_lock();

        op = GC_reclaim_generic(hbp, hhdr, lb,
                                ok -> ok_init, 0, &my_bytes_allocd);
        /* We also reclaimed memory, so we need to adjust that count. */
        /* This should be atomic, so the results may be inaccurate.   */
        GC_bytes_found += my_bytes_allocd;
#       ifdef PARALLEL_MARK
          (void)AO_fetch_and_add(
                        (volatile AO_t *)(&GC_bytes_allocd_tmp),
                        (AO_t)(my_bytes_allocd));
          GC_acquire_mark_lock();
          -- GC_fl_builder_count;
          if (GC_fl_builder_count == 0) GC_notify_all_builder();
          GC_release_mark_lock();
          (void) GC_clear_stack(0);

        GC_bytes_allocd += my_bytes_allocd;

#       ifdef PARALLEL_MARK
          GC_acquire_mark_lock();
          -- GC_fl_builder_count;
          if (GC_fl_builder_count == 0) GC_notify_all_builder();
          GC_release_mark_lock();

        /* GC lock is needed for reclaim list access.  We       */
        /* must decrement fl_builder_count before reacquiring GC */
        /* lock.  Hopefully this path is rare.                  */

    /* Next try to use prefix of global free list if there is one.     */
    /* We don't refill it, but we need to use it up before allocating  */
    /* a new block ourselves.                                          */
    opp = &(GC_obj_kinds[k].ok_freelist[lg]);
    if ( (op = *opp) != 0 ) {
        for (p = op; p != 0; p = obj_link(p)) {
            my_bytes_allocd += lb;
            if ((word)my_bytes_allocd >= HBLKSIZE) {
        GC_bytes_allocd += my_bytes_allocd;

    /* Next try to allocate a new block worth of objects of this size. */
    struct hblk *h = GC_allochblk(lb, k, 0);
    if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
    GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
#   ifdef PARALLEL_MARK
      GC_acquire_mark_lock();
      ++ GC_fl_builder_count;
      GC_release_mark_lock();
      op = GC_build_fl(h, lw,
                       (ok -> ok_init || GC_debugging_started), 0);
      GC_acquire_mark_lock();
      -- GC_fl_builder_count;
      if (GC_fl_builder_count == 0) GC_notify_all_builder();
      GC_release_mark_lock();
      (void) GC_clear_stack(0);

    op = GC_build_fl(h, lw, (ok -> ok_init || GC_debugging_started), 0);

    /* As a last attempt, try allocating a single object.  Note that */
    /* this may trigger a collection or expand the heap.             */
    op = GC_generic_malloc_inner(lb, k);
    if (0 != op) obj_link(op) = 0;

    (void) GC_clear_stack(0);

/* Note that the "atomic" version of this would be unsafe, since the   */
/* links would not be seen by the collector.                           */
GC_API void * GC_CALL GC_malloc_many(size_t lb)
{
    void *result;
    GC_generic_malloc_many((lb + EXTRA_BYTES + GRANULE_BYTES-1)
                               & ~(GRANULE_BYTES-1),
                           NORMAL, &result);
    return result;
}
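
/* A usage sketch (illustrative only, excluded from the build): a thread */
/* keeps a private free list of same-sized objects and refills it with a */
/* single GC_malloc_many call, so the allocation lock is taken once per  */
/* refill rather than once per object.  GC_NEXT() from gc.h reads the    */
/* link stored in the first word; the client clears it before use.       */
#if 0
  static void *my_free_list = NULL;       /* ideally thread-local storage */

  static void * my_fast_alloc(size_t lb)  /* lb is fixed for this list */
  {
    void *p = my_free_list;

    if (p == NULL) {
      p = GC_malloc_many(lb);   /* objects linked through first word */
      if (p == NULL) return NULL;
    }
    my_free_list = GC_NEXT(p);  /* unlink the head of the list */
    GC_NEXT(p) = NULL;          /* clear the link field before use */
    return p;
  }
#endif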

/* Not well tested nor integrated.                 */
/* Debug version is tricky and currently missing.  */
GC_API void * GC_CALL GC_memalign(size_t align, size_t lb)
{
    size_t new_lb;
    size_t offset;
    ptr_t result;

    if (align <= GRANULE_BYTES) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) {
            return (*GC_get_oom_fn())(LONG_MAX-1024); /* Fail */
        }
        return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
            /* Will be HBLKSIZE aligned. */
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
    new_lb = lb + align - 1;
    result = GC_malloc(new_lb);
    offset = (word)result % align;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
            GC_register_displacement(offset);
        }
    }
    result = (void *) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
}
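
/* Illustrative only (excluded from the build): request storage with a   */
/* stricter alignment, e.g. for cache-line or SIMD friendliness; the     */
/* result is a normal collectible object.                                */
#if 0
  static float * alloc_aligned_floats(size_t n)
  {
    return GC_memalign(64, n * sizeof(float));  /* 64-byte aligned */
  }
#endif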

/* This one exists largely to redirect posix_memalign for leak finding. */
GC_API int GC_CALL GC_posix_memalign(void **memptr, size_t align, size_t lb)
{
    /* Check alignment properly. */
    if (((align - 1) & align) != 0 || align < sizeof(void *)) {
        return ERROR_INVALID_PARAMETER;
    if ((*memptr = GC_memalign(align, lb)) == NULL) {
        return ERROR_NOT_ENOUGH_MEMORY;
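
/* Illustrative only (excluded from the build): GC_posix_memalign        */
/* follows the posix_memalign() convention, returning zero on success    */
/* and a nonzero error code, rather than setting errno, on failure.      */
#if 0
  static void * alloc_page_aligned(size_t lb)
  {
    void *p;

    return GC_posix_memalign(&p, 4096, lb) == 0 ? p : NULL;
  }
#endif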

#ifdef ATOMIC_UNCOLLECTABLE
  /* Allocate lb bytes of pointerfree, untraced, uncollectable data.  */
  /* This is normally roughly equivalent to the system malloc.        */
  /* But it may be useful if malloc is redefined.                     */
  GC_API void * GC_CALL GC_malloc_atomic_uncollectable(size_t lb)
  {
    if( SMALL_OBJ(lb) ) {
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be */
                  /* collected anyway.                                 */
        lg = GC_size_map[lb];
        opp = &(GC_auobjfreelist[lg]);
        if( (op = *opp) != 0 ) {
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
        op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
        GC_ASSERT(0 == op || GC_is_marked(op));
        op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
        if (0 == op) return(0);
        GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0);
        set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
        GC_ASSERT(hhdr -> hb_n_marks == 0);
        hhdr -> hb_n_marks = 1;
#endif /* ATOMIC_UNCOLLECTABLE */
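
/* Illustrative only (excluded from the build): AUNCOLLECTABLE objects   */
/* are neither traced nor reclaimed automatically, so they behave much   */
/* like system malloc storage and must be released with GC_free.         */
#if 0
  static void scratch_buffer_demo(void)
  {
    unsigned char *buf = GC_malloc_atomic_uncollectable(64 * 1024);

    if (buf != NULL) {
      /* ... fill and use buf; the collector never scans or frees it ... */
      GC_free(buf);             /* explicit deallocation is required */
    }
  }
#endif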

/* Provide a version of strdup() that uses the collector to allocate   */
/* the copy of the string.                                              */
GC_API char * GC_CALL GC_strdup(const char *s)
{
  if (s == NULL) return NULL;
  if ((copy = GC_malloc_atomic(lb)) == NULL) {
    /* strcpy() is deprecated in WinCE */

GC_API char * GC_CALL GC_strndup(const char *str, size_t size)
{
  size_t len = strlen(str); /* str is expected to be non-NULL */
  copy = GC_malloc_atomic(len + 1);
  BCOPY(str, copy, len);

#ifdef GC_REQUIRE_WCSDUP
# include <wchar.h> /* for wcslen() */

  GC_API wchar_t * GC_CALL GC_wcsdup(const wchar_t *str)
  {
    size_t lb = (wcslen(str) + 1) * sizeof(wchar_t);
    wchar_t *copy = GC_malloc_atomic(lb);
    BCOPY(str, copy, lb);
#endif /* GC_REQUIRE_WCSDUP */
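
/* Illustrative only (excluded from the build): the copy returned by     */
/* GC_strdup is pointer-free (atomic) and garbage collected, so callers  */
/* never free it explicitly.                                             */
#if 0
  static char * remember_name(const char *name)
  {
    char *copy = GC_strdup(name);       /* NULL on allocation failure */

    /* copy stays alive as long as something points to it. */
    return copy;
  }
#endif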