2 Default header file for malloc-2.8.x, written by Doug Lea
3 and released to the public domain, as explained at
4 http://creativecommons.org/licenses/publicdomain.
6 last update: Mon Aug 15 08:55:52 2005 Doug Lea (dl at gee)
8 This header is for ANSI C/C++ only. You can set any of
9 the following #defines before including:
11 * If USE_DL_PREFIX is defined, it is assumed that malloc.c
12 was also compiled with this option, so all routines
13 have names starting with "dl".
15 * If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this
16 file will be #included AFTER <malloc.h>. This is needed only if
17 your system defines a struct mallinfo that is incompatible with the
18 standard one declared here. Otherwise, you can include this file
19 INSTEAD of your system <malloc.h>. At least on ANSI, all
20 declarations should be compatible with system versions
22 * If MSPACES is defined, declarations for mspace versions are included.
32 #include <stddef.h> /* for size_t */
33 #include <mono/utils/mono-compiler.h>
38 #define dlcalloc calloc
40 #define dlmalloc malloc
41 #define dlmemalign memalign
42 #define dlrealloc realloc
43 #define dlvalloc valloc
44 #define dlpvalloc pvalloc
45 #define dlmallinfo mallinfo
46 #define dlmallopt mallopt
47 #define dlmalloc_trim malloc_trim
48 #define dlmalloc_stats malloc_stats
49 #define dlmalloc_usable_size malloc_usable_size
50 #define dlmalloc_footprint malloc_footprint
51 #define dlindependent_calloc independent_calloc
52 #define dlindependent_comalloc independent_comalloc
53 #endif /* USE_DL_PREFIX */
55 #ifdef ENABLE_EXTENSION_MODULE
56 #include "../../../mono-extensions/mono/utils/dlmalloc.h"
61 Returns a pointer to a newly allocated chunk of at least n bytes, or
62 null if no space is available, in which case errno is set to ENOMEM
65 If n is zero, malloc returns a minimum-sized chunk. (The minimum
66 size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
67 systems.) Note that size_t is an unsigned type, so calls with
68 arguments that would be negative if signed are interpreted as
69 requests for huge amounts of space, which will often fail. The
70 maximum supported value of n differs across systems, but is in all
71 cases less than the maximum representable value of a size_t.
73 void* dlmalloc(size_t);
77 Releases the chunk of memory pointed to by p, that had been previously
78 allocated using malloc or a related routine such as realloc.
79 It has no effect if p is null. If p was not malloced or already
80 freed, free(p) will by default cause the current program to abort.
85 calloc(size_t n_elements, size_t element_size);
86 Returns a pointer to n_elements * element_size bytes, with all locations
89 void* dlcalloc(size_t, size_t);
92 realloc(void* p, size_t n)
93 Returns a pointer to a chunk of size n that contains the same data
94 as does chunk p up to the minimum of (n, p's size) bytes, or null
95 if no space is available.
97 The returned pointer may or may not be the same as p. The algorithm
98 prefers extending p in most cases when possible, otherwise it
99 employs the equivalent of a malloc-copy-free sequence.
101 If p is null, realloc is equivalent to malloc.
103 If space is not available, realloc returns null, errno is set (if on
104 ANSI) and p is NOT freed.
106 if n is for fewer bytes than already held by p, the newly unused
107 space is lopped off and freed if possible. realloc with a size
108 argument of zero (re)allocates a minimum-sized chunk.
110 The old unix realloc convention of allowing the last-free'd chunk
111 to be used as an argument to realloc is not supported.
114 void* dlrealloc(void*, size_t);
117 memalign(size_t alignment, size_t n);
118 Returns a pointer to a newly allocated chunk of n bytes, aligned
119 in accord with the alignment argument.
121 The alignment argument should be a power of two. If the argument is
122 not a power of two, the nearest greater power is used.
123 8-byte alignment is guaranteed by normal malloc calls, so don't
124 bother calling memalign with an argument of 8 or less.
126 Overreliance on memalign is a sure way to fragment space.
128 void* dlmemalign(size_t, size_t);
132 Equivalent to memalign(pagesize, n), where pagesize is the page
133 size of the system. If the pagesize is unknown, 4096 is used.
135 void* dlvalloc(size_t);
138 mallopt(int parameter_number, int parameter_value)
139 Sets tunable parameters. The format is to provide a
140 (parameter-number, parameter-value) pair. mallopt then sets the
141 corresponding parameter to the argument value if it can (i.e., so
142 long as the value is meaningful), and returns 1 if successful else
143 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
144 normally defined in malloc.h. None of these are used in this malloc,
145 so setting them has no effect. But this malloc also supports other
148 Symbol param # default allowed param values
149 M_TRIM_THRESHOLD -1 2*1024*1024 any (-1U disables trimming)
150 M_GRANULARITY -2 page size any power of 2 >= page size
151 M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
153 int dlmallopt(int, int);
155 #define M_TRIM_THRESHOLD (-1)
156 #define M_GRANULARITY (-2)
157 #define M_MMAP_THRESHOLD (-3)
162 Returns the number of bytes obtained from the system. The total
163 number of bytes allocated by malloc, realloc etc., is less than this
164 value. Unlike mallinfo, this function returns only a precomputed
165 result, so can be called frequently to monitor memory consumption.
166 Even if locks are otherwise defined, this function does not use them,
167 so results might not be up to date.
169 size_t dlmalloc_footprint(void);
174 Returns (by copy) a struct containing various summary statistics:
176 arena: current total non-mmapped bytes allocated from system
177 ordblks: the number of free chunks
179 hblks: current number of mmapped regions
180 hblkhd: total bytes held in mmapped regions
181 usmblks: the maximum total allocated space. This will be greater
182 than current total if trimming has occurred.
184 uordblks: current total allocated space (normal or mmapped)
185 fordblks: total free space
186 keepcost: the maximum number of bytes that could ideally be released
187 back to system via malloc_trim. ("ideally" means that
188 it ignores page restrictions etc.)
190 Because these fields are ints, but internal bookkeeping may
191 be kept as longs, the reported values may wrap around zero and
194 #ifndef HAVE_USR_INCLUDE_MALLOC_H
196 #ifndef MALLINFO_FIELD_TYPE
197 #define MALLINFO_FIELD_TYPE size_t
198 #endif /* MALLINFO_FIELD_TYPE */
200 MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
201 MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
202 MALLINFO_FIELD_TYPE smblks; /* always 0 */
203 MALLINFO_FIELD_TYPE hblks; /* always 0 */
204 MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
205 MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */
206 MALLINFO_FIELD_TYPE fsmblks; /* always 0 */
207 MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
208 MALLINFO_FIELD_TYPE fordblks; /* total free space */
209 MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
211 #endif /* _MALLOC_H */
212 #endif /* HAVE_USR_INCLUDE_MALLOC_H */
214 struct mallinfo dlmallinfo(void);
215 #endif /* NO_MALLINFO */
218 independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
220 independent_calloc is similar to calloc, but instead of returning a
221 single cleared space, it returns an array of pointers to n_elements
222 independent elements that can hold contents of size elem_size, each
223 of which starts out cleared, and can be independently freed,
224 realloc'ed etc. The elements are guaranteed to be adjacently
225 allocated (this is not guaranteed to occur with multiple callocs or
226 mallocs), which may also improve cache locality in some
229 The "chunks" argument is optional (i.e., may be null, which is
230 probably the most typical usage). If it is null, the returned array
231 is itself dynamically allocated and should also be freed when it is
232 no longer needed. Otherwise, the chunks array must be of at least
233 n_elements in length. It is filled in with the pointers to the
236 In either case, independent_calloc returns this pointer array, or
237 null if the allocation failed. If n_elements is zero and "chunks"
238 is null, it returns a chunk representing an array with zero elements
239 (which should be freed if not wanted).
241 Each element must be individually freed when it is no longer
242 needed. If you'd like to instead be able to free all at once, you
243 should instead use regular calloc and assign pointers into this
244 space to represent elements. (In this case though, you cannot
245 independently free elements.)
247 independent_calloc simplifies and speeds up implementations of many
248 kinds of pools. It may also be useful when constructing large data
249 structures that initially have a fixed number of fixed-sized nodes,
250 but the number is not known at compile time, and some of the nodes
251 may later need to be freed. For example:
253 struct Node { int item; struct Node* next; };
255 struct Node* build_list() {
257 int n = read_number_of_nodes_needed();
258 if (n <= 0) return 0;
259 pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0));
260 if (pool == 0) die();
261 // organize into a linked list...
262 struct Node* first = pool[0];
263 for (i = 0; i < n-1; ++i)
264 pool[i]->next = pool[i+1];
265 free(pool); // Can now free the array (or not, if it is needed later)
269 void** dlindependent_calloc(size_t, size_t, void**);
272 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
274 independent_comalloc allocates, all at once, a set of n_elements
275 chunks with sizes indicated in the "sizes" array. It returns
276 an array of pointers to these elements, each of which can be
277 independently freed, realloc'ed etc. The elements are guaranteed to
278 be adjacently allocated (this is not guaranteed to occur with
279 multiple callocs or mallocs), which may also improve cache locality
280 in some applications.
282 The "chunks" argument is optional (i.e., may be null). If it is null
283 the returned array is itself dynamically allocated and should also
284 be freed when it is no longer needed. Otherwise, the chunks array
285 must be of at least n_elements in length. It is filled in with the
286 pointers to the chunks.
288 In either case, independent_comalloc returns this pointer array, or
289 null if the allocation failed. If n_elements is zero and chunks is
290 null, it returns a chunk representing an array with zero elements
291 (which should be freed if not wanted).
293 Each element must be individually freed when it is no longer
294 needed. If you'd like to instead be able to free all at once, you
295 should instead use a single regular malloc, and assign pointers at
296 particular offsets in the aggregate space. (In this case though, you
297 cannot independently free elements.)
299 independent_comalloc differs from independent_calloc in that each
300 element may have a different size, and also that it does not
301 automatically clear elements.
303 independent_comalloc can be used to speed up allocation in cases
304 where several structs or objects must always be allocated at the
305 same time. For example:
310 void send_message(char* msg) {
311 int msglen = strlen(msg);
312 size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
314 if (independent_comalloc(3, sizes, chunks) == 0)
316 struct Head* head = (struct Head*)(chunks[0]);
317 char* body = (char*)(chunks[1]);
318 struct Foot* foot = (struct Foot*)(chunks[2]);
322 In general though, independent_comalloc is worth using only for
323 larger values of n_elements. For small values, you probably won't
324 detect enough difference from series of malloc calls to bother.
326 Overuse of independent_comalloc can increase overall memory usage,
327 since it cannot reuse existing noncontiguous small chunks that
328 might be available for some of the elements.
330 void** dlindependent_comalloc(size_t, size_t*, void**);
335 Equivalent to valloc(minimum-page-that-holds(n)), that is,
336 round up n to nearest pagesize.
338 void* dlpvalloc(size_t);
341 malloc_trim(size_t pad);
343 If possible, gives memory back to the system (via negative arguments
344 to sbrk) if there is unused memory at the `high' end of the malloc
345 pool or in unused MMAP segments. You can call this after freeing
346 large blocks of memory to potentially reduce the system-level memory
347 requirements of a program. However, it cannot guarantee to reduce
348 memory. Under some allocation patterns, some large free blocks of
349 memory will be locked between two used chunks, so they cannot be
350 given back to the system.
352 The `pad' argument to malloc_trim represents the amount of free
353 trailing space to leave untrimmed. If this argument is zero, only
354 the minimum amount of memory to maintain internal data structures
355 will be left. Non-zero arguments can be supplied to maintain enough
356 trailing space to service future expected allocations without having
357 to re-obtain memory from the system.
359 Malloc_trim returns 1 if it actually released any memory, else 0.
361 int dlmalloc_trim(size_t);
364 malloc_usable_size(void* p);
366 Returns the number of bytes you can actually use in
367 an allocated chunk, which may be more than you requested (although
368 often not) due to alignment and minimum size constraints.
369 You can use this many bytes without worrying about
370 overwriting other allocated objects. This is not a particularly great
371 programming practice. malloc_usable_size can be more useful in
372 debugging and assertions, for example:
375 assert(malloc_usable_size(p) >= 256);
377 size_t dlmalloc_usable_size(void*);
381 Prints on stderr the amount of space obtained from the system (both
382 via sbrk and mmap), the maximum amount (which may be more than
383 current if malloc_trim and/or munmap got called), and the current
384 number of bytes allocated via malloc (or realloc, etc) but not yet
385 freed. Note that this is the number of bytes allocated, not the
386 number requested. It will be larger than the number requested
387 because of alignment and bookkeeping overhead. Because it includes
388 alignment wastage as being in use, this figure may be greater than
389 zero even when no user-level chunks are allocated.
391 The reported current and maximum system memory can be inaccurate if
392 a program makes other calls to system memory allocation functions
393 (normally sbrk) outside of malloc.
395 malloc_stats prints only the most commonly interesting statistics.
396 More information can be obtained by calling mallinfo.
398 void dlmalloc_stats(void);
400 #endif /* !ONLY_MSPACES */
405 mspace is an opaque type representing an independent
406 region of space that supports mspace_malloc, etc.
408 typedef void* mspace;
411 create_mspace creates and returns a new independent space with the
412 given initial capacity, or, if 0, the default granularity size. It
413 returns null if there is no system memory available to create the
414 space. If argument locked is non-zero, the space uses a separate
415 lock to control access. The capacity of the space will grow
416 dynamically as needed to service mspace_malloc requests. You can
417 control the sizes of incremental increases of this space by
418 compiling with a different DEFAULT_GRANULARITY or dynamically
419 setting with mallopt(M_GRANULARITY, value).
421 mspace create_mspace(size_t capacity, int locked);
424 destroy_mspace destroys the given space, and attempts to return all
425 of its memory back to the system, returning the total number of
426 bytes freed. After destruction, the results of access to all memory
427 used by the space become undefined.
429 size_t destroy_mspace(mspace msp);
432 create_mspace_with_base uses the memory supplied as the initial base
433 of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
434 space is used for bookkeeping, so the capacity must be at least this
435 large. (Otherwise 0 is returned.) When this initial space is
436 exhausted, additional memory will be obtained from the system.
437 Destroying this space will deallocate all additionally allocated
438 space (if possible) but not the initial base.
440 mspace create_mspace_with_base(void* base, size_t capacity, int locked);
443 mspace_malloc behaves as malloc, but operates within
446 void* mspace_malloc(mspace msp, size_t bytes);
449 mspace_free behaves as free, but operates within
452 If compiled with FOOTERS==1, mspace_free is not actually needed.
453 free may be called instead of mspace_free because freed chunks from
454 any space are handled by their originating spaces.
456 void mspace_free(mspace msp, void* mem);
459 mspace_realloc behaves as realloc, but operates within
462 If compiled with FOOTERS==1, mspace_realloc is not actually
463 needed. realloc may be called instead of mspace_realloc because
464 realloced chunks from any space are handled by their originating
467 void* mspace_realloc(mspace msp, void* mem, size_t newsize);
470 mspace_calloc behaves as calloc, but operates within
473 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
476 mspace_memalign behaves as memalign, but operates within
479 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
482 mspace_independent_calloc behaves as independent_calloc, but
483 operates within the given space.
485 void** mspace_independent_calloc(mspace msp, size_t n_elements,
486 size_t elem_size, void* chunks[]);
489 mspace_independent_comalloc behaves as independent_comalloc, but
490 operates within the given space.
492 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
493 size_t sizes[], void* chunks[]);
496 mspace_footprint() returns the number of bytes obtained from the
497 system for this space.
499 size_t mspace_footprint(mspace msp);
504 mspace_mallinfo behaves as mallinfo, but reports properties of
507 struct mallinfo mspace_mallinfo(mspace msp);
508 #endif /* NO_MALLINFO */
511 mspace_malloc_stats behaves as malloc_stats, but reports
512 properties of the given space.
514 void mspace_malloc_stats(mspace msp);
517 mspace_trim behaves as malloc_trim, but
518 operates within the given space.
520 int mspace_trim(mspace msp, size_t pad);
523 An alias for mallopt.
525 int mspace_mallopt(int, int);
530 }; /* end of extern "C" */
533 #endif /* MALLOC_280_H */