1/* Malloc implementation for multiple threads without lock contention.
2 Copyright (C) 1996-2017 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
4 Contributed by Wolfram Gloger <wg@malloc.de>
5 and Doug Lea <dl@cs.oswego.edu>, 2001.
6
7 The GNU C Library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public License as
9 published by the Free Software Foundation; either version 2.1 of the
10 License, or (at your option) any later version.
11
12 The GNU C Library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public
18 License along with the GNU C Library; see the file COPYING.LIB. If
19 not, see <http://www.gnu.org/licenses/>. */
20
21/*
22 This is a version (aka ptmalloc2) of malloc/free/realloc written by
23 Doug Lea and adapted to multiple threads/arenas by Wolfram Gloger.
24
25 There have been substantial changes made after the integration into
26 glibc in all parts of the code. Do not look for much commonality
27 with the ptmalloc2 version.
28
29* Version ptmalloc2-20011215
30 based on:
31 VERSION 2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
32
33* Quickstart
34
35 In order to compile this implementation, a Makefile is provided with
36 the ptmalloc2 distribution, which has pre-defined targets for some
37 popular systems (e.g. "make posix" for Posix threads). All that is
38 typically required with regard to compiler flags is the selection of
39 the thread package via defining one out of USE_PTHREADS, USE_THR or
40 USE_SPROC. Check the thread-m.h file for what effects this has.
41 Many/most systems will additionally require USE_TSD_DATA_HACK to be
42 defined, so this is the default for "make posix".
43
44* Why use this malloc?
45
46 This is not the fastest, most space-conserving, most portable, or
47 most tunable malloc ever written. However it is among the fastest
48 while also being among the most space-conserving, portable and tunable.
49 Consistent balance across these factors results in a good general-purpose
50 allocator for malloc-intensive programs.
51
52 The main properties of the algorithms are:
53 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
54 with ties normally decided via FIFO (i.e. least recently used).
55 * For small (<= 64 bytes by default) requests, it is a caching
56 allocator, that maintains pools of quickly recycled chunks.
57 * In between, and for combinations of large and small requests, it does
58 the best it can trying to meet both goals at once.
59 * For very large requests (>= 128KB by default), it relies on system
60 memory mapping facilities, if supported.
61
62 For a longer but slightly out of date high-level description, see
63 http://gee.cs.oswego.edu/dl/html/malloc.html
64
65 You may already by default be using a C library containing a malloc
66 that is based on some version of this malloc (for example in
67 linux). You might still want to use the one in this file in order to
68 customize settings or to avoid overheads associated with library
69 versions.
70
71* Contents, described in more detail in "description of public routines" below.
72
73 Standard (ANSI/SVID/...) functions:
74 malloc(size_t n);
75 calloc(size_t n_elements, size_t element_size);
76 free(void* p);
77 realloc(void* p, size_t n);
78 memalign(size_t alignment, size_t n);
79 valloc(size_t n);
80 mallinfo()
81 mallopt(int parameter_number, int parameter_value)
82
83 Additional functions:
84 independent_calloc(size_t n_elements, size_t size, void* chunks[]);
85 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
86 pvalloc(size_t n);
87 cfree(void* p);
88 malloc_trim(size_t pad);
89 malloc_usable_size(void* p);
90 malloc_stats();
91
92* Vital statistics:
93
94 Supported pointer representation: 4 or 8 bytes
95 Supported size_t representation: 4 or 8 bytes
96 Note that size_t is allowed to be 4 bytes even if pointers are 8.
97 You can adjust this by defining INTERNAL_SIZE_T
98
99 Alignment: 2 * sizeof(size_t) (default)
100 (i.e., 8 byte alignment with 4byte size_t). This suffices for
101 nearly all current machines and C compilers. However, you can
102 define MALLOC_ALIGNMENT to be wider than this if necessary.
103
104 Minimum overhead per allocated chunk: 4 or 8 bytes
105 Each malloced chunk has a hidden word of overhead holding size
106 and status information.
107
108 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
                          8-byte ptrs: 24/32 bytes (including 4/8 overhead)
110
111 When a chunk is freed, 12 (for 4byte ptrs) or 20 (for 8 byte
112 ptrs but 4 byte size) or 24 (for 8/8) additional bytes are
113 needed; 4 (8) for a trailing size field and 8 (16) bytes for
114 free list pointers. Thus, the minimum allocatable size is
115 16/24/32 bytes.
116
117 Even a request for zero bytes (i.e., malloc(0)) returns a
118 pointer to something of the minimum allocatable size.
119
120 The maximum overhead wastage (i.e., number of extra bytes
121 allocated than were requested in malloc) is less than or equal
122 to the minimum size, except for requests >= mmap_threshold that
123 are serviced via mmap(), where the worst case wastage is 2 *
124 sizeof(size_t) bytes plus the remainder from a system page (the
125 minimal mmap unit); typically 4096 or 8192 bytes.
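
  For example, assuming 8-byte size_t and 16-byte alignment, a request
  for 25 bytes is carried in a 48-byte chunk (25 requested + 8 bytes of
  header, rounded up to a multiple of 16), so 23 extra bytes are
  allocated -- within the minimum-size bound stated above.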
126
127 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
128 8-byte size_t: 2^64 minus about two pages
129
130 It is assumed that (possibly signed) size_t values suffice to
131 represent chunk sizes. `Possibly signed' is due to the fact
132 that `size_t' may be defined on a system as either a signed or
133 an unsigned type. The ISO C standard says that it must be
134 unsigned, but a few systems are known not to adhere to this.
135 Additionally, even when size_t is unsigned, sbrk (which is by
136 default used to obtain memory from system) accepts signed
137 arguments, and may not be able to handle size_t-wide arguments
138 with negative sign bit. Generally, values that would
139 appear as negative after accounting for overhead and alignment
140 are supported only via mmap(), which does not have this
141 limitation.
142
143 Requests for sizes outside the allowed range will perform an optional
  failure action and then return null. (Requests may also
  fail because a system is out of memory.)
146
147 Thread-safety: thread-safe
148
149 Compliance: I believe it is compliant with the 1997 Single Unix Specification
150 Also SVID/XPG, ANSI C, and probably others as well.
151
152* Synopsis of compile-time options:
153
154 People have reported using previous versions of this malloc on all
155 versions of Unix, sometimes by tweaking some of the defines
156 below. It has been tested most extensively on Solaris and Linux.
157 People also report using it in stand-alone embedded systems.
158
159 The implementation is in straight, hand-tuned ANSI C. It is not
160 at all modular. (Sorry!) It uses a lot of macros. To be at all
161 usable, this code should be compiled using an optimizing compiler
162 (for example gcc -O3) that can simplify expressions and control
163 paths. (FAQ: some macros import variables as arguments rather than
164 declare locals because people reported that some debuggers
165 otherwise get confused.)
166
167 OPTION DEFAULT VALUE
168
169 Compilation Environment options:
170
171 HAVE_MREMAP 0
172
173 Changing default word sizes:
174
175 INTERNAL_SIZE_T size_t
176
177 Configuration and functionality options:
178
179 USE_PUBLIC_MALLOC_WRAPPERS NOT defined
180 USE_MALLOC_LOCK NOT defined
181 MALLOC_DEBUG NOT defined
182 REALLOC_ZERO_BYTES_FREES 1
183 TRIM_FASTBINS 0
184
185 Options for customizing MORECORE:
186
187 MORECORE sbrk
188 MORECORE_FAILURE -1
189 MORECORE_CONTIGUOUS 1
190 MORECORE_CANNOT_TRIM NOT defined
191 MORECORE_CLEARS 1
192 MMAP_AS_MORECORE_SIZE (1024 * 1024)
193
194 Tuning options that are also dynamically changeable via mallopt:
195
196 DEFAULT_MXFAST 64 (for 32bit), 128 (for 64bit)
197 DEFAULT_TRIM_THRESHOLD 128 * 1024
198 DEFAULT_TOP_PAD 0
199 DEFAULT_MMAP_THRESHOLD 128 * 1024
200 DEFAULT_MMAP_MAX 65536
201
202 There are several other #defined constants and macros that you
203 probably don't want to touch unless you are extending or adapting malloc. */
204
205/*
206 void* is the pointer type that malloc should say it returns
207*/
208
209#ifndef void
210#define void void
211#endif /*void*/
212
213#include <stddef.h> /* for size_t */
214#include <stdlib.h> /* for getenv(), abort() */
215#include <unistd.h> /* for __libc_enable_secure */
216
217#include <atomic.h>
218#include <_itoa.h>
219#include <bits/wordsize.h>
220#include <sys/sysinfo.h>
221
222#include <ldsodefs.h>
223
224#include <unistd.h>
225#include <stdio.h> /* needed for malloc_stats */
226#include <errno.h>
227
228#include <shlib-compat.h>
229
230/* For uintptr_t. */
231#include <stdint.h>
232
233/* For va_arg, va_start, va_end. */
234#include <stdarg.h>
235
236/* For MIN, MAX, powerof2. */
237#include <sys/param.h>
238
239/* For ALIGN_UP et. al. */
240#include <libc-internal.h>
241
242#include <malloc/malloc-internal.h>
243
244/*
245 Debugging:
246
247 Because freed chunks may be overwritten with bookkeeping fields, this
248 malloc will often die when freed memory is overwritten by user
249 programs. This can be very effective (albeit in an annoying way)
250 in helping track down dangling pointers.
251
252 If you compile with -DMALLOC_DEBUG, a number of assertion checks are
253 enabled that will catch more memory errors. You probably won't be
254 able to make much sense of the actual assertion errors, but they
255 should help you locate incorrectly overwritten memory. The checking
256 is fairly extensive, and will slow down execution
257 noticeably. Calling malloc_stats or mallinfo with MALLOC_DEBUG set
258 will attempt to check every non-mmapped allocated and free chunk in
  the course of computing the summaries. (By nature, mmapped regions
260 cannot be checked very much automatically.)
261
262 Setting MALLOC_DEBUG may also be helpful if you are trying to modify
263 this code. The assertions in the check routines spell out in more
264 detail the assumptions and invariants underlying the algorithms.
265
266 Setting MALLOC_DEBUG does NOT provide an automated mechanism for
267 checking that all accesses to malloced memory stay within their
268 bounds. However, there are several add-ons and adaptations of this
269 or other mallocs available that do this.
270*/
271
272#ifndef MALLOC_DEBUG
273#define MALLOC_DEBUG 0
274#endif
275
276#ifdef NDEBUG
277# define assert(expr) ((void) 0)
278#else
279# define assert(expr) \
280 ((expr) \
281 ? ((void) 0) \
282 : __malloc_assert (#expr, __FILE__, __LINE__, __func__))
283
284extern const char *__progname;
285
286static void
287__malloc_assert (const char *assertion, const char *file, unsigned int line,
288 const char *function)
289{
290 (void) __fxprintf (NULL, "%s%s%s:%u: %s%sAssertion `%s' failed.\n",
291 __progname, __progname[0] ? ": " : "",
292 file, line,
293 function ? function : "", function ? ": " : "",
294 assertion);
295 fflush (stderr);
296 abort ();
297}
298#endif
299
300
301/*
302 REALLOC_ZERO_BYTES_FREES should be set if a call to
303 realloc with zero bytes should be the same as a call to free.
304 This is required by the C standard. Otherwise, since this malloc
305 returns a unique pointer for malloc(0), so does realloc(p, 0).
306*/
307
308#ifndef REALLOC_ZERO_BYTES_FREES
309#define REALLOC_ZERO_BYTES_FREES 1
310#endif
311
312/*
313 TRIM_FASTBINS controls whether free() of a very small chunk can
314 immediately lead to trimming. Setting to true (1) can reduce memory
315 footprint, but will almost always slow down programs that use a lot
316 of small chunks.
317
318 Define this only if you are willing to give up some speed to more
319 aggressively reduce system-level memory footprint when releasing
320 memory in programs that use many small chunks. You can get
321 essentially the same effect by setting MXFAST to 0, but this can
322 lead to even greater slowdowns in programs using many small chunks.
323 TRIM_FASTBINS is an in-between compile-time option, that disables
324 only those chunks bordering topmost memory from being placed in
325 fastbins.
326*/
327
328#ifndef TRIM_FASTBINS
329#define TRIM_FASTBINS 0
330#endif
331
332
333/* Definition for getting more memory from the OS. */
334#define MORECORE (*__morecore)
335#define MORECORE_FAILURE 0
336void * __default_morecore (ptrdiff_t);
337void *(*__morecore)(ptrdiff_t) = __default_morecore;
338
339
340#include <string.h>
341
342/*
343 MORECORE-related declarations. By default, rely on sbrk
344*/
345
346
347/*
348 MORECORE is the name of the routine to call to obtain more memory
349 from the system. See below for general guidance on writing
350 alternative MORECORE functions, as well as a version for WIN32 and a
351 sample version for pre-OSX macos.
352*/
353
354#ifndef MORECORE
355#define MORECORE sbrk
356#endif
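
/*
  Illustrative sketch (not part of this file): a minimal MORECORE-style
  routine backed by a fixed static region, for systems without sbrk.
  The names static_heap, static_heap_used and static_morecore are
  hypothetical.  It follows the sbrk conventions assumed here: a
  positive argument extends the region and returns its previous end, a
  zero argument just reports the current end, and failure is reported
  as MORECORE_FAILURE.

    static char      static_heap[4 * 1024 * 1024];
    static ptrdiff_t static_heap_used;

    static void *
    static_morecore (ptrdiff_t increment)
    {
      char *old_top = static_heap + static_heap_used;
      if (increment >= 0
          ? (size_t) increment <= sizeof (static_heap) - (size_t) static_heap_used
          : -increment <= static_heap_used)
        {
          static_heap_used += increment;   // grow or trim the region
          return old_top;                  // previous end, sbrk-style
        }
      return (void *) MORECORE_FAILURE;    // cannot satisfy the request
    }

  A port could, for example, point the __morecore hook declared above
  at such a routine.  Addresses returned for positive increments are
  contiguous and increasing, so MORECORE_CONTIGUOUS would still hold.
*/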
357
358/*
359 MORECORE_FAILURE is the value returned upon failure of MORECORE
360 as well as mmap. Since it cannot be an otherwise valid memory address,
361 and must reflect values of standard sys calls, you probably ought not
362 try to redefine it.
363*/
364
365#ifndef MORECORE_FAILURE
366#define MORECORE_FAILURE (-1)
367#endif
368
369/*
370 If MORECORE_CONTIGUOUS is true, take advantage of fact that
371 consecutive calls to MORECORE with positive arguments always return
372 contiguous increasing addresses. This is true of unix sbrk. Even
373 if not defined, when regions happen to be contiguous, malloc will
374 permit allocations spanning regions obtained from different
375 calls. But defining this when applicable enables some stronger
376 consistency checks and space efficiencies.
377*/
378
379#ifndef MORECORE_CONTIGUOUS
380#define MORECORE_CONTIGUOUS 1
381#endif
382
383/*
384 Define MORECORE_CANNOT_TRIM if your version of MORECORE
385 cannot release space back to the system when given negative
386 arguments. This is generally necessary only if you are using
387 a hand-crafted MORECORE function that cannot handle negative arguments.
388*/
389
390/* #define MORECORE_CANNOT_TRIM */
391
392/* MORECORE_CLEARS (default 1)
393 The degree to which the routine mapped to MORECORE zeroes out
394 memory: never (0), only for newly allocated space (1) or always
395 (2). The distinction between (1) and (2) is necessary because on
396 some systems, if the application first decrements and then
397 increments the break value, the contents of the reallocated space
398 are unspecified.
399 */
400
401#ifndef MORECORE_CLEARS
402# define MORECORE_CLEARS 1
403#endif
404
405
406/*
407 MMAP_AS_MORECORE_SIZE is the minimum mmap size argument to use if
408 sbrk fails, and mmap is used as a backup. The value must be a
409 multiple of page size. This backup strategy generally applies only
410 when systems have "holes" in address space, so sbrk cannot perform
411 contiguous expansion, but there is still space available on system.
412 On systems for which this is known to be useful (i.e. most linux
413 kernels), this occurs only when programs allocate huge amounts of
414 memory. Between this, and the fact that mmap regions tend to be
415 limited, the size should be large, to avoid too many mmap calls and
416 thus avoid running out of kernel resources. */
417
418#ifndef MMAP_AS_MORECORE_SIZE
419#define MMAP_AS_MORECORE_SIZE (1024 * 1024)
420#endif
421
422/*
423 Define HAVE_MREMAP to make realloc() use mremap() to re-allocate
424 large blocks.
425*/
426
427#ifndef HAVE_MREMAP
428#define HAVE_MREMAP 0
429#endif
430
431/* We may need to support __malloc_initialize_hook for backwards
432 compatibility. */
433
434#if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_24)
435# define HAVE_MALLOC_INIT_HOOK 1
436#else
437# define HAVE_MALLOC_INIT_HOOK 0
438#endif
439
440
441/*
442 This version of malloc supports the standard SVID/XPG mallinfo
443 routine that returns a struct containing usage properties and
444 statistics. It should work on any SVID/XPG compliant system that has
445 a /usr/include/malloc.h defining struct mallinfo. (If you'd like to
446 install such a thing yourself, cut out the preliminary declarations
447 as described above and below and save them in a malloc.h file. But
448 there's no compelling reason to bother to do this.)
449
450 The main declaration needed is the mallinfo struct that is returned
  (by-copy) by mallinfo(). The SVID/XPG mallinfo struct contains a
  bunch of fields that are not even meaningful in this version of
  malloc. These fields are instead filled by mallinfo() with
454 other numbers that might be of interest.
455*/
456
457
458/* ---------- description of public routines ------------ */
459
460/*
461 malloc(size_t n)
462 Returns a pointer to a newly allocated chunk of at least n bytes, or null
463 if no space is available. Additionally, on failure, errno is
464 set to ENOMEM on ANSI C systems.
465
  If n is zero, malloc returns a minimum-sized chunk. (The minimum
467 size is 16 bytes on most 32bit systems, and 24 or 32 bytes on 64bit
468 systems.) On most systems, size_t is an unsigned type, so calls
469 with negative arguments are interpreted as requests for huge amounts
470 of space, which will often fail. The maximum supported value of n
471 differs across systems, but is in all cases less than the maximum
472 representable value of a size_t.
473*/
474void* __libc_malloc(size_t);
475libc_hidden_proto (__libc_malloc)
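
/*
  Usage sketch (illustrative only): a failed request returns null and
  reports ENOMEM through errno, as described above.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    char *buf = malloc (100);
    if (buf == NULL)
      perror ("malloc");        // out of space; errno is ENOMEM
    else
      {
        strcpy (buf, "hello");  // at least 100 usable bytes
        free (buf);
      }
*/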
476
477/*
478 free(void* p)
479 Releases the chunk of memory pointed to by p, that had been previously
480 allocated using malloc or a related routine such as realloc.
481 It has no effect if p is null. It can have arbitrary (i.e., bad!)
482 effects if p has already been freed.
483
  Unless disabled (using mallopt), freeing very large spaces will,
  when possible, automatically trigger operations that give
486 back unused memory to the system, thus reducing program footprint.
487*/
488void __libc_free(void*);
489libc_hidden_proto (__libc_free)
490
491/*
492 calloc(size_t n_elements, size_t element_size);
493 Returns a pointer to n_elements * element_size bytes, with all locations
494 set to zero.
495*/
496void* __libc_calloc(size_t, size_t);
497
498/*
499 realloc(void* p, size_t n)
500 Returns a pointer to a chunk of size n that contains the same data
501 as does chunk p up to the minimum of (n, p's size) bytes, or null
502 if no space is available.
503
504 The returned pointer may or may not be the same as p. The algorithm
505 prefers extending p when possible, otherwise it employs the
506 equivalent of a malloc-copy-free sequence.
507
508 If p is null, realloc is equivalent to malloc.
509
510 If space is not available, realloc returns null, errno is set (if on
511 ANSI) and p is NOT freed.
512
  If n is for fewer bytes than already held by p, the newly unused
514 space is lopped off and freed if possible. Unless the #define
515 REALLOC_ZERO_BYTES_FREES is set, realloc with a size argument of
516 zero (re)allocates a minimum-sized chunk.
517
518 Large chunks that were internally obtained via mmap will always
519 be reallocated using malloc-copy-free sequences unless
520 the system supports MREMAP (currently only linux).
521
522 The old unix realloc convention of allowing the last-free'd chunk
523 to be used as an argument to realloc is not supported.
524*/
525void* __libc_realloc(void*, size_t);
526libc_hidden_proto (__libc_realloc)
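
/*
  Usage sketch (illustrative only): because a failed realloc returns
  null WITHOUT freeing p, assigning the result straight back to p
  would leak the old block.  The usual idiom keeps a temporary
  (handle_out_of_memory is a hypothetical recovery routine):

    void *tmp = realloc (p, new_size);
    if (tmp != NULL)
      p = tmp;                  // resized in place or moved
    else
      handle_out_of_memory ();  // p is still valid and still allocated
*/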
527
528/*
529 memalign(size_t alignment, size_t n);
530 Returns a pointer to a newly allocated chunk of n bytes, aligned
531 in accord with the alignment argument.
532
533 The alignment argument should be a power of two. If the argument is
534 not a power of two, the nearest greater power is used.
535 8-byte alignment is guaranteed by normal malloc calls, so don't
536 bother calling memalign with an argument of 8 or less.
537
538 Overreliance on memalign is a sure way to fragment space.
539*/
540void* __libc_memalign(size_t, size_t);
541libc_hidden_proto (__libc_memalign)
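
/*
  Usage sketch (illustrative only): requesting 64-byte alignment, e.g.
  for a cache-line-aligned buffer.

    #include <malloc.h>

    void *p = memalign (64, 1000);
    if (p != NULL)
      {
        // ((uintptr_t) p % 64) == 0 holds here
        free (p);
      }
*/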
542
543/*
544 valloc(size_t n);
545 Equivalent to memalign(pagesize, n), where pagesize is the page
546 size of the system. If the pagesize is unknown, 4096 is used.
547*/
548void* __libc_valloc(size_t);
549
550
551
552/*
553 mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters. The format is to provide a
555 (parameter-number, parameter-value) pair. mallopt then sets the
556 corresponding parameter to the argument value if it can (i.e., so
557 long as the value is meaningful), and returns 1 if successful else
558 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
559 normally defined in malloc.h. Only one of these (M_MXFAST) is used
560 in this malloc. The others (M_NLBLKS, M_GRAIN, M_KEEP) don't apply,
561 so setting them has no effect. But this malloc also supports four
562 other options in mallopt. See below for details. Briefly, supported
563 parameters are as follows (listed defaults are for "typical"
564 configurations).
565
566 Symbol param # default allowed param values
567 M_MXFAST 1 64 0-80 (0 disables fastbins)
568 M_TRIM_THRESHOLD -1 128*1024 any (-1U disables trimming)
569 M_TOP_PAD -2 0 any
570 M_MMAP_THRESHOLD -3 128*1024 any (or 0 if no MMAP support)
571 M_MMAP_MAX -4 65536 any (0 disables use of mmap)
572*/
573int __libc_mallopt(int, int);
574libc_hidden_proto (__libc_mallopt)
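
/*
  Usage sketch (illustrative only): tuning a few of the parameters
  listed above via the symbols from malloc.h; the values shown are
  arbitrary examples.  Each call returns 1 on success.

    #include <malloc.h>

    mallopt (M_MMAP_THRESHOLD, 1024 * 1024); // mmap only chunks >= 1MB
    mallopt (M_TRIM_THRESHOLD, 256 * 1024);  // keep up to 256K of top
    mallopt (M_MXFAST, 0);                   // disable fastbins entirely
*/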
575
576
577/*
578 mallinfo()
579 Returns (by copy) a struct containing various summary statistics:
580
581 arena: current total non-mmapped bytes allocated from system
582 ordblks: the number of free chunks
583 smblks: the number of fastbin blocks (i.e., small chunks that
               have been freed but not yet reused or consolidated)
585 hblks: current number of mmapped regions
586 hblkhd: total bytes held in mmapped regions
587 usmblks: always 0
588 fsmblks: total bytes held in fastbin blocks
589 uordblks: current total allocated space (normal or mmapped)
590 fordblks: total free space
591 keepcost: the maximum number of bytes that could ideally be released
592 back to system via malloc_trim. ("ideally" means that
593 it ignores page restrictions etc.)
594
595 Because these fields are ints, but internal bookkeeping may
596 be kept as longs, the reported values may wrap around zero and
597 thus be inaccurate.
598*/
599struct mallinfo __libc_mallinfo(void);
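
/*
  Usage sketch (illustrative only): reading a few of the fields
  described above.

    #include <malloc.h>
    #include <stdio.h>

    struct mallinfo mi = mallinfo ();
    printf ("non-mmapped bytes from system: %d\n", mi.arena);
    printf ("free chunks:                   %d\n", mi.ordblks);
    printf ("total free bytes:              %d\n", mi.fordblks);
    printf ("trimmable bytes:               %d\n", mi.keepcost);
*/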
600
601
602/*
603 pvalloc(size_t n);
604 Equivalent to valloc(minimum-page-that-holds(n)), that is,
605 round up n to nearest pagesize.
606 */
607void* __libc_pvalloc(size_t);
608
609/*
610 malloc_trim(size_t pad);
611
612 If possible, gives memory back to the system (via negative
613 arguments to sbrk) if there is unused memory at the `high' end of
614 the malloc pool. You can call this after freeing large blocks of
615 memory to potentially reduce the system-level memory requirements
616 of a program. However, it cannot guarantee to reduce memory. Under
617 some allocation patterns, some large free blocks of memory will be
618 locked between two used chunks, so they cannot be given back to
619 the system.
620
621 The `pad' argument to malloc_trim represents the amount of free
622 trailing space to leave untrimmed. If this argument is zero,
623 only the minimum amount of memory to maintain internal data
624 structures will be left (one page or less). Non-zero arguments
625 can be supplied to maintain enough trailing space to service
626 future expected allocations without having to re-obtain memory
627 from the system.
628
629 Malloc_trim returns 1 if it actually released any memory, else 0.
630 On systems that do not support "negative sbrks", it will always
631 return 0.
632*/
633int __malloc_trim(size_t);
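
/*
  Usage sketch (illustrative only): after freeing a large working set,
  ask for unused memory at the top of the heap to be returned, keeping
  no extra pad.

    #include <malloc.h>
    #include <stdio.h>

    if (malloc_trim (0) == 1)
      puts ("some memory was returned to the system");
*/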
634
635/*
636 malloc_usable_size(void* p);
637
638 Returns the number of bytes you can actually use in
639 an allocated chunk, which may be more than you requested (although
640 often not) due to alignment and minimum size constraints.
641 You can use this many bytes without worrying about
642 overwriting other allocated objects. This is not a particularly great
643 programming practice. malloc_usable_size can be more useful in
644 debugging and assertions, for example:
645
646 p = malloc(n);
647 assert(malloc_usable_size(p) >= 256);
648
649*/
650size_t __malloc_usable_size(void*);
651
652/*
653 malloc_stats();
654 Prints on stderr the amount of space obtained from the system (both
655 via sbrk and mmap), the maximum amount (which may be more than
656 current if malloc_trim and/or munmap got called), and the current
657 number of bytes allocated via malloc (or realloc, etc) but not yet
658 freed. Note that this is the number of bytes allocated, not the
659 number requested. It will be larger than the number requested
660 because of alignment and bookkeeping overhead. Because it includes
661 alignment wastage as being in use, this figure may be greater than
662 zero even when no user-level chunks are allocated.
663
664 The reported current and maximum system memory can be inaccurate if
665 a program makes other calls to system memory allocation functions
666 (normally sbrk) outside of malloc.
667
668 malloc_stats prints only the most commonly interesting statistics.
669 More information can be obtained by calling mallinfo.
670
671*/
672void __malloc_stats(void);
673
674/*
675 malloc_get_state(void);
676
677 Returns the state of all malloc variables in an opaque data
678 structure.
679*/
680void* __malloc_get_state(void);
681
682/*
683 malloc_set_state(void* state);
684
685 Restore the state of all malloc variables from data obtained with
686 malloc_get_state().
687*/
688int __malloc_set_state(void*);
689
690/*
691 posix_memalign(void **memptr, size_t alignment, size_t size);
692
693 POSIX wrapper like memalign(), checking for validity of size.
694*/
695int __posix_memalign(void **, size_t, size_t);
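
/*
  Usage sketch (illustrative only): the POSIX interface reports failure
  through its return value rather than through errno.

    #include <stdlib.h>

    void *p = NULL;
    int err = posix_memalign (&p, 4096, 8192);  // page-aligned request
    if (err == 0)
      free (p);
    // otherwise err is EINVAL (bad alignment) or ENOMEM (no memory)
*/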
696
697/* mallopt tuning options */
698
699/*
700 M_MXFAST is the maximum request size used for "fastbins", special bins
701 that hold returned chunks without consolidating their spaces. This
702 enables future requests for chunks of the same size to be handled
703 very quickly, but can increase fragmentation, and thus increase the
704 overall memory footprint of a program.
705
706 This malloc manages fastbins very conservatively yet still
707 efficiently, so fragmentation is rarely a problem for values less
708 than or equal to the default. The maximum supported value of MXFAST
709 is 80. You wouldn't want it any higher than this anyway. Fastbins
710 are designed especially for use with many small structs, objects or
711 strings -- the default handles structs/objects/arrays with sizes up
712 to 8 4byte fields, or small strings representing words, tokens,
713 etc. Using fastbins for larger objects normally worsens
714 fragmentation without improving speed.
715
716 M_MXFAST is set in REQUEST size units. It is internally used in
717 chunksize units, which adds padding and alignment. You can reduce
718 M_MXFAST to 0 to disable all use of fastbins. This causes the malloc
719 algorithm to be a closer approximation of fifo-best-fit in all cases,
720 not just for larger requests, but will generally cause it to be
721 slower.
722*/
723
724
725/* M_MXFAST is a standard SVID/XPG tuning option, usually listed in malloc.h */
726#ifndef M_MXFAST
727#define M_MXFAST 1
728#endif
729
730#ifndef DEFAULT_MXFAST
731#define DEFAULT_MXFAST (64 * SIZE_SZ / 4)
732#endif
733
734
735/*
736 M_TRIM_THRESHOLD is the maximum amount of unused top-most memory
737 to keep before releasing via malloc_trim in free().
738
739 Automatic trimming is mainly useful in long-lived programs.
740 Because trimming via sbrk can be slow on some systems, and can
741 sometimes be wasteful (in cases where programs immediately
742 afterward allocate more large chunks) the value should be high
743 enough so that your overall system performance would improve by
744 releasing this much memory.
745
746 The trim threshold and the mmap control parameters (see below)
747 can be traded off with one another. Trimming and mmapping are
748 two different ways of releasing unused memory back to the
749 system. Between these two, it is often possible to keep
750 system-level demands of a long-lived program down to a bare
751 minimum. For example, in one test suite of sessions measuring
752 the XF86 X server on Linux, using a trim threshold of 128K and a
753 mmap threshold of 192K led to near-minimal long term resource
754 consumption.
755
756 If you are using this malloc in a long-lived program, it should
757 pay to experiment with these values. As a rough guide, you
758 might set to a value close to the average size of a process
759 (program) running on your system. Releasing this much memory
760 would allow such a process to run in memory. Generally, it's
  worth it to tune for trimming rather than memory mapping when a
762 program undergoes phases where several large chunks are
763 allocated and released in ways that can reuse each other's
764 storage, perhaps mixed with phases where there are no such
765 chunks at all. And in well-behaved long-lived programs,
766 controlling release of large blocks via trimming versus mapping
767 is usually faster.
768
769 However, in most programs, these parameters serve mainly as
770 protection against the system-level effects of carrying around
771 massive amounts of unneeded memory. Since frequent calls to
772 sbrk, mmap, and munmap otherwise degrade performance, the default
773 parameters are set to relatively high values that serve only as
774 safeguards.
775
  The trim value must be greater than page size to have any useful
  effect. To disable trimming completely, you can set it to
  (unsigned long)(-1)
779
780 Trim settings interact with fastbin (MXFAST) settings: Unless
781 TRIM_FASTBINS is defined, automatic trimming never takes place upon
782 freeing a chunk with size less than or equal to MXFAST. Trimming is
783 instead delayed until subsequent freeing of larger chunks. However,
784 you can still force an attempted trim by calling malloc_trim.
785
786 Also, trimming is not generally possible in cases where
787 the main arena is obtained via mmap.
788
789 Note that the trick some people use of mallocing a huge space and
790 then freeing it at program startup, in an attempt to reserve system
791 memory, doesn't have the intended effect under automatic trimming,
792 since that memory will immediately be returned to the system.
793*/
794
795#define M_TRIM_THRESHOLD -1
796
797#ifndef DEFAULT_TRIM_THRESHOLD
798#define DEFAULT_TRIM_THRESHOLD (128 * 1024)
799#endif
800
801/*
802 M_TOP_PAD is the amount of extra `padding' space to allocate or
803 retain whenever sbrk is called. It is used in two ways internally:
804
805 * When sbrk is called to extend the top of the arena to satisfy
806 a new malloc request, this much padding is added to the sbrk
807 request.
808
809 * When malloc_trim is called automatically from free(),
810 it is used as the `pad' argument.
811
812 In both cases, the actual amount of padding is rounded
813 so that the end of the arena is always a system page boundary.
814
815 The main reason for using padding is to avoid calling sbrk so
816 often. Having even a small pad greatly reduces the likelihood
817 that nearly every malloc request during program start-up (or
818 after trimming) will invoke sbrk, which needlessly wastes
819 time.
820
821 Automatic rounding-up to page-size units is normally sufficient
822 to avoid measurable overhead, so the default is 0. However, in
823 systems where sbrk is relatively slow, it can pay to increase
824 this value, at the expense of carrying around more memory than
825 the program needs.
826*/
827
828#define M_TOP_PAD -2
829
830#ifndef DEFAULT_TOP_PAD
831#define DEFAULT_TOP_PAD (0)
832#endif
833
834/*
835 MMAP_THRESHOLD_MAX and _MIN are the bounds on the dynamically
836 adjusted MMAP_THRESHOLD.
837*/
838
839#ifndef DEFAULT_MMAP_THRESHOLD_MIN
840#define DEFAULT_MMAP_THRESHOLD_MIN (128 * 1024)
841#endif
842
843#ifndef DEFAULT_MMAP_THRESHOLD_MAX
844 /* For 32-bit platforms we cannot increase the maximum mmap
845 threshold much because it is also the minimum value for the
846 maximum heap size and its alignment. Going above 512k (i.e., 1M
847 for new heaps) wastes too much address space. */
848# if __WORDSIZE == 32
849# define DEFAULT_MMAP_THRESHOLD_MAX (512 * 1024)
850# else
851# define DEFAULT_MMAP_THRESHOLD_MAX (4 * 1024 * 1024 * sizeof(long))
852# endif
853#endif
854
855/*
856 M_MMAP_THRESHOLD is the request size threshold for using mmap()
857 to service a request. Requests of at least this size that cannot
858 be allocated using already-existing space will be serviced via mmap.
859 (If enough normal freed space already exists it is used instead.)
860
861 Using mmap segregates relatively large chunks of memory so that
862 they can be individually obtained and released from the host
863 system. A request serviced through mmap is never reused by any
864 other request (at least not directly; the system may just so
865 happen to remap successive requests to the same locations).
866
867 Segregating space in this way has the benefits that:
868
869 1. Mmapped space can ALWAYS be individually released back
870 to the system, which helps keep the system level memory
871 demands of a long-lived program low.
872 2. Mapped memory can never become `locked' between
873 other chunks, as can happen with normally allocated chunks, which
874 means that even trimming via malloc_trim would not release them.
875 3. On some systems with "holes" in address spaces, mmap can obtain
876 memory that sbrk cannot.
877
878 However, it has the disadvantages that:
879
880 1. The space cannot be reclaimed, consolidated, and then
881 used to service later requests, as happens with normal chunks.
882 2. It can lead to more wastage because of mmap page alignment
883 requirements
884 3. It causes malloc performance to be more dependent on host
885 system memory management support routines which may vary in
886 implementation quality and may impose arbitrary
887 limitations. Generally, servicing a request via normal
888 malloc steps is faster than going through a system's mmap.
889
890 The advantages of mmap nearly always outweigh disadvantages for
891 "large" chunks, but the value of "large" varies across systems. The
892 default is an empirically derived value that works well in most
893 systems.
894
895
896 Update in 2006:
897 The above was written in 2001. Since then the world has changed a lot.
898 Memory got bigger. Applications got bigger. The virtual address space
899 layout in 32 bit linux changed.
900
901 In the new situation, brk() and mmap space is shared and there are no
902 artificial limits on brk size imposed by the kernel. What is more,
903 applications have started using transient allocations larger than the
  128Kb that was imagined in 2001.
905
906 The price for mmap is also high now; each time glibc mmaps from the
907 kernel, the kernel is forced to zero out the memory it gives to the
908 application. Zeroing memory is expensive and eats a lot of cache and
909 memory bandwidth. This has nothing to do with the efficiency of the
  virtual memory system; when mmap is used the kernel simply has no choice
  but to zero the pages.
912
913 In 2001, the kernel had a maximum size for brk() which was about 800
914 megabytes on 32 bit x86, at that point brk() would hit the first
  mmapped shared libraries and couldn't expand anymore. With current 2.6
916 kernels, the VA space layout is different and brk() and mmap
917 both can span the entire heap at will.
918
919 Rather than using a static threshold for the brk/mmap tradeoff,
920 we are now using a simple dynamic one. The goal is still to avoid
921 fragmentation. The old goals we kept are
922 1) try to get the long lived large allocations to use mmap()
923 2) really large allocations should always use mmap()
924 and we're adding now:
925 3) transient allocations should use brk() to avoid forcing the kernel
     to zero memory over and over again
927
928 The implementation works with a sliding threshold, which is by default
  limited to go between 128Kb and 32Mb (64Mb for 64 bit machines) and starts
930 out at 128Kb as per the 2001 default.
931
932 This allows us to satisfy requirement 1) under the assumption that long
933 lived allocations are made early in the process' lifespan, before it has
934 started doing dynamic allocations of the same size (which will
935 increase the threshold).
936
  The upper bound on the threshold satisfies requirement 2)
938
939 The threshold goes up in value when the application frees memory that was
940 allocated with the mmap allocator. The idea is that once the application
941 starts freeing memory of a certain size, it's highly probable that this is
942 a size the application uses for transient allocations. This estimator
943 is there to satisfy the new third requirement.
944
945*/
946
947#define M_MMAP_THRESHOLD -3
948
949#ifndef DEFAULT_MMAP_THRESHOLD
950#define DEFAULT_MMAP_THRESHOLD DEFAULT_MMAP_THRESHOLD_MIN
951#endif
952
953/*
954 M_MMAP_MAX is the maximum number of requests to simultaneously
955 service using mmap. This parameter exists because
956 some systems have a limited number of internal tables for
957 use by mmap, and using more than a few of them may degrade
958 performance.
959
960 The default is set to a value that serves only as a safeguard.
961 Setting to 0 disables use of mmap for servicing large requests.
962*/
963
964#define M_MMAP_MAX -4
965
966#ifndef DEFAULT_MMAP_MAX
967#define DEFAULT_MMAP_MAX (65536)
968#endif
969
970#include <malloc.h>
971
972#ifndef RETURN_ADDRESS
973#define RETURN_ADDRESS(X_) (NULL)
974#endif
975
976/* On some platforms we can compile internal, not exported functions better.
977 Let the environment provide a macro and define it to be empty if it
978 is not available. */
979#ifndef internal_function
980# define internal_function
981#endif
982
983/* Forward declarations. */
984struct malloc_chunk;
985typedef struct malloc_chunk* mchunkptr;
986
987/* Internal routines. */
988
989static void* _int_malloc(mstate, size_t);
990static void _int_free(mstate, mchunkptr, int);
991static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
992 INTERNAL_SIZE_T);
993static void* _int_memalign(mstate, size_t, size_t);
994static void* _mid_memalign(size_t, size_t, void *);
995
996static void malloc_printerr(int action, const char *str, void *ptr, mstate av);
997
998static void* internal_function mem2mem_check(void *p, size_t sz);
999static int internal_function top_check(void);
1000static void internal_function munmap_chunk(mchunkptr p);
1001#if HAVE_MREMAP
1002static mchunkptr internal_function mremap_chunk(mchunkptr p, size_t new_size);
1003#endif
1004
1005static void* malloc_check(size_t sz, const void *caller);
1006static void free_check(void* mem, const void *caller);
1007static void* realloc_check(void* oldmem, size_t bytes,
1008 const void *caller);
1009static void* memalign_check(size_t alignment, size_t bytes,
1010 const void *caller);
1011
1012/* ------------------ MMAP support ------------------ */
1013
1014
1015#include <fcntl.h>
1016#include <sys/mman.h>
1017
1018#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1019# define MAP_ANONYMOUS MAP_ANON
1020#endif
1021
1022#ifndef MAP_NORESERVE
1023# define MAP_NORESERVE 0
1024#endif
1025
1026#define MMAP(addr, size, prot, flags) \
1027 __mmap((addr), (size), (prot), (flags)|MAP_ANONYMOUS|MAP_PRIVATE, -1, 0)
1028
1029
1030/*
1031 ----------------------- Chunk representations -----------------------
1032*/
1033
1034
1035/*
1036 This struct declaration is misleading (but accurate and necessary).
1037 It declares a "view" into memory allowing access to necessary
1038 fields at known offsets from a given base. See explanation below.
1039*/
1040
1041struct malloc_chunk {
1042
1043 INTERNAL_SIZE_T mchunk_prev_size; /* Size of previous chunk (if free). */
1044 INTERNAL_SIZE_T mchunk_size; /* Size in bytes, including overhead. */
1045
1046 struct malloc_chunk* fd; /* double links -- used only if free. */
1047 struct malloc_chunk* bk;
1048
1049 /* Only used for large blocks: pointer to next larger size. */
1050 struct malloc_chunk* fd_nextsize; /* double links -- used only if free. */
1051 struct malloc_chunk* bk_nextsize;
1052};
1053
1054
1055/*
1056 malloc_chunk details:
1057
1058 (The following includes lightly edited explanations by Colin Plumb.)
1059
1060 Chunks of memory are maintained using a `boundary tag' method as
1061 described in e.g., Knuth or Standish. (See the paper by Paul
1062 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
1063 survey of such techniques.) Sizes of free chunks are stored both
1064 in the front of each chunk and at the end. This makes
1065 consolidating fragmented chunks into bigger chunks very fast. The
1066 size fields also hold bits representing whether chunks are free or
1067 in use.
1068
1069 An allocated chunk looks like this:
1070
1071
1072 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1073 | Size of previous chunk, if unallocated (P clear) |
1074 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1075 | Size of chunk, in bytes |A|M|P|
1076 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1077 | User data starts here... .
1078 . .
1079 . (malloc_usable_size() bytes) .
1080 . |
1081nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1082 | (size of chunk, but used for application data) |
1083 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1084 | Size of next chunk, in bytes |A|0|1|
1085 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1086
1087 Where "chunk" is the front of the chunk for the purpose of most of
1088 the malloc code, but "mem" is the pointer that is returned to the
1089 user. "Nextchunk" is the beginning of the next contiguous chunk.
1090
1091 Chunks always begin on even word boundaries, so the mem portion
1092 (which is returned to the user) is also on an even word boundary, and
1093 thus at least double-word aligned.
1094
1095 Free chunks are stored in circular doubly-linked lists, and look like this:
1096
1097 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1098 | Size of previous chunk, if unallocated (P clear) |
1099 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1100 `head:' | Size of chunk, in bytes |A|0|P|
1101 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1102 | Forward pointer to next chunk in list |
1103 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1104 | Back pointer to previous chunk in list |
1105 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1106 | Unused space (may be 0 bytes long) .
1107 . .
1108 . |
1109nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1110 `foot:' | Size of chunk, in bytes |
1111 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1112 | Size of next chunk, in bytes |A|0|0|
1113 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1114
1115 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
1116 chunk size (which is always a multiple of two words), is an in-use
1117 bit for the *previous* chunk. If that bit is *clear*, then the
1118 word before the current chunk size contains the previous chunk
1119 size, and can be used to find the front of the previous chunk.
1120 The very first chunk allocated always has this bit set,
1121 preventing access to non-existent (or non-owned) memory. If
1122 prev_inuse is set for any given chunk, then you CANNOT determine
1123 the size of the previous chunk, and might even get a memory
1124 addressing fault when trying to do so.
1125
1126 The A (NON_MAIN_ARENA) bit is cleared for chunks on the initial,
1127 main arena, described by the main_arena variable. When additional
1128 threads are spawned, each thread receives its own arena (up to a
1129 configurable limit, after which arenas are reused for multiple
1130 threads), and the chunks in these arenas have the A bit set. To
1131 find the arena for a chunk on such a non-main arena, heap_for_ptr
1132 performs a bit mask operation and indirection through the ar_ptr
1133 member of the per-heap header heap_info (see arena.c).
1134
1135 Note that the `foot' of the current chunk is actually represented
1136 as the prev_size of the NEXT chunk. This makes it easier to
1137 deal with alignments etc but can be very confusing when trying
1138 to extend or adapt this code.
1139
1140 The three exceptions to all this are:
1141
1142 1. The special chunk `top' doesn't bother using the
1143 trailing size field since there is no next contiguous chunk
1144 that would have to index off it. After initialization, `top'
1145 is forced to always exist. If it would become less than
1146 MINSIZE bytes long, it is replenished.
1147
1148 2. Chunks allocated via mmap, which have the second-lowest-order
1149 bit M (IS_MMAPPED) set in their size fields. Because they are
1150 allocated one-by-one, each must contain its own trailing size
1151 field. If the M bit is set, the other bits are ignored
1152 (because mmapped chunks are neither in an arena, nor adjacent
1153 to a freed chunk). The M bit is also used for chunks which
1154 originally came from a dumped heap via malloc_set_state in
1155 hooks.c.
1156
1157 3. Chunks in fastbins are treated as allocated chunks from the
1158 point of view of the chunk allocator. They are consolidated
1159 with their neighbors only in bulk, in malloc_consolidate.
1160*/
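
/*
  Illustrative sketch (using the macros defined below, not meant for
  application code): given a pointer previously handed to the user, the
  chunk header sits 2*SIZE_SZ bytes before it, and the low bits of its
  size field carry the A/M/P flags from the diagrams above.

    void *mem = ...;                       // pointer given to the user
    mchunkptr p = mem2chunk (mem);         // back to the chunk header
    INTERNAL_SIZE_T sz = chunksize (p);    // size with A|M|P masked off
    if (chunk_is_mmapped (p))
      ;                                    // stand-alone mmapped chunk
    else if (prev_inuse (p))
      ;                                    // previous neighbor is in use
    else
      ;                                    // prev_size (p) is valid here
*/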
1161
1162/*
1163 ---------- Size and alignment checks and conversions ----------
1164*/
1165
1166/* conversion from malloc headers to user pointers, and back */
1167
1168#define chunk2mem(p) ((void*)((char*)(p) + 2*SIZE_SZ))
1169#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
1170
1171/* The smallest possible chunk */
1172#define MIN_CHUNK_SIZE (offsetof(struct malloc_chunk, fd_nextsize))
1173
1174/* The smallest size we can malloc is an aligned minimal chunk */
1175
1176#define MINSIZE \
1177 (unsigned long)(((MIN_CHUNK_SIZE+MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK))
1178
1179/* Check if m has acceptable alignment */
1180
1181#define aligned_OK(m) (((unsigned long)(m) & MALLOC_ALIGN_MASK) == 0)
1182
1183#define misaligned_chunk(p) \
1184 ((uintptr_t)(MALLOC_ALIGNMENT == 2 * SIZE_SZ ? (p) : chunk2mem (p)) \
1185 & MALLOC_ALIGN_MASK)
1186
1187
1188/*
1189 Check if a request is so large that it would wrap around zero when
1190 padded and aligned. To simplify some other code, the bound is made
1191 low enough so that adding MINSIZE will also not wrap around zero.
1192 */
1193
1194#define REQUEST_OUT_OF_RANGE(req) \
1195 ((unsigned long) (req) >= \
1196 (unsigned long) (INTERNAL_SIZE_T) (-2 * MINSIZE))
1197
1198/* pad request bytes into a usable size -- internal version */
1199
1200#define request2size(req) \
1201 (((req) + SIZE_SZ + MALLOC_ALIGN_MASK < MINSIZE) ? \
1202 MINSIZE : \
1203 ((req) + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK)
1204
1205/* Same, except also perform an argument and result check. First, we check
1206 that the padding done by request2size didn't result in an integer
1207 overflow. Then we check (using REQUEST_OUT_OF_RANGE) that the resulting
1208 size isn't so large that a later alignment would lead to another integer
1209 overflow. */
1210#define checked_request2size(req, sz) \
1211({ \
1212 (sz) = request2size (req); \
1213 if (((sz) < (req)) \
1214 || REQUEST_OUT_OF_RANGE (sz)) \
1215 { \
1216 __set_errno (ENOMEM); \
1217 return 0; \
1218 } \
1219})
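
/*
  Worked example, assuming 8-byte size_t and 16-byte MALLOC_ALIGNMENT
  (so SIZE_SZ == 8, MALLOC_ALIGN_MASK == 15 and MINSIZE == 32):

    request2size (25) == (25 + 8 + 15) & ~15 == 48
    request2size (24) == (24 + 8 + 15) & ~15 == 32
    request2size (0)  == MINSIZE             == 32   (padded size < MINSIZE)

  checked_request2size additionally rejects a request whose padded size
  wrapped around zero or lies within 2*MINSIZE of the top of the
  INTERNAL_SIZE_T range; in that case it sets ENOMEM and returns 0 from
  the enclosing function.
*/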
1220
1221/*
1222 --------------- Physical chunk operations ---------------
1223 */
1224
1225
1226/* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
1227#define PREV_INUSE 0x1
1228
1229/* extract inuse bit of previous chunk */
1230#define prev_inuse(p) ((p)->mchunk_size & PREV_INUSE)
1231
1232
1233/* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
1234#define IS_MMAPPED 0x2
1235
1236/* check for mmap()'ed chunk */
1237#define chunk_is_mmapped(p) ((p)->mchunk_size & IS_MMAPPED)
1238
1239
1240/* size field is or'ed with NON_MAIN_ARENA if the chunk was obtained
1241 from a non-main arena. This is only set immediately before handing
1242 the chunk to the user, if necessary. */
1243#define NON_MAIN_ARENA 0x4
1244
1245/* Check for chunk from main arena. */
1246#define chunk_main_arena(p) (((p)->mchunk_size & NON_MAIN_ARENA) == 0)
1247
1248/* Mark a chunk as not being on the main arena. */
1249#define set_non_main_arena(p) ((p)->mchunk_size |= NON_MAIN_ARENA)
1250
1251
1252/*
1253 Bits to mask off when extracting size
1254
1255 Note: IS_MMAPPED is intentionally not masked off from size field in
1256 macros for which mmapped chunks should never be seen. This should
1257 cause helpful core dumps to occur if it is tried by accident by
1258 people extending or adapting this malloc.
1259 */
1260#define SIZE_BITS (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
1261
1262/* Get size, ignoring use bits */
1263#define chunksize(p) (chunksize_nomask (p) & ~(SIZE_BITS))
1264
1265/* Like chunksize, but do not mask SIZE_BITS. */
1266#define chunksize_nomask(p) ((p)->mchunk_size)
1267
1268/* Ptr to next physical malloc_chunk. */
1269#define next_chunk(p) ((mchunkptr) (((char *) (p)) + chunksize (p)))
1270
1271/* Size of the chunk below P. Only valid if prev_inuse (P). */
1272#define prev_size(p) ((p)->mchunk_prev_size)
1273
1274/* Set the size of the chunk below P. Only valid if prev_inuse (P). */
1275#define set_prev_size(p, sz) ((p)->mchunk_prev_size = (sz))
1276
1277/* Ptr to previous physical malloc_chunk. Only valid if prev_inuse (P). */
1278#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - prev_size (p)))
1279
1280/* Treat space at ptr + offset as a chunk */
1281#define chunk_at_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
1282
1283/* extract p's inuse bit */
1284#define inuse(p) \
1285 ((((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size) & PREV_INUSE)
1286
1287/* set/clear chunk as being inuse without otherwise disturbing */
1288#define set_inuse(p) \
1289 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size |= PREV_INUSE
1290
1291#define clear_inuse(p) \
1292 ((mchunkptr) (((char *) (p)) + chunksize (p)))->mchunk_size &= ~(PREV_INUSE)
1293
1294
1295/* check/set/clear inuse bits in known places */
1296#define inuse_bit_at_offset(p, s) \
1297 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size & PREV_INUSE)
1298
1299#define set_inuse_bit_at_offset(p, s) \
1300 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size |= PREV_INUSE)
1301
1302#define clear_inuse_bit_at_offset(p, s) \
1303 (((mchunkptr) (((char *) (p)) + (s)))->mchunk_size &= ~(PREV_INUSE))
1304
1305
1306/* Set size at head, without disturbing its use bit */
1307#define set_head_size(p, s) ((p)->mchunk_size = (((p)->mchunk_size & SIZE_BITS) | (s)))
1308
1309/* Set size/use field */
1310#define set_head(p, s) ((p)->mchunk_size = (s))
1311
1312/* Set size at footer (only when chunk is not in use) */
1313#define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->mchunk_prev_size = (s))
1314
1315
1316#pragma GCC poison mchunk_size
1317#pragma GCC poison mchunk_prev_size
1318
1319/*
1320 -------------------- Internal data structures --------------------
1321
1322 All internal state is held in an instance of malloc_state defined
1323 below. There are no other static variables, except in two optional
1324 cases:
1325 * If USE_MALLOC_LOCK is defined, the mALLOC_MUTEx declared above.
1326 * If mmap doesn't support MAP_ANONYMOUS, a dummy file descriptor
1327 for mmap.
1328
1329 Beware of lots of tricks that minimize the total bookkeeping space
1330 requirements. The result is a little over 1K bytes (for 4byte
1331 pointers and size_t.)
1332 */
1333
1334/*
1335 Bins
1336
1337 An array of bin headers for free chunks. Each bin is doubly
1338 linked. The bins are approximately proportionally (log) spaced.
1339 There are a lot of these bins (128). This may look excessive, but
1340 works very well in practice. Most bins hold sizes that are
1341 unusual as malloc request sizes, but are more usual for fragments
1342 and consolidated sets of chunks, which is what these bins hold, so
1343 they can be found quickly. All procedures maintain the invariant
1344 that no consolidated chunk physically borders another one, so each
  chunk in a list is known to be preceded and followed by either
1346 inuse chunks or the ends of memory.
1347
1348 Chunks in bins are kept in size order, with ties going to the
1349 approximately least recently used chunk. Ordering isn't needed
1350 for the small bins, which all contain the same-sized chunks, but
1351 facilitates best-fit allocation for larger chunks. These lists
1352 are just sequential. Keeping them in order almost never requires
1353 enough traversal to warrant using fancier ordered data
1354 structures.
1355
1356 Chunks of the same size are linked with the most
1357 recently freed at the front, and allocations are taken from the
1358 back. This results in LRU (FIFO) allocation order, which tends
1359 to give each chunk an equal opportunity to be consolidated with
1360 adjacent freed chunks, resulting in larger free chunks and less
1361 fragmentation.
1362
1363 To simplify use in double-linked lists, each bin header acts
1364 as a malloc_chunk. This avoids special-casing for headers.
1365 But to conserve space and improve locality, we allocate
1366 only the fd/bk pointers of bins, and then use repositioning tricks
1367 to treat these as the fields of a malloc_chunk*.
1368 */
1369
1370typedef struct malloc_chunk *mbinptr;
1371
1372/* addressing -- note that bin_at(0) does not exist */
1373#define bin_at(m, i) \
1374 (mbinptr) (((char *) &((m)->bins[((i) - 1) * 2])) \
1375 - offsetof (struct malloc_chunk, fd))
1376
1377/* analog of ++bin */
1378#define next_bin(b) ((mbinptr) ((char *) (b) + (sizeof (mchunkptr) << 1)))
1379
1380/* Reminders about list directionality within bins */
1381#define first(b) ((b)->fd)
1382#define last(b) ((b)->bk)
1383
1384/* Take a chunk off a bin list */
1385#define unlink(AV, P, BK, FD) { \
1386 FD = P->fd; \
1387 BK = P->bk; \
1388 if (__builtin_expect (FD->bk != P || BK->fd != P, 0)) \
1389 malloc_printerr (check_action, "corrupted double-linked list", P, AV); \
1390 else { \
1391 FD->bk = BK; \
1392 BK->fd = FD; \
1393 if (!in_smallbin_range (chunksize_nomask (P)) \
1394 && __builtin_expect (P->fd_nextsize != NULL, 0)) { \
1395 if (__builtin_expect (P->fd_nextsize->bk_nextsize != P, 0) \
1396 || __builtin_expect (P->bk_nextsize->fd_nextsize != P, 0)) \
1397 malloc_printerr (check_action, \
1398 "corrupted double-linked list (not small)", \
1399 P, AV); \
1400 if (FD->fd_nextsize == NULL) { \
1401 if (P->fd_nextsize == P) \
1402 FD->fd_nextsize = FD->bk_nextsize = FD; \
1403 else { \
1404 FD->fd_nextsize = P->fd_nextsize; \
1405 FD->bk_nextsize = P->bk_nextsize; \
1406 P->fd_nextsize->bk_nextsize = FD; \
1407 P->bk_nextsize->fd_nextsize = FD; \
1408 } \
1409 } else { \
1410 P->fd_nextsize->bk_nextsize = P->bk_nextsize; \
1411 P->bk_nextsize->fd_nextsize = P->fd_nextsize; \
1412 } \
1413 } \
1414 } \
1415}
1416
1417/*
1418 Indexing
1419
1420 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
1421 8 bytes apart. Larger bins are approximately logarithmically spaced:
1422
1423 64 bins of size 8
1424 32 bins of size 64
1425 16 bins of size 512
1426 8 bins of size 4096
1427 4 bins of size 32768
1428 2 bins of size 262144
1429 1 bin of size what's left
1430
1431 There is actually a little bit of slop in the numbers in bin_index
1432 for the sake of speed. This makes no difference elsewhere.
1433
1434 The bins top out around 1MB because we expect to service large
1435 requests via mmap.
1436
1437 Bin 0 does not exist. Bin 1 is the unordered list; if that would be
1438 a valid chunk size the small bins are bumped up one.
1439 */
1440
1441#define NBINS 128
1442#define NSMALLBINS 64
1443#define SMALLBIN_WIDTH MALLOC_ALIGNMENT
1444#define SMALLBIN_CORRECTION (MALLOC_ALIGNMENT > 2 * SIZE_SZ)
1445#define MIN_LARGE_SIZE ((NSMALLBINS - SMALLBIN_CORRECTION) * SMALLBIN_WIDTH)
1446
1447#define in_smallbin_range(sz) \
1448 ((unsigned long) (sz) < (unsigned long) MIN_LARGE_SIZE)
1449
1450#define smallbin_index(sz) \
1451 ((SMALLBIN_WIDTH == 16 ? (((unsigned) (sz)) >> 4) : (((unsigned) (sz)) >> 3))\
1452 + SMALLBIN_CORRECTION)
1453
1454#define largebin_index_32(sz) \
1455 (((((unsigned long) (sz)) >> 6) <= 38) ? 56 + (((unsigned long) (sz)) >> 6) :\
1456 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1457 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1458 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1459 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1460 126)
1461
1462#define largebin_index_32_big(sz) \
1463 (((((unsigned long) (sz)) >> 6) <= 45) ? 49 + (((unsigned long) (sz)) >> 6) :\
1464 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1465 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1466 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1467 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1468 126)
1469
1470// XXX It remains to be seen whether it is good to keep the widths of
1471// XXX the buckets the same or whether it should be scaled by a factor
1472// XXX of two as well.
1473#define largebin_index_64(sz) \
1474 (((((unsigned long) (sz)) >> 6) <= 48) ? 48 + (((unsigned long) (sz)) >> 6) :\
1475 ((((unsigned long) (sz)) >> 9) <= 20) ? 91 + (((unsigned long) (sz)) >> 9) :\
1476 ((((unsigned long) (sz)) >> 12) <= 10) ? 110 + (((unsigned long) (sz)) >> 12) :\
1477 ((((unsigned long) (sz)) >> 15) <= 4) ? 119 + (((unsigned long) (sz)) >> 15) :\
1478 ((((unsigned long) (sz)) >> 18) <= 2) ? 124 + (((unsigned long) (sz)) >> 18) :\
1479 126)
1480
1481#define largebin_index(sz) \
1482 (SIZE_SZ == 8 ? largebin_index_64 (sz) \
1483 : MALLOC_ALIGNMENT == 16 ? largebin_index_32_big (sz) \
1484 : largebin_index_32 (sz))
1485
1486#define bin_index(sz) \
1487 ((in_smallbin_range (sz)) ? smallbin_index (sz) : largebin_index (sz))
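
/*
   Worked example for bin_index (a sketch; assumes SIZE_SZ == 8, so
   largebin_index_64 is used and MIN_LARGE_SIZE == 1024):

     bin_index (1008)    == smallbin_index (1008)  == 63
     bin_index (1024)    == 48 + (1024 >> 6)       == 64  (first large bin)
     bin_index (1280)    == 48 + (1280 >> 6)       == 68
     bin_index (65536)   == 119 + (65536 >> 15)    == 121
     bin_index (1 << 20) == 126                    (the catch-all last bin;
                                                     bin_index never yields 127)
 */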
1488
1489
1490/*
1491 Unsorted chunks
1492
1493 All remainders from chunk splits, as well as all returned chunks,
1494 are first placed in the "unsorted" bin. They are then placed
1495 in regular bins after malloc gives them ONE chance to be used before
1496 binning. So, basically, the unsorted_chunks list acts as a queue,
1497 with chunks being placed on it in free (and malloc_consolidate),
1498 and taken off (to be either used or placed in bins) in malloc.
1499
1500 The NON_MAIN_ARENA flag is never set for unsorted chunks, so it
1501 does not have to be taken into account in size comparisons.
1502 */
1503
1504/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
1505#define unsorted_chunks(M) (bin_at (M, 1))
1506
1507/*
1508 Top
1509
1510 The top-most available chunk (i.e., the one bordering the end of
1511 available memory) is treated specially. It is never included in
1512 any bin, is used only if no other chunk is available, and is
1513 released back to the system if it is very large (see
1514 M_TRIM_THRESHOLD). Because top initially
1515 points to its own bin with initial zero size, thus forcing
1516 extension on the first malloc request, we avoid having any special
1517 code in malloc to check whether it even exists yet. But we still
1518 need to do so when getting memory from system, so we make
1519 initial_top treat the bin as a legal but unusable chunk during the
1520 interval between initialization and the first call to
1521 sysmalloc. (This is somewhat delicate, since it relies on
1522 the 2 preceding words to be zero during this interval as well.)
1523 */
1524
1525/* Conveniently, the unsorted bin can be used as dummy top on first call */
1526#define initial_top(M) (unsorted_chunks (M))
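
/*
   Concretely (an editorial sketch, assuming INTERNAL_SIZE_T has pointer
   size, as asserted in do_check_malloc_state): the dummy chunk returned by
   bin_at (M, 1) overlays earlier members of struct malloc_state, so its
   size word coincides with M->last_remainder, which stays zero until the
   first sysmalloc call.  chunksize (M->top) therefore reads as 0, every
   initial request fails the "use top" test, and sysmalloc installs a real
   top chunk.
 */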
1527
1528/*
1529 Binmap
1530
1531 To help compensate for the large number of bins, a one-level index
1532 structure is used for bin-by-bin searching. `binmap' is a
1533 bitvector recording whether bins are definitely empty so they can
1534 be skipped over during traversals. The bits are NOT always

1535 cleared as soon as bins are empty, but instead only
1536 when they are noticed to be empty during traversal in malloc.
1537 */
1538
1539/* Conservatively use 32 bits per map word, even on a 64-bit system */
1540#define BINMAPSHIFT 5
1541#define BITSPERMAP (1U << BINMAPSHIFT)
1542#define BINMAPSIZE (NBINS / BITSPERMAP)
1543
1544#define idx2block(i) ((i) >> BINMAPSHIFT)
1545#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT) - 1))))
1546
1547#define mark_bin(m, i) ((m)->binmap[idx2block (i)] |= idx2bit (i))
1548#define unmark_bin(m, i) ((m)->binmap[idx2block (i)] &= ~(idx2bit (i)))
1549#define get_binmap(m, i) ((m)->binmap[idx2block (i)] & idx2bit (i))
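
/*
   Example of the binmap arithmetic (a sketch): with BINMAPSHIFT == 5 the
   128 bins are covered by BINMAPSIZE == 4 32-bit words.  For bin 70:

     idx2block (70) == 70 >> 5        == 2
     idx2bit (70)   == 1 << (70 & 31) == 1 << 6 == 0x40

   so mark_bin (m, 70) sets bit 6 of m->binmap[2], and get_binmap (m, 70)
   tests exactly that bit.
 */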
1550
1551/*
1552 Fastbins
1553
1554 An array of lists holding recently freed small chunks. Fastbins
1555 are not doubly linked. It is faster to single-link them, and
1556 since chunks are never removed from the middles of these lists,
1557 double linking is not necessary. Also, unlike regular bins, they
1558 are not even processed in FIFO order (they use faster LIFO) since
1559 ordering doesn't much matter in the transient contexts in which
1560 fastbins are normally used.
1561
1562 Chunks in fastbins keep their inuse bit set, so they cannot
1563 be consolidated with other free chunks. malloc_consolidate
1564 releases all chunks in fastbins and consolidates them with
1565 other free chunks.
1566 */
1567
1568typedef struct malloc_chunk *mfastbinptr;
1569#define fastbin(ar_ptr, idx) ((ar_ptr)->fastbinsY[idx])
1570
1571/* offset 2 to use otherwise unindexable first 2 bins */
1572#define fastbin_index(sz) \
1573 ((((unsigned int) (sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2)
1574
1575
1576/* The maximum fastbin request size we support */
1577#define MAX_FAST_SIZE (80 * SIZE_SZ / 4)
1578
1579#define NFASTBINS (fastbin_index (request2size (MAX_FAST_SIZE)) + 1)
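
/*
   Worked example (a sketch; assumes SIZE_SZ == 8 and 16-byte chunk
   alignment):

     fastbin_index (32)  == (32 >> 4) - 2 == 0   (the minimum chunk size)
     fastbin_index (48)  == 1,  fastbin_index (128) == 6

     MAX_FAST_SIZE       == 80 * 8 / 4 == 160
     request2size (160)  == 176
     NFASTBINS           == fastbin_index (176) + 1 == 10

   so fastbinsY[] holds ten singly linked lists for chunk sizes 32, 48,
   ..., 176 bytes, even though only sizes up to get_max_fast () are
   actually placed there.
 */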
1580
1581/*
1582 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
1583 that triggers automatic consolidation of possibly-surrounding
1584 fastbin chunks. This is a heuristic, so the exact value should not
1585 matter too much. It is defined at half the default trim threshold as a
1586 compromise heuristic to only attempt consolidation if it is likely
1587 to lead to trimming. However, it is not dynamically tunable, since
1588 consolidation reduces fragmentation surrounding large chunks even
1589 if trimming is not used.
1590 */
1591
1592#define FASTBIN_CONSOLIDATION_THRESHOLD (65536UL)
1593
1594/*
1595 The flag bits below were historically kept in the low 2 bits of max_fast
1596 (which don't matter in size comparisons); they now live in the separate
1597 `flags' member of struct malloc_state. */
1598
1599/*
1600 FASTCHUNKS_BIT, held in the arena's flags word, indicates that there are probably
1601 some fastbin chunks. It is set true on entering a chunk into any
1602 fastbin, and cleared only in malloc_consolidate.
1603
1604 The truth value is inverted so that have_fastchunks will be true
1605 upon startup (since statics are zero-filled), simplifying
1606 initialization checks.
1607 */
1608
1609#define FASTCHUNKS_BIT (1U)
1610
1611#define have_fastchunks(M) (((M)->flags & FASTCHUNKS_BIT) == 0)
1612#define clear_fastchunks(M) catomic_or (&(M)->flags, FASTCHUNKS_BIT)
1613#define set_fastchunks(M) catomic_and (&(M)->flags, ~FASTCHUNKS_BIT)
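
/*
   Because the bit is inverted, a freshly zero-filled arena (flags == 0)
   already reports have_fastchunks () == true.  For example (a sketch):

     set_fastchunks (M)    clears FASTCHUNKS_BIT  ->  have_fastchunks (M) == 1
     clear_fastchunks (M)  sets FASTCHUNKS_BIT    ->  have_fastchunks (M) == 0

   so the very first free () of a large chunk takes the "there may be
   fastbin chunks" path into malloc_consolidate, which doubles as the lazy
   initializer for the arena.
 */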
1614
1615/*
1616 NONCONTIGUOUS_BIT indicates that MORECORE does not return contiguous
1617 regions. Otherwise, contiguity is exploited in merging together,
1618 when possible, results from consecutive MORECORE calls.
1619
1620 The initial value comes from MORECORE_CONTIGUOUS, but is
1621 changed dynamically if mmap is ever used as an sbrk substitute.
1622 */
1623
1624#define NONCONTIGUOUS_BIT (2U)
1625
1626#define contiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) == 0)
1627#define noncontiguous(M) (((M)->flags & NONCONTIGUOUS_BIT) != 0)
1628#define set_noncontiguous(M) ((M)->flags |= NONCONTIGUOUS_BIT)
1629#define set_contiguous(M) ((M)->flags &= ~NONCONTIGUOUS_BIT)
1630
1631/* ARENA_CORRUPTION_BIT is set if a memory corruption was detected on the
1632 arena. Such an arena is no longer used to allocate chunks. Chunks
1633 allocated in that arena before detecting corruption are not freed. */
1634
1635#define ARENA_CORRUPTION_BIT (4U)
1636
1637#define arena_is_corrupt(A) (((A)->flags & ARENA_CORRUPTION_BIT))
1638#define set_arena_corrupt(A) ((A)->flags |= ARENA_CORRUPTION_BIT)
1639
1640/*
1641 Set value of max_fast.
1642 Use impossibly small value if 0.
1643 Precondition: there are no existing fastbin chunks.
1644 Setting the value clears fastchunk bit but preserves noncontiguous bit.
1645 */
1646
1647#define set_max_fast(s) \
1648 global_max_fast = (((s) == 0) \
1649 ? SMALLBIN_WIDTH : ((s + SIZE_SZ) & ~MALLOC_ALIGN_MASK))
1650#define get_max_fast() global_max_fast
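
/*
   Example (a sketch; assumes SIZE_SZ == 8, MALLOC_ALIGN_MASK == 15, and
   the usual DEFAULT_MXFAST of 64 * SIZE_SZ / 4 == 128):

     set_max_fast (128)  ->  global_max_fast == (128 + 8) & ~15 == 128
     set_max_fast (0)    ->  global_max_fast == SMALLBIN_WIDTH  == 16

   the latter being smaller than MINSIZE, so no real chunk can ever pass a
   "size <= get_max_fast ()" test and fastbins are effectively disabled.
 */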
1651
1652
1653/*
1654 ----------- Internal state representation and initialization -----------
1655 */
1656
1657struct malloc_state
1658{
1659 /* Serialize access. */
1660 __libc_lock_define (, mutex);
1661
1662 /* Flags (formerly in max_fast). */
1663 int flags;
1664
1665 /* Fastbins */
1666 mfastbinptr fastbinsY[NFASTBINS];
1667
1668 /* Base of the topmost chunk -- not otherwise kept in a bin */
1669 mchunkptr top;
1670
1671 /* The remainder from the most recent split of a small request */
1672 mchunkptr last_remainder;
1673
1674 /* Normal bins packed as described above */
1675 mchunkptr bins[NBINS * 2 - 2];
1676
1677 /* Bitmap of bins */
1678 unsigned int binmap[BINMAPSIZE];
1679
1680 /* Linked list */
1681 struct malloc_state *next;
1682
1683 /* Linked list for free arenas. Access to this field is serialized
1684 by free_list_lock in arena.c. */
1685 struct malloc_state *next_free;
1686
1687 /* Number of threads attached to this arena. 0 if the arena is on
1688 the free list. Access to this field is serialized by
1689 free_list_lock in arena.c. */
1690 INTERNAL_SIZE_T attached_threads;
1691
1692 /* Memory allocated from the system in this arena. */
1693 INTERNAL_SIZE_T system_mem;
1694 INTERNAL_SIZE_T max_system_mem;
1695};
1696
1697struct malloc_par
1698{
1699 /* Tunable parameters */
1700 unsigned long trim_threshold;
1701 INTERNAL_SIZE_T top_pad;
1702 INTERNAL_SIZE_T mmap_threshold;
1703 INTERNAL_SIZE_T arena_test;
1704 INTERNAL_SIZE_T arena_max;
1705
1706 /* Memory map support */
1707 int n_mmaps;
1708 int n_mmaps_max;
1709 int max_n_mmaps;
1710 /* the mmap_threshold is dynamic, until the user sets
1711 it manually, at which point we need to disable any
1712 dynamic behavior. */
1713 int no_dyn_threshold;
1714
1715 /* Statistics */
1716 INTERNAL_SIZE_T mmapped_mem;
1717 INTERNAL_SIZE_T max_mmapped_mem;
1718
1719 /* First address handed out by MORECORE/sbrk. */
1720 char *sbrk_base;
1721};
1722
1723/* There are several instances of this struct ("arenas") in this
1724 malloc. If you are adapting this malloc in a way that does NOT use
1725 a static or mmapped malloc_state, you MUST explicitly zero-fill it
1726 before using. This malloc relies on the property that malloc_state
1727 is initialized to all zeroes (as is true of C statics). */
1728
1729static struct malloc_state main_arena =
1730{
1731 .mutex = _LIBC_LOCK_INITIALIZER,
1732 .next = &main_arena,
1733 .attached_threads = 1
1734};
1735
1736/* These variables are used for undumping support. Chunks are marked
1737 as using mmap, but we leave them alone if they fall into this
1738 range. NB: The chunk size for these chunks only includes the
1739 initial size field (of SIZE_SZ bytes), there is no trailing size
1740 field (unlike with regular mmapped chunks). */
1741static mchunkptr dumped_main_arena_start; /* Inclusive. */
1742static mchunkptr dumped_main_arena_end; /* Exclusive. */
1743
1744/* True if the pointer falls into the dumped arena. Use this after
1745 chunk_is_mmapped indicates a chunk is mmapped. */
1746#define DUMPED_MAIN_ARENA_CHUNK(p) \
1747 ((p) >= dumped_main_arena_start && (p) < dumped_main_arena_end)
1748
1749/* There is only one instance of the malloc parameters. */
1750
1751static struct malloc_par mp_ =
1752{
1753 .top_pad = DEFAULT_TOP_PAD,
1754 .n_mmaps_max = DEFAULT_MMAP_MAX,
1755 .mmap_threshold = DEFAULT_MMAP_THRESHOLD,
1756 .trim_threshold = DEFAULT_TRIM_THRESHOLD,
1757#define NARENAS_FROM_NCORES(n) ((n) * (sizeof (long) == 4 ? 2 : 8))
1758 .arena_test = NARENAS_FROM_NCORES (1)
1759};
1760
1761/* Maximum size of memory handled in fastbins. */
1762static INTERNAL_SIZE_T global_max_fast;
1763
1764/*
1765 Initialize a malloc_state struct.
1766
1767 This is called only from within malloc_consolidate, which needs to
1768 be called in the same contexts anyway. It is never called directly
1769 outside of malloc_consolidate because some optimizing compilers try
1770 to inline it at all call points, which turns out not to be an
1771 optimization at all. (Inlining it in malloc_consolidate is fine though.)
1772 */
1773
1774static void
1775malloc_init_state (mstate av)
1776{
1777 int i;
1778 mbinptr bin;
1779
1780 /* Establish circular links for normal bins */
1781 for (i = 1; i < NBINS; ++i)
1782 {
1783 bin = bin_at (av, i);
1784 bin->fd = bin->bk = bin;
1785 }
1786
1787#if MORECORE_CONTIGUOUS
1788 if (av != &main_arena)
1789#endif
1790 set_noncontiguous (av);
1791 if (av == &main_arena)
1792 set_max_fast (DEFAULT_MXFAST);
1793 av->flags |= FASTCHUNKS_BIT;
1794
1795 av->top = initial_top (av);
1796}
1797
1798/*
1799 Other internal utilities operating on mstates
1800 */
1801
1802static void *sysmalloc (INTERNAL_SIZE_T, mstate);
1803static int systrim (size_t, mstate);
1804static void malloc_consolidate (mstate);
1805
1806
1807/* -------------- Early definitions for debugging hooks ---------------- */
1808
1809/* Define and initialize the hook variables. These weak definitions must
1810 appear before any use of the variables in a function (arena.c uses one). */
1811#ifndef weak_variable
1812/* In GNU libc we want the hook variables to be weak definitions to
1813 avoid a problem with Emacs. */
1814# define weak_variable weak_function
1815#endif
1816
1817/* Forward declarations. */
1818static void *malloc_hook_ini (size_t sz,
1819 const void *caller) __THROW;
1820static void *realloc_hook_ini (void *ptr, size_t sz,
1821 const void *caller) __THROW;
1822static void *memalign_hook_ini (size_t alignment, size_t sz,
1823 const void *caller) __THROW;
1824
1825#if HAVE_MALLOC_INIT_HOOK
1826void weak_variable (*__malloc_initialize_hook) (void) = NULL;
1827compat_symbol (libc, __malloc_initialize_hook,
1828 __malloc_initialize_hook, GLIBC_2_0);
1829#endif
1830
1831void weak_variable (*__free_hook) (void *__ptr,
1832 const void *) = NULL;
1833void *weak_variable (*__malloc_hook)
1834 (size_t __size, const void *) = malloc_hook_ini;
1835void *weak_variable (*__realloc_hook)
1836 (void *__ptr, size_t __size, const void *)
1837 = realloc_hook_ini;
1838void *weak_variable (*__memalign_hook)
1839 (size_t __alignment, size_t __size, const void *)
1840 = memalign_hook_ini;
1841void weak_variable (*__after_morecore_hook) (void) = NULL;
1842
1843
1844/* ---------------- Error behavior ------------------------------------ */
1845
1846#ifndef DEFAULT_CHECK_ACTION
1847# define DEFAULT_CHECK_ACTION 3
1848#endif
1849
1850static int check_action = DEFAULT_CHECK_ACTION;
1851
1852
1853/* ------------------ Testing support ----------------------------------*/
1854
1855static int perturb_byte;
1856
1857static void
1858alloc_perturb (char *p, size_t n)
1859{
1860 if (__glibc_unlikely (perturb_byte))
1861 memset (p, perturb_byte ^ 0xff, n);
1862}
1863
1864static void
1865free_perturb (char *p, size_t n)
1866{
1867 if (__glibc_unlikely (perturb_byte))
1868 memset (p, perturb_byte, n);
1869}
1870
1871
1872
1873#include <stap-probe.h>
1874
1875/* ------------------- Support for multiple arenas -------------------- */
1876#include "arena.c"
1877
1878/*
1879 Debugging support
1880
1881 These routines make a number of assertions about the states
1882 of data structures that should be true at all times. If any
1883 are not true, it's very likely that a user program has somehow
1884 trashed memory. (It's also possible that there is a coding error
1885 in malloc. In which case, please report it!)
1886 */
1887
1888#if !MALLOC_DEBUG
1889
1890# define check_chunk(A, P)
1891# define check_free_chunk(A, P)
1892# define check_inuse_chunk(A, P)
1893# define check_remalloced_chunk(A, P, N)
1894# define check_malloced_chunk(A, P, N)
1895# define check_malloc_state(A)
1896
1897#else
1898
1899# define check_chunk(A, P) do_check_chunk (A, P)
1900# define check_free_chunk(A, P) do_check_free_chunk (A, P)
1901# define check_inuse_chunk(A, P) do_check_inuse_chunk (A, P)
1902# define check_remalloced_chunk(A, P, N) do_check_remalloced_chunk (A, P, N)
1903# define check_malloced_chunk(A, P, N) do_check_malloced_chunk (A, P, N)
1904# define check_malloc_state(A) do_check_malloc_state (A)
1905
1906/*
1907 Properties of all chunks
1908 */
1909
1910static void
1911do_check_chunk (mstate av, mchunkptr p)
1912{
1913 unsigned long sz = chunksize (p);
1914 /* min and max possible addresses assuming contiguous allocation */
1915 char *max_address = (char *) (av->top) + chunksize (av->top);
1916 char *min_address = max_address - av->system_mem;
1917
1918 if (!chunk_is_mmapped (p))
1919 {
1920 /* Has legal address ... */
1921 if (p != av->top)
1922 {
1923 if (contiguous (av))
1924 {
1925 assert (((char *) p) >= min_address);
1926 assert (((char *) p + sz) <= ((char *) (av->top)));
1927 }
1928 }
1929 else
1930 {
1931 /* top size is always at least MINSIZE */
1932 assert ((unsigned long) (sz) >= MINSIZE);
1933 /* top predecessor always marked inuse */
1934 assert (prev_inuse (p));
1935 }
1936 }
1937 else if (!DUMPED_MAIN_ARENA_CHUNK (p))
1938 {
1939 /* address is outside main heap */
1940 if (contiguous (av) && av->top != initial_top (av))
1941 {
1942 assert (((char *) p) < min_address || ((char *) p) >= max_address);
1943 }
1944 /* chunk is page-aligned */
1945 assert (((prev_size (p) + sz) & (GLRO (dl_pagesize) - 1)) == 0);
1946 /* mem is aligned */
1947 assert (aligned_OK (chunk2mem (p)));
1948 }
1949}
1950
1951/*
1952 Properties of free chunks
1953 */
1954
1955static void
1956do_check_free_chunk (mstate av, mchunkptr p)
1957{
1958 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE | NON_MAIN_ARENA);
1959 mchunkptr next = chunk_at_offset (p, sz);
1960
1961 do_check_chunk (av, p);
1962
1963 /* Chunk must claim to be free ... */
1964 assert (!inuse (p));
1965 assert (!chunk_is_mmapped (p));
1966
1967 /* Unless a special marker, must have OK fields */
1968 if ((unsigned long) (sz) >= MINSIZE)
1969 {
1970 assert ((sz & MALLOC_ALIGN_MASK) == 0);
1971 assert (aligned_OK (chunk2mem (p)));
1972 /* ... matching footer field */
1973 assert (prev_size (p) == sz);
1974 /* ... and is fully consolidated */
1975 assert (prev_inuse (p));
1976 assert (next == av->top || inuse (next));
1977
1978 /* ... and has minimally sane links */
1979 assert (p->fd->bk == p);
1980 assert (p->bk->fd == p);
1981 }
1982 else /* markers are always of size SIZE_SZ */
1983 assert (sz == SIZE_SZ);
1984}
1985
1986/*
1987 Properties of inuse chunks
1988 */
1989
1990static void
1991do_check_inuse_chunk (mstate av, mchunkptr p)
1992{
1993 mchunkptr next;
1994
1995 do_check_chunk (av, p);
1996
1997 if (chunk_is_mmapped (p))
1998 return; /* mmapped chunks have no next/prev */
1999
2000 /* Check whether it claims to be in use ... */
2001 assert (inuse (p));
2002
2003 next = next_chunk (p);
2004
2005 /* ... and is surrounded by OK chunks.
2006 Since more things can be checked with free chunks than inuse ones,
2007 if an inuse chunk borders them and debug is on, it's worth doing them.
2008 */
2009 if (!prev_inuse (p))
2010 {
2011 /* Note that we cannot even look at prev unless it is not inuse */
2012 mchunkptr prv = prev_chunk (p);
2013 assert (next_chunk (prv) == p);
2014 do_check_free_chunk (av, prv);
2015 }
2016
2017 if (next == av->top)
2018 {
2019 assert (prev_inuse (next));
2020 assert (chunksize (next) >= MINSIZE);
2021 }
2022 else if (!inuse (next))
2023 do_check_free_chunk (av, next);
2024}
2025
2026/*
2027 Properties of chunks recycled from fastbins
2028 */
2029
2030static void
2031do_check_remalloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2032{
2033 INTERNAL_SIZE_T sz = p->size & ~(PREV_INUSE | NON_MAIN_ARENA);
2034
2035 if (!chunk_is_mmapped (p))
2036 {
2037 assert (av == arena_for_chunk (p));
2038 if (chunk_main_arena (p))
2039 assert (av == &main_arena);
2040 else
2041 assert (av != &main_arena);
2042 }
2043
2044 do_check_inuse_chunk (av, p);
2045
2046 /* Legal size ... */
2047 assert ((sz & MALLOC_ALIGN_MASK) == 0);
2048 assert ((unsigned long) (sz) >= MINSIZE);
2049 /* ... and alignment */
2050 assert (aligned_OK (chunk2mem (p)));
2051 /* chunk is less than MINSIZE more than request */
2052 assert ((long) (sz) - (long) (s) >= 0);
2053 assert ((long) (sz) - (long) (s + MINSIZE) < 0);
2054}
2055
2056/*
2057 Properties of nonrecycled chunks at the point they are malloced
2058 */
2059
2060static void
2061do_check_malloced_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T s)
2062{
2063 /* same as recycled case ... */
2064 do_check_remalloced_chunk (av, p, s);
2065
2066 /*
2067 ... plus, must obey implementation invariant that prev_inuse is
2068 always true of any allocated chunk; i.e., that each allocated
2069 chunk borders either a previously allocated and still in-use
2070 chunk, or the base of its memory arena. This is ensured
2071 by making all allocations from the `lowest' part of any found
2072 chunk. This does not necessarily hold however for chunks
2073 recycled via fastbins.
2074 */
2075
2076 assert (prev_inuse (p));
2077}
2078
2079
2080/*
2081 Properties of malloc_state.
2082
2083 This may be useful for debugging malloc, as well as detecting user
2084 programmer errors that somehow write into malloc_state.
2085
2086 If you are extending or experimenting with this malloc, you can
2087 probably figure out how to hack this routine to print out or
2088 display chunk addresses, sizes, bins, and other instrumentation.
2089 */
2090
2091static void
2092do_check_malloc_state (mstate av)
2093{
2094 int i;
2095 mchunkptr p;
2096 mchunkptr q;
2097 mbinptr b;
2098 unsigned int idx;
2099 INTERNAL_SIZE_T size;
2100 unsigned long total = 0;
2101 int max_fast_bin;
2102
2103 /* internal size_t must be no wider than pointer type */
2104 assert (sizeof (INTERNAL_SIZE_T) <= sizeof (char *));
2105
2106 /* alignment is a power of 2 */
2107 assert ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - 1)) == 0);
2108
2109 /* cannot run remaining checks until fully initialized */
2110 if (av->top == 0 || av->top == initial_top (av))
2111 return;
2112
2113 /* pagesize is a power of 2 */
2114 assert (powerof2(GLRO (dl_pagesize)));
2115
2116 /* A contiguous main_arena is consistent with sbrk_base. */
2117 if (av == &main_arena && contiguous (av))
2118 assert ((char *) mp_.sbrk_base + av->system_mem ==
2119 (char *) av->top + chunksize (av->top));
2120
2121 /* properties of fastbins */
2122
2123 /* max_fast is in allowed range */
2124 assert ((get_max_fast () & ~1) <= request2size (MAX_FAST_SIZE));
2125
2126 max_fast_bin = fastbin_index (get_max_fast ());
2127
2128 for (i = 0; i < NFASTBINS; ++i)
2129 {
2130 p = fastbin (av, i);
2131
2132 /* The following test can only be performed for the main arena.
2133 While mallopt calls malloc_consolidate to get rid of all fast
2134 bins (especially those larger than the new maximum) this does
2135 only happen for the main arena. Trying to do this for any
2136 other arena would mean those arenas have to be locked and
2137 malloc_consolidate be called for them. This is excessive. And
2138 even if this is acceptable to somebody it still cannot solve
2139 the problem completely since if the arena is locked a
2140 concurrent malloc call might create a new arena which then
2141 could use the newly invalid fast bins. */
2142
2143 /* all bins past max_fast are empty */
2144 if (av == &main_arena && i > max_fast_bin)
2145 assert (p == 0);
2146
2147 while (p != 0)
2148 {
2149 /* each chunk claims to be inuse */
2150 do_check_inuse_chunk (av, p);
2151 total += chunksize (p);
2152 /* chunk belongs in this bin */
2153 assert (fastbin_index (chunksize (p)) == i);
2154 p = p->fd;
2155 }
2156 }
2157
2158 if (total != 0)
2159 assert (have_fastchunks (av));
2160 else if (!have_fastchunks (av))
2161 assert (total == 0);
2162
2163 /* check normal bins */
2164 for (i = 1; i < NBINS; ++i)
2165 {
2166 b = bin_at (av, i);
2167
2168 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
2169 if (i >= 2)
2170 {
2171 unsigned int binbit = get_binmap (av, i);
2172 int empty = last (b) == b;
2173 if (!binbit)
2174 assert (empty);
2175 else if (!empty)
2176 assert (binbit);
2177 }
2178
2179 for (p = last (b); p != b; p = p->bk)
2180 {
2181 /* each chunk claims to be free */
2182 do_check_free_chunk (av, p);
2183 size = chunksize (p);
2184 total += size;
2185 if (i >= 2)
2186 {
2187 /* chunk belongs in bin */
2188 idx = bin_index (size);
2189 assert (idx == i);
2190 /* lists are sorted */
2191 assert (p->bk == b ||
2192 (unsigned long) chunksize (p->bk) >= (unsigned long) chunksize (p));
2193
2194 if (!in_smallbin_range (size))
2195 {
2196 if (p->fd_nextsize != NULL)
2197 {
2198 if (p->fd_nextsize == p)
2199 assert (p->bk_nextsize == p);
2200 else
2201 {
2202 if (p->fd_nextsize == first (b))
2203 assert (chunksize (p) < chunksize (p->fd_nextsize));
2204 else
2205 assert (chunksize (p) > chunksize (p->fd_nextsize));
2206
2207 if (p == first (b))
2208 assert (chunksize (p) > chunksize (p->bk_nextsize));
2209 else
2210 assert (chunksize (p) < chunksize (p->bk_nextsize));
2211 }
2212 }
2213 else
2214 assert (p->bk_nextsize == NULL);
2215 }
2216 }
2217 else if (!in_smallbin_range (size))
2218 assert (p->fd_nextsize == NULL && p->bk_nextsize == NULL);
2219 /* chunk is followed by a legal chain of inuse chunks */
2220 for (q = next_chunk (p);
2221 (q != av->top && inuse (q) &&
2222 (unsigned long) (chunksize (q)) >= MINSIZE);
2223 q = next_chunk (q))
2224 do_check_inuse_chunk (av, q);
2225 }
2226 }
2227
2228 /* top chunk is OK */
2229 check_chunk (av, av->top);
2230}
2231#endif
2232
2233
2234/* ----------------- Support for debugging hooks -------------------- */
2235#include "hooks.c"
2236
2237
2238/* ----------- Routines dealing with system allocation -------------- */
2239
2240/*
2241 sysmalloc handles malloc cases requiring more memory from the system.
2242 On entry, it is assumed that av->top does not have enough
2243 space to service request for nb bytes, thus requiring that av->top
2244 be extended or replaced.
2245 */
2246
2247static void *
2248sysmalloc (INTERNAL_SIZE_T nb, mstate av)
2249{
2250 mchunkptr old_top; /* incoming value of av->top */
2251 INTERNAL_SIZE_T old_size; /* its size */
2252 char *old_end; /* its end address */
2253
2254 long size; /* arg to first MORECORE or mmap call */
2255 char *brk; /* return value from MORECORE */
2256
2257 long correction; /* arg to 2nd MORECORE call */
2258 char *snd_brk; /* 2nd return val */
2259
2260 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
2261 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
2262 char *aligned_brk; /* aligned offset into brk */
2263
2264 mchunkptr p; /* the allocated/returned chunk */
2265 mchunkptr remainder; /* remainder from allocation */
2266 unsigned long remainder_size; /* its size */
2267
2268
2269 size_t pagesize = GLRO (dl_pagesize);
2270 bool tried_mmap = false;
2271
2272
2273 /*
2274 If have mmap, and the request size meets the mmap threshold, and
2275 the system supports mmap, and there are few enough currently
2276 allocated mmapped regions, try to directly map this request
2277 rather than expanding top.
2278 */
2279
2280 if (av == NULL
2281 || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
2282 && (mp_.n_mmaps < mp_.n_mmaps_max)))
2283 {
2284 char *mm; /* return value from mmap call*/
2285
2286 try_mmap:
2287 /*
2288 Round up size to nearest page. For mmapped chunks, the overhead
2289 is one SIZE_SZ unit larger than for normal chunks, because there
2290 is no following chunk whose prev_size field could be used.
2291
2292 See the front_misalign handling below; for glibc there is no
2293 need for further alignments unless we have high alignment.
2294 */
2295 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2296 size = ALIGN_UP (nb + SIZE_SZ, pagesize);
2297 else
2298 size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
2299 tried_mmap = true;
2300
2301 /* Don't try if size wraps around 0 */
2302 if ((unsigned long) (size) > (unsigned long) (nb))
2303 {
2304 mm = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
2305
2306 if (mm != MAP_FAILED)
2307 {
2308 /*
2309 The offset to the start of the mmapped region is stored
2310 in the prev_size field of the chunk. This allows us to adjust
2311 returned start address to meet alignment requirements here
2312 and in memalign(), and still be able to compute proper
2313 address argument for later munmap in free() and realloc().
2314 */
2315
2316 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2317 {
2318 /* For glibc, chunk2mem increases the address by 2*SIZE_SZ and
2319 MALLOC_ALIGN_MASK is 2*SIZE_SZ-1. Each mmap'ed area is page
2320 aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
2321 assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
2322 front_misalign = 0;
2323 }
2324 else
2325 front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
2326 if (front_misalign > 0)
2327 {
2328 correction = MALLOC_ALIGNMENT - front_misalign;
2329 p = (mchunkptr) (mm + correction);
2330 set_prev_size (p, correction);
2331 set_head (p, (size - correction) | IS_MMAPPED);
2332 }
2333 else
2334 {
2335 p = (mchunkptr) mm;
2336 set_prev_size (p, 0);
2337 set_head (p, size | IS_MMAPPED);
2338 }
2339
2340 /* update statistics */
2341
2342 int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
2343 atomic_max (&mp_.max_n_mmaps, new);
2344
2345 unsigned long sum;
2346 sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
2347 atomic_max (&mp_.max_mmapped_mem, sum);
2348
2349 check_chunk (av, p);
2350
2351 return chunk2mem (p);
2352 }
2353 }
2354 }
2355
2356 /* There are no usable arenas and mmap also failed. */
2357 if (av == NULL)
2358 return 0;
2359
2360 /* Record incoming configuration of top */
2361
2362 old_top = av->top;
2363 old_size = chunksize (old_top);
2364 old_end = (char *) (chunk_at_offset (old_top, old_size));
2365
2366 brk = snd_brk = (char *) (MORECORE_FAILURE);
2367
2368 /*
2369 If not the first time through, we require old_size to be
2370 at least MINSIZE and to have prev_inuse set.
2371 */
2372
2373 assert ((old_top == initial_top (av) && old_size == 0) ||
2374 ((unsigned long) (old_size) >= MINSIZE &&
2375 prev_inuse (old_top) &&
2376 ((unsigned long) old_end & (pagesize - 1)) == 0));
2377
2378 /* Precondition: not enough current space to satisfy nb request */
2379 assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
2380
2381
2382 if (av != &main_arena)
2383 {
2384 heap_info *old_heap, *heap;
2385 size_t old_heap_size;
2386
2387 /* First try to extend the current heap. */
2388 old_heap = heap_for_ptr (old_top);
2389 old_heap_size = old_heap->size;
2390 if ((long) (MINSIZE + nb - old_size) > 0
2391 && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
2392 {
2393 av->system_mem += old_heap->size - old_heap_size;
2394 set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
2395 | PREV_INUSE);
2396 }
2397 else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
2398 {
2399 /* Use a newly allocated heap. */
2400 heap->ar_ptr = av;
2401 heap->prev = old_heap;
2402 av->system_mem += heap->size;
2403 /* Set up the new top. */
2404 top (av) = chunk_at_offset (heap, sizeof (*heap));
2405 set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
2406
2407 /* Setup fencepost and free the old top chunk with a multiple of
2408 MALLOC_ALIGNMENT in size. */
2409 /* The fencepost takes at least MINSIZE bytes, because it might
2410 become the top chunk again later. Note that a footer is set
2411 up, too, although the chunk is marked in use. */
2412 old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
2413 set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ), 0 | PREV_INUSE);
2414 if (old_size >= MINSIZE)
2415 {
2416 set_head (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ) | PREV_INUSE);
2417 set_foot (chunk_at_offset (old_top, old_size), (2 * SIZE_SZ));
2418 set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
2419 _int_free (av, old_top, 1);
2420 }
2421 else
2422 {
2423 set_head (old_top, (old_size + 2 * SIZE_SZ) | PREV_INUSE);
2424 set_foot (old_top, (old_size + 2 * SIZE_SZ));
2425 }
2426 }
2427 else if (!tried_mmap)
2428 /* We can at least try to get memory via mmap. */
2429 goto try_mmap;
2430 }
2431 else /* av == main_arena */
2432
2433
2434 { /* Request enough space for nb + pad + overhead */
2435 size = nb + mp_.top_pad + MINSIZE;
2436
2437 /*
2438 If contiguous, we can subtract out existing space that we hope to
2439 combine with new space. We add it back later only if
2440 we don't actually get contiguous space.
2441 */
2442
2443 if (contiguous (av))
2444 size -= old_size;
2445
2446 /*
2447 Round to a multiple of page size.
2448 If MORECORE is not contiguous, this ensures that we only call it
2449 with whole-page arguments. And if MORECORE is contiguous and
2450 this is not first time through, this preserves page-alignment of
2451 previous calls. Otherwise, we correct to page-align below.
2452 */
2453
2454 size = ALIGN_UP (size, pagesize);
2455
2456 /*
2457 Don't try to call MORECORE if argument is so big as to appear
2458 negative. Note that since mmap takes size_t arg, it may succeed
2459 below even if we cannot call MORECORE.
2460 */
2461
2462 if (size > 0)
2463 {
2464 brk = (char *) (MORECORE (size));
2465 LIBC_PROBE (memory_sbrk_more, 2, brk, size);
2466 }
2467
2468 if (brk != (char *) (MORECORE_FAILURE))
2469 {
2470 /* Call the `morecore' hook if necessary. */
2471 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2472 if (__builtin_expect (hook != NULL, 0))
2473 (*hook)();
2474 }
2475 else
2476 {
2477 /*
2478 If have mmap, try using it as a backup when MORECORE fails or
2479 cannot be used. This is worth doing on systems that have "holes" in
2480 address space, so sbrk cannot extend to give contiguous space, but
2481 space is available elsewhere. Note that we ignore mmap max count
2482 and threshold limits, since the space will not be used as a
2483 segregated mmap region.
2484 */
2485
2486 /* Cannot merge with old top, so add its size back in */
2487 if (contiguous (av))
2488 size = ALIGN_UP (size + old_size, pagesize);
2489
2490 /* If we are relying on mmap as backup, then use larger units */
2491 if ((unsigned long) (size) < (unsigned long) (MMAP_AS_MORECORE_SIZE))
2492 size = MMAP_AS_MORECORE_SIZE;
2493
2494 /* Don't try if size wraps around 0 */
2495 if ((unsigned long) (size) > (unsigned long) (nb))
2496 {
2497 char *mbrk = (char *) (MMAP (0, size, PROT_READ | PROT_WRITE, 0));
2498
2499 if (mbrk != MAP_FAILED)
2500 {
2501 /* We do not need, and cannot use, another sbrk call to find end */
2502 brk = mbrk;
2503 snd_brk = brk + size;
2504
2505 /*
2506 Record that we no longer have a contiguous sbrk region.
2507 After the first time mmap is used as backup, we do not
2508 ever rely on contiguous space since this could incorrectly
2509 bridge regions.
2510 */
2511 set_noncontiguous (av);
2512 }
2513 }
2514 }
2515
2516 if (brk != (char *) (MORECORE_FAILURE))
2517 {
2518 if (mp_.sbrk_base == 0)
2519 mp_.sbrk_base = brk;
2520 av->system_mem += size;
2521
2522 /*
2523 If MORECORE extends previous space, we can likewise extend top size.
2524 */
2525
2526 if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
2527 set_head (old_top, (size + old_size) | PREV_INUSE);
2528
2529 else if (contiguous (av) && old_size && brk < old_end)
2530 {
2531 /* Oops! Someone else killed our space. Can't touch anything. */
2532 malloc_printerr (3, "break adjusted to free malloc space", brk,
2533 av);
2534 }
2535
2536 /*
2537 Otherwise, make adjustments:
2538
2539 * If the first time through or noncontiguous, we need to call sbrk
2540 just to find out where the end of memory lies.
2541
2542 * We need to ensure that all returned chunks from malloc will meet
2543 MALLOC_ALIGNMENT
2544
2545 * If there was an intervening foreign sbrk, we need to adjust sbrk
2546 request size to account for fact that we will not be able to
2547 combine new space with existing space in old_top.
2548
2549 * Almost all systems internally allocate whole pages at a time, in
2550 which case we might as well use the whole last page of request.
2551 So we allocate enough more memory to hit a page boundary now,
2552 which in turn causes future contiguous calls to page-align.
2553 */
2554
2555 else
2556 {
2557 front_misalign = 0;
2558 end_misalign = 0;
2559 correction = 0;
2560 aligned_brk = brk;
2561
2562 /* handle contiguous cases */
2563 if (contiguous (av))
2564 {
2565 /* Count foreign sbrk as system_mem. */
2566 if (old_size)
2567 av->system_mem += brk - old_end;
2568
2569 /* Guarantee alignment of first new chunk made from this space */
2570
2571 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2572 if (front_misalign > 0)
2573 {
2574 /*
2575 Skip over some bytes to arrive at an aligned position.
2576 We don't need to specially mark these wasted front bytes.
2577 They will never be accessed anyway because
2578 prev_inuse of av->top (and any chunk created from its start)
2579 is always true after initialization.
2580 */
2581
2582 correction = MALLOC_ALIGNMENT - front_misalign;
2583 aligned_brk += correction;
2584 }
2585
2586 /*
2587 If this isn't adjacent to existing space, then we will not
2588 be able to merge with old_top space, so must add to 2nd request.
2589 */
2590
2591 correction += old_size;
2592
2593 /* Extend the end address to hit a page boundary */
2594 end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
2595 correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
2596
2597 assert (correction >= 0);
2598 snd_brk = (char *) (MORECORE (correction));
2599
2600 /*
2601 If can't allocate correction, try to at least find out current
2602 brk. It might be enough to proceed without failing.
2603
2604 Note that if second sbrk did NOT fail, we assume that space
2605 is contiguous with first sbrk. This is a safe assumption unless
2606 program is multithreaded but doesn't use locks and a foreign sbrk
2607 occurred between our first and second calls.
2608 */
2609
2610 if (snd_brk == (char *) (MORECORE_FAILURE))
2611 {
2612 correction = 0;
2613 snd_brk = (char *) (MORECORE (0));
2614 }
2615 else
2616 {
2617 /* Call the `morecore' hook if necessary. */
2618 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2619 if (__builtin_expect (hook != NULL, 0))
2620 (*hook)();
2621 }
2622 }
2623
2624 /* handle non-contiguous cases */
2625 else
2626 {
2627 if (MALLOC_ALIGNMENT == 2 * SIZE_SZ)
2628 /* MORECORE/mmap must correctly align */
2629 assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
2630 else
2631 {
2632 front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
2633 if (front_misalign > 0)
2634 {
2635 /*
2636 Skip over some bytes to arrive at an aligned position.
2637 We don't need to specially mark these wasted front bytes.
2638 They will never be accessed anyway because
2639 prev_inuse of av->top (and any chunk created from its start)
2640 is always true after initialization.
2641 */
2642
2643 aligned_brk += MALLOC_ALIGNMENT - front_misalign;
2644 }
2645 }
2646
2647 /* Find out current end of memory */
2648 if (snd_brk == (char *) (MORECORE_FAILURE))
2649 {
2650 snd_brk = (char *) (MORECORE (0));
2651 }
2652 }
2653
2654 /* Adjust top based on results of second sbrk */
2655 if (snd_brk != (char *) (MORECORE_FAILURE))
2656 {
2657 av->top = (mchunkptr) aligned_brk;
2658 set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
2659 av->system_mem += correction;
2660
2661 /*
2662 If not the first time through, we either have a
2663 gap due to foreign sbrk or a non-contiguous region. Insert a
2664 double fencepost at old_top to prevent consolidation with space
2665 we don't own. These fenceposts are artificial chunks that are
2666 marked as inuse and are in any case too small to use. We need
2667 two to make sizes and alignments work out.
2668 */
2669
2670 if (old_size != 0)
2671 {
2672 /*
2673 Shrink old_top to insert fenceposts, keeping size a
2674 multiple of MALLOC_ALIGNMENT. We know there is at least
2675 enough space in old_top to do this.
2676 */
2677 old_size = (old_size - 4 * SIZE_SZ) & ~MALLOC_ALIGN_MASK;
2678 set_head (old_top, old_size | PREV_INUSE);
2679
2680 /*
2681 Note that the following assignments completely overwrite
2682 old_top when old_size was previously MINSIZE. This is
2683 intentional. We need the fencepost, even if old_top otherwise gets
2684 lost.
2685 */
2686 set_head (chunk_at_offset (old_top, old_size),
2687 (2 * SIZE_SZ) | PREV_INUSE);
2688 set_head (chunk_at_offset (old_top, old_size + 2 * SIZE_SZ),
2689 (2 * SIZE_SZ) | PREV_INUSE);
2690
2691 /* If possible, release the rest. */
2692 if (old_size >= MINSIZE)
2693 {
2694 _int_free (av, old_top, 1);
2695 }
2696 }
2697 }
2698 }
2699 }
2700 } /* if (av != &main_arena) */
2701
2702 if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
2703 av->max_system_mem = av->system_mem;
2704 check_malloc_state (av);
2705
2706 /* finally, do the allocation */
2707 p = av->top;
2708 size = chunksize (p);
2709
2710 /* check that one of the above allocation paths succeeded */
2711 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
2712 {
2713 remainder_size = size - nb;
2714 remainder = chunk_at_offset (p, nb);
2715 av->top = remainder;
2716 set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
2717 set_head (remainder, remainder_size | PREV_INUSE);
2718 check_malloced_chunk (av, p, nb);
2719 return chunk2mem (p);
2720 }
2721
2722 /* catch all failure paths */
2723 __set_errno (ENOMEM);
2724 return 0;
2725}
2726
2727
2728/*
2729 systrim is an inverse of sorts to sysmalloc. It gives memory back
2730 to the system (via negative arguments to sbrk) if there is unused
2731 memory at the `high' end of the malloc pool. It is called
2732 automatically by free() when top space exceeds the trim
2733 threshold. It is also called by the public malloc_trim routine. It
2734 returns 1 if it actually released any memory, else 0.
2735 */
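
/*
   Worked example (a sketch; assumes a 64-bit build with MINSIZE == 32,
   a 4096-byte page size, and pad == 128 KiB): with top_size == 0x42000,

     top_area = 0x42000 - 32 - 1                   == 270303
     extra    = ALIGN_DOWN (270303 - 131072, 4096) == 0x21000  (33 pages)

   so MORECORE (-0x21000) is attempted and, if the break really moved,
   av->top shrinks by the amount actually released.
 */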
2736
2737static int
2738systrim (size_t pad, mstate av)
2739{
2740 long top_size; /* Amount of top-most memory */
2741 long extra; /* Amount to release */
2742 long released; /* Amount actually released */
2743 char *current_brk; /* address returned by pre-check sbrk call */
2744 char *new_brk; /* address returned by post-check sbrk call */
2745 size_t pagesize;
2746 long top_area;
2747
2748 pagesize = GLRO (dl_pagesize);
2749 top_size = chunksize (av->top);
2750
2751 top_area = top_size - MINSIZE - 1;
2752 if (top_area <= pad)
2753 return 0;
2754
2755 /* Release in pagesize units and round down to the nearest page. */
2756 extra = ALIGN_DOWN(top_area - pad, pagesize);
2757
2758 if (extra == 0)
2759 return 0;
2760
2761 /*
2762 Only proceed if end of memory is where we last set it.
2763 This avoids problems if there were foreign sbrk calls.
2764 */
2765 current_brk = (char *) (MORECORE (0));
2766 if (current_brk == (char *) (av->top) + top_size)
2767 {
2768 /*
2769 Attempt to release memory. We ignore MORECORE return value,
2770 and instead call again to find out where new end of memory is.
2771 This avoids problems if first call releases less than we asked,
2772 or if failure somehow altered the brk value. (We could still
2773 encounter problems if it altered brk in some very bad way,
2774 but the only thing we can do is adjust anyway, which will cause
2775 some downstream failure.)
2776 */
2777
2778 MORECORE (-extra);
2779 /* Call the `morecore' hook if necessary. */
2780 void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
2781 if (__builtin_expect (hook != NULL, 0))
2782 (*hook)();
2783 new_brk = (char *) (MORECORE (0));
2784
2785 LIBC_PROBE (memory_sbrk_less, 2, new_brk, extra);
2786
2787 if (new_brk != (char *) MORECORE_FAILURE)
2788 {
2789 released = (long) (current_brk - new_brk);
2790
2791 if (released != 0)
2792 {
2793 /* Success. Adjust top. */
2794 av->system_mem -= released;
2795 set_head (av->top, (top_size - released) | PREV_INUSE);
2796 check_malloc_state (av);
2797 return 1;
2798 }
2799 }
2800 }
2801 return 0;
2802}
2803
2804static void
2805internal_function
2806munmap_chunk (mchunkptr p)
2807{
2808 INTERNAL_SIZE_T size = chunksize (p);
2809
2810 assert (chunk_is_mmapped (p));
2811
2812 /* Do nothing if the chunk is a faked mmapped chunk in the dumped
2813 main arena. We never free this memory. */
2814 if (DUMPED_MAIN_ARENA_CHUNK (p))
2815 return;
2816
2817 uintptr_t block = (uintptr_t) p - prev_size (p);
2818 size_t total_size = prev_size (p) + size;
2819 /* Unfortunately we have to do the compiler's job by hand here. Normally
2820 we would test BLOCK and TOTAL-SIZE separately for compliance with the
2821 page size. But gcc does not recognize the optimization possibility
2822 (at the moment, at least), so we combine the two values into one before
2823 the bit test. */
2824 if (__builtin_expect (((block | total_size) & (GLRO (dl_pagesize) - 1)) != 0, 0))
2825 {
2826 malloc_printerr (check_action, "munmap_chunk(): invalid pointer",
2827 chunk2mem (p), NULL);
2828 return;
2829 }
2830
2831 atomic_decrement (&mp_.n_mmaps);
2832 atomic_add (&mp_.mmapped_mem, -total_size);
2833
2834 /* If munmap failed the process virtual memory address space is in a
2835 bad shape. Just leave the block hanging around, the process will
2836 terminate shortly anyway since not much can be done. */
2837 __munmap ((char *) block, total_size);
2838}
2839
2840#if HAVE_MREMAP
2841
2842static mchunkptr
2843internal_function
2844mremap_chunk (mchunkptr p, size_t new_size)
2845{
2846 size_t pagesize = GLRO (dl_pagesize);
2847 INTERNAL_SIZE_T offset = prev_size (p);
2848 INTERNAL_SIZE_T size = chunksize (p);
2849 char *cp;
2850
2851 assert (chunk_is_mmapped (p));
2852 assert (((size + offset) & (GLRO (dl_pagesize) - 1)) == 0);
2853
2854 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
2855 new_size = ALIGN_UP (new_size + offset + SIZE_SZ, pagesize);
2856
2857 /* No need to remap if the number of pages does not change. */
2858 if (size + offset == new_size)
2859 return p;
2860
2861 cp = (char *) __mremap ((char *) p - offset, size + offset, new_size,
2862 MREMAP_MAYMOVE);
2863
2864 if (cp == MAP_FAILED)
2865 return 0;
2866
2867 p = (mchunkptr) (cp + offset);
2868
2869 assert (aligned_OK (chunk2mem (p)));
2870
2871 assert (prev_size (p) == offset);
2872 set_head (p, (new_size - offset) | IS_MMAPPED);
2873
2874 INTERNAL_SIZE_T new;
2875 new = atomic_exchange_and_add (&mp_.mmapped_mem, new_size - size - offset)
2876 + new_size - size - offset;
2877 atomic_max (&mp_.max_mmapped_mem, new);
2878 return p;
2879}
2880#endif /* HAVE_MREMAP */
2881
2882/*------------------------ Public wrappers. --------------------------------*/
2883
2884void *
2885__libc_malloc (size_t bytes)
2886{
2887 mstate ar_ptr;
2888 void *victim;
2889
2890 void *(*hook) (size_t, const void *)
2891 = atomic_forced_read (__malloc_hook);
2892 if (__builtin_expect (hook != NULL, 0))
2893 return (*hook)(bytes, RETURN_ADDRESS (0));
2894
2895 arena_get (ar_ptr, bytes);
2896
2897 victim = _int_malloc (ar_ptr, bytes);
2898 /* Retry with another arena only if we were able to find a usable arena
2899 before. */
2900 if (!victim && ar_ptr != NULL)
2901 {
2902 LIBC_PROBE (memory_malloc_retry, 1, bytes);
2903 ar_ptr = arena_get_retry (ar_ptr, bytes);
2904 victim = _int_malloc (ar_ptr, bytes);
2905 }
2906
2907 if (ar_ptr != NULL)
2908 __libc_lock_unlock (ar_ptr->mutex);
2909
2910 assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
2911 ar_ptr == arena_for_chunk (mem2chunk (victim)));
2912 return victim;
2913}
2914libc_hidden_def (__libc_malloc)
2915
2916void
2917__libc_free (void *mem)
2918{
2919 mstate ar_ptr;
2920 mchunkptr p; /* chunk corresponding to mem */
2921
2922 void (*hook) (void *, const void *)
2923 = atomic_forced_read (__free_hook);
2924 if (__builtin_expect (hook != NULL, 0))
2925 {
2926 (*hook)(mem, RETURN_ADDRESS (0));
2927 return;
2928 }
2929
2930 if (mem == 0) /* free(0) has no effect */
2931 return;
2932
2933 p = mem2chunk (mem);
2934
2935 if (chunk_is_mmapped (p)) /* release mmapped memory. */
2936 {
2937 /* See if the dynamic brk/mmap threshold needs adjusting.
2938 Dumped fake mmapped chunks do not affect the threshold. */
2939 if (!mp_.no_dyn_threshold
2940 && chunksize_nomask (p) > mp_.mmap_threshold
2941 && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX
2942 && !DUMPED_MAIN_ARENA_CHUNK (p))
2943 {
2944 mp_.mmap_threshold = chunksize (p);
2945 mp_.trim_threshold = 2 * mp_.mmap_threshold;
2946 LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
2947 mp_.mmap_threshold, mp_.trim_threshold);
2948 }
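      /* Example of the heuristic above (an editorial sketch, assuming the
	 default 128 KiB mmap threshold and the 32 MiB
	 DEFAULT_MMAP_THRESHOLD_MAX of a 64-bit build): freeing a 2 MiB
	 mmapped chunk raises mmap_threshold to about 2 MiB and
	 trim_threshold to about 4 MiB, so a later allocation of that size
	 is served from the heap and can be reused instead of paying a
	 fresh mmap/munmap cycle.  */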
2949 munmap_chunk (p);
2950 return;
2951 }
2952
2953 ar_ptr = arena_for_chunk (p);
2954 _int_free (ar_ptr, p, 0);
2955}
2956libc_hidden_def (__libc_free)
2957
2958void *
2959__libc_realloc (void *oldmem, size_t bytes)
2960{
2961 mstate ar_ptr;
2962 INTERNAL_SIZE_T nb; /* padded request size */
2963
2964 void *newp; /* chunk to return */
2965
2966 void *(*hook) (void *, size_t, const void *) =
2967 atomic_forced_read (__realloc_hook);
2968 if (__builtin_expect (hook != NULL, 0))
2969 return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));
2970
2971#if REALLOC_ZERO_BYTES_FREES
2972 if (bytes == 0 && oldmem != NULL)
2973 {
2974 __libc_free (oldmem); return 0;
2975 }
2976#endif
2977
2978 /* realloc of null is supposed to be same as malloc */
2979 if (oldmem == 0)
2980 return __libc_malloc (bytes);
2981
2982 /* chunk corresponding to oldmem */
2983 const mchunkptr oldp = mem2chunk (oldmem);
2984 /* its size */
2985 const INTERNAL_SIZE_T oldsize = chunksize (oldp);
2986
2987 if (chunk_is_mmapped (oldp))
2988 ar_ptr = NULL;
2989 else
2990 ar_ptr = arena_for_chunk (oldp);
2991
2992 /* Little security check which won't hurt performance: the allocator
2993 never wraps around at the end of the address space. Therefore
2994 we can exclude some size values which might appear here by
2995 accident or by "design" from some intruder. We need to bypass
2996 this check for dumped fake mmap chunks from the old main arena
2997 because the new malloc may provide additional alignment. */
2998 if ((__builtin_expect ((uintptr_t) oldp > (uintptr_t) -oldsize, 0)
2999 || __builtin_expect (misaligned_chunk (oldp), 0))
3000 && !DUMPED_MAIN_ARENA_CHUNK (oldp))
3001 {
3002 malloc_printerr (check_action, "realloc(): invalid pointer", oldmem,
3003 ar_ptr);
3004 return NULL;
3005 }
3006
3007 checked_request2size (bytes, nb);
3008
3009 if (chunk_is_mmapped (oldp))
3010 {
3011 /* If this is a faked mmapped chunk from the dumped main arena,
3012 always make a copy (and do not free the old chunk). */
3013 if (DUMPED_MAIN_ARENA_CHUNK (oldp))
3014 {
3015 /* Must alloc, copy, free. */
3016 void *newmem = __libc_malloc (bytes);
3017 if (newmem == 0)
3018 return NULL;
3019 /* Copy as many bytes as are available from the old chunk
3020 and fit into the new size. NB: The overhead for faked
3021 mmapped chunks is only SIZE_SZ, not 2 * SIZE_SZ as for
3022 regular mmapped chunks. */
3023 if (bytes > oldsize - SIZE_SZ)
3024 bytes = oldsize - SIZE_SZ;
3025 memcpy (newmem, oldmem, bytes);
3026 return newmem;
3027 }
3028
3029 void *newmem;
3030
3031#if HAVE_MREMAP
3032 newp = mremap_chunk (oldp, nb);
3033 if (newp)
3034 return chunk2mem (newp);
3035#endif
3036 /* Note the extra SIZE_SZ overhead. */
3037 if (oldsize - SIZE_SZ >= nb)
3038 return oldmem; /* do nothing */
3039
3040 /* Must alloc, copy, free. */
3041 newmem = __libc_malloc (bytes);
3042 if (newmem == 0)
3043 return 0; /* propagate failure */
3044
3045 memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
3046 munmap_chunk (oldp);
3047 return newmem;
3048 }
3049
3050 __libc_lock_lock (ar_ptr->mutex);
3051
3052 newp = _int_realloc (ar_ptr, oldp, oldsize, nb);
3053
3054 __libc_lock_unlock (ar_ptr->mutex);
3055 assert (!newp || chunk_is_mmapped (mem2chunk (newp)) ||
3056 ar_ptr == arena_for_chunk (mem2chunk (newp)));
3057
3058 if (newp == NULL)
3059 {
3060 /* Try harder to allocate memory in other arenas. */
3061 LIBC_PROBE (memory_realloc_retry, 2, bytes, oldmem);
3062 newp = __libc_malloc (bytes);
3063 if (newp != NULL)
3064 {
3065 memcpy (newp, oldmem, oldsize - SIZE_SZ);
3066 _int_free (ar_ptr, oldp, 0);
3067 }
3068 }
3069
3070 return newp;
3071}
3072libc_hidden_def (__libc_realloc)
3073
3074void *
3075__libc_memalign (size_t alignment, size_t bytes)
3076{
3077 void *address = RETURN_ADDRESS (0);
3078 return _mid_memalign (alignment, bytes, address);
3079}
3080
3081static void *
3082_mid_memalign (size_t alignment, size_t bytes, void *address)
3083{
3084 mstate ar_ptr;
3085 void *p;
3086
3087 void *(*hook) (size_t, size_t, const void *) =
3088 atomic_forced_read (__memalign_hook);
3089 if (__builtin_expect (hook != NULL, 0))
3090 return (*hook)(alignment, bytes, address);
3091
3092 /* If we need less alignment than we give anyway, just relay to malloc. */
3093 if (alignment <= MALLOC_ALIGNMENT)
3094 return __libc_malloc (bytes);
3095
3096 /* Otherwise, ensure that it is at least a minimum chunk size */
3097 if (alignment < MINSIZE)
3098 alignment = MINSIZE;
3099
3100 /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
3101 power of 2 and will cause overflow in the check below. */
3102 if (alignment > SIZE_MAX / 2 + 1)
3103 {
3104 __set_errno (EINVAL);
3105 return 0;
3106 }
3107
3108 /* Check for overflow. */
3109 if (bytes > SIZE_MAX - alignment - MINSIZE)
3110 {
3111 __set_errno (ENOMEM);
3112 return 0;
3113 }
3114
3115
3116 /* Make sure alignment is power of 2. */
3117 if (!powerof2 (alignment))
3118 {
3119 size_t a = MALLOC_ALIGNMENT * 2;
3120 while (a < alignment)
3121 a <<= 1;
3122 alignment = a;
3123 }
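  /* For example (a sketch, assuming MALLOC_ALIGNMENT == 16): a request for
     alignment 48 is rounded up here to 64, the next power of two at or
     above it (and at least 2 * MALLOC_ALIGNMENT).  */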
3124
3125 arena_get (ar_ptr, bytes + alignment + MINSIZE);
3126
3127 p = _int_memalign (ar_ptr, alignment, bytes);
3128 if (!p && ar_ptr != NULL)
3129 {
3130 LIBC_PROBE (memory_memalign_retry, 2, bytes, alignment);
3131 ar_ptr = arena_get_retry (ar_ptr, bytes);
3132 p = _int_memalign (ar_ptr, alignment, bytes);
3133 }
3134
3135 if (ar_ptr != NULL)
3136 __libc_lock_unlock (ar_ptr->mutex);
3137
3138 assert (!p || chunk_is_mmapped (mem2chunk (p)) ||
3139 ar_ptr == arena_for_chunk (mem2chunk (p)));
3140 return p;
3141}
3142/* For ISO C11. */
3143weak_alias (__libc_memalign, aligned_alloc)
3144libc_hidden_def (__libc_memalign)
3145
3146void *
3147__libc_valloc (size_t bytes)
3148{
3149 if (__malloc_initialized < 0)
3150 ptmalloc_init ();
3151
3152 void *address = RETURN_ADDRESS (0);
3153 size_t pagesize = GLRO (dl_pagesize);
3154 return _mid_memalign (pagesize, bytes, address);
3155}
3156
3157void *
3158__libc_pvalloc (size_t bytes)
3159{
3160 if (__malloc_initialized < 0)
3161 ptmalloc_init ();
3162
3163 void *address = RETURN_ADDRESS (0);
3164 size_t pagesize = GLRO (dl_pagesize);
3165 size_t rounded_bytes = ALIGN_UP (bytes, pagesize);
3166
3167 /* Check for overflow. */
3168 if (bytes > SIZE_MAX - 2 * pagesize - MINSIZE)
3169 {
3170 __set_errno (ENOMEM);
3171 return 0;
3172 }
3173
3174 return _mid_memalign (pagesize, rounded_bytes, address);
3175}
3176
3177void *
3178__libc_calloc (size_t n, size_t elem_size)
3179{
3180 mstate av;
3181 mchunkptr oldtop, p;
3182 INTERNAL_SIZE_T bytes, sz, csz, oldtopsize;
3183 void *mem;
3184 unsigned long clearsize;
3185 unsigned long nclears;
3186 INTERNAL_SIZE_T *d;
3187
3188 /* size_t is unsigned so the behavior on overflow is defined. */
3189 bytes = n * elem_size;
3190#define HALF_INTERNAL_SIZE_T \
3191 (((INTERNAL_SIZE_T) 1) << (8 * sizeof (INTERNAL_SIZE_T) / 2))
3192 if (__builtin_expect ((n | elem_size) >= HALF_INTERNAL_SIZE_T, 0))
3193 {
3194 if (elem_size != 0 && bytes / elem_size != n)
3195 {
3196 __set_errno (ENOMEM);
3197 return 0;
3198 }
3199 }
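  /* Example of the overflow check (a sketch, assuming a 32-bit
     INTERNAL_SIZE_T, so HALF_INTERNAL_SIZE_T == 0x10000): for
     n == 0x10000 and elem_size == 0x10001, bytes wraps to 0x10000 and
     bytes / elem_size == 0 != n, so ENOMEM is returned.  When both
     operands are below 0x10000 the product cannot wrap and the costly
     division is skipped entirely.  */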
3200
3201 void *(*hook) (size_t, const void *) =
3202 atomic_forced_read (__malloc_hook);
3203 if (__builtin_expect (hook != NULL, 0))
3204 {
3205 sz = bytes;
3206 mem = (*hook)(sz, RETURN_ADDRESS (0));
3207 if (mem == 0)
3208 return 0;
3209
3210 return memset (mem, 0, sz);
3211 }
3212
3213 sz = bytes;
3214
3215 arena_get (av, sz);
3216 if (av)
3217 {
3218 /* Check if we hand out the top chunk, in which case there may be no
3219 need to clear. */
3220#if MORECORE_CLEARS
3221 oldtop = top (av);
3222 oldtopsize = chunksize (top (av));
3223# if MORECORE_CLEARS < 2
3224 /* Only newly allocated memory is guaranteed to be cleared. */
3225 if (av == &main_arena &&
3226 oldtopsize < mp_.sbrk_base + av->max_system_mem - (char *) oldtop)
3227 oldtopsize = (mp_.sbrk_base + av->max_system_mem - (char *) oldtop);
3228# endif
3229 if (av != &main_arena)
3230 {
3231 heap_info *heap = heap_for_ptr (oldtop);
3232 if (oldtopsize < (char *) heap + heap->mprotect_size - (char *) oldtop)
3233 oldtopsize = (char *) heap + heap->mprotect_size - (char *) oldtop;
3234 }
3235#endif
3236 }
3237 else
3238 {
3239 /* No usable arenas. */
3240 oldtop = 0;
3241 oldtopsize = 0;
3242 }
3243 mem = _int_malloc (av, sz);
3244
3245
3246 assert (!mem || chunk_is_mmapped (mem2chunk (mem)) ||
3247 av == arena_for_chunk (mem2chunk (mem)));
3248
3249 if (mem == 0 && av != NULL)
3250 {
3251 LIBC_PROBE (memory_calloc_retry, 1, sz);
3252 av = arena_get_retry (av, sz);
3253 mem = _int_malloc (av, sz);
3254 }
3255
3256 if (av != NULL)
3257 __libc_lock_unlock (av->mutex);
3258
3259 /* Allocation failed even after a retry. */
3260 if (mem == 0)
3261 return 0;
3262
3263 p = mem2chunk (mem);
3264
3265  /* Two optional cases in which clearing is not necessary */
3266 if (chunk_is_mmapped (p))
3267 {
3268 if (__builtin_expect (perturb_byte, 0))
3269 return memset (mem, 0, sz);
3270
3271 return mem;
3272 }
3273
3274 csz = chunksize (p);
3275
3276#if MORECORE_CLEARS
3277 if (perturb_byte == 0 && (p == oldtop && csz > oldtopsize))
3278 {
3279 /* clear only the bytes from non-freshly-sbrked memory */
3280 csz = oldtopsize;
3281 }
3282#endif
3283
3284  /* Unroll clear of <= 36 bytes (72 if 8-byte sizes).  We know that
3285 contents have an odd number of INTERNAL_SIZE_T-sized words;
3286 minimally 3. */
3287 d = (INTERNAL_SIZE_T *) mem;
3288 clearsize = csz - SIZE_SZ;
3289 nclears = clearsize / sizeof (INTERNAL_SIZE_T);
3290 assert (nclears >= 3);
3291
3292 if (nclears > 9)
3293 return memset (d, 0, clearsize);
3294
3295 else
3296 {
3297 *(d + 0) = 0;
3298 *(d + 1) = 0;
3299 *(d + 2) = 0;
3300 if (nclears > 4)
3301 {
3302 *(d + 3) = 0;
3303 *(d + 4) = 0;
3304 if (nclears > 6)
3305 {
3306 *(d + 5) = 0;
3307 *(d + 6) = 0;
3308 if (nclears > 8)
3309 {
3310 *(d + 7) = 0;
3311 *(d + 8) = 0;
3312 }
3313 }
3314 }
3315 }
3316
3317 return mem;
3318}
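
/*
   The overflow test at the top of __libc_calloc relies on a cheap fast path:
   if both factors are below 2^(half the bits of INTERNAL_SIZE_T), their
   product cannot wrap, so the expensive division is only performed when that
   is not guaranteed.  A standalone sketch of the same idea (illustrative
   code, not part of this allocator):

     #include <stddef.h>
     #include <limits.h>

     // Return 1 and store n * size in *out iff the product fits in size_t.
     static int
     mul_no_overflow (size_t n, size_t size, size_t *out)
     {
       const size_t half = (size_t) 1 << (sizeof (size_t) * CHAR_BIT / 2);
       if ((n | size) >= half              // at least one large factor ...
           && size != 0 && n > (size_t) -1 / size)
         return 0;                         // ... and the product would wrap
       *out = n * size;
       return 1;
     }
*/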
3319
3320/*
3321 ------------------------------ malloc ------------------------------
3322 */
3323
3324static void *
3325_int_malloc (mstate av, size_t bytes)
3326{
3327 INTERNAL_SIZE_T nb; /* normalized request size */
3328 unsigned int idx; /* associated bin index */
3329 mbinptr bin; /* associated bin */
3330
3331 mchunkptr victim; /* inspected/selected chunk */
3332 INTERNAL_SIZE_T size; /* its size */
3333 int victim_index; /* its bin index */
3334
3335 mchunkptr remainder; /* remainder from a split */
3336 unsigned long remainder_size; /* its size */
3337
3338 unsigned int block; /* bit map traverser */
3339 unsigned int bit; /* bit map traverser */
3340 unsigned int map; /* current word of binmap */
3341
3342 mchunkptr fwd; /* misc temp for linking */
3343 mchunkptr bck; /* misc temp for linking */
3344
3345 const char *errstr = NULL;
3346
3347 /*
3348 Convert request size to internal form by adding SIZE_SZ bytes
3349 overhead plus possibly more to obtain necessary alignment and/or
3350 to obtain a size of at least MINSIZE, the smallest allocatable
3351 size. Also, checked_request2size traps (returning 0) request sizes
3352 that are so large that they wrap around zero when padded and
3353 aligned.
3354 */
3355
3356 checked_request2size (bytes, nb);
3357
3358 /* There are no usable arenas. Fall back to sysmalloc to get a chunk from
3359 mmap. */
3360 if (__glibc_unlikely (av == NULL))
3361 {
3362 void *p = sysmalloc (nb, av);
3363 if (p != NULL)
3364 alloc_perturb (p, bytes);
3365 return p;
3366 }
3367
3368 /*
3369 If the size qualifies as a fastbin, first check corresponding bin.
3370 This code is safe to execute even if av is not yet initialized, so we
3371 can try it without checking, which saves some time on this fast path.
3372 */
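
  /*
     The pop below uses a compare-and-exchange retry loop so that a
     concurrent free (which pushes onto the same singly linked list) cannot
     be lost.  A minimal sketch of that pattern, using hypothetical names and
     C11 atomics instead of glibc's catomic_* helpers:

       #include <stdatomic.h>

       struct node { struct node *next; };     // illustrative only
       _Atomic (struct node *) top;

       struct node *head = atomic_load (&top);
       do
         {
           if (head == NULL)
             break;                            // list is empty
         }
       while (!atomic_compare_exchange_weak (&top, &head, head->next));
       // on success, `head' has been unlinked from the list
   */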
3373
3374 if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
3375 {
3376 idx = fastbin_index (nb);
3377 mfastbinptr *fb = &fastbin (av, idx);
3378 mchunkptr pp = *fb;
3379 do
3380 {
3381 victim = pp;
3382 if (victim == NULL)
3383 break;
3384 }
3385 while ((pp = catomic_compare_and_exchange_val_acq (fb, victim->fd, victim))
3386 != victim);
3387 if (victim != 0)
3388 {
3389 if (__builtin_expect (fastbin_index (chunksize (victim)) != idx, 0))
3390 {
3391 errstr = "malloc(): memory corruption (fast)";
3392 errout:
3393 malloc_printerr (check_action, errstr, chunk2mem (victim), av);
3394 return NULL;
3395 }
3396 check_remalloced_chunk (av, victim, nb);
3397 void *p = chunk2mem (victim);
3398 alloc_perturb (p, bytes);
3399 return p;
3400 }
3401 }
3402
3403 /*
3404 If a small request, check regular bin. Since these "smallbins"
3405 hold one size each, no searching within bins is necessary.
3406 (For a large request, we need to wait until unsorted chunks are
3407 processed to find best fit. But for small ones, fits are exact
3408 anyway, so we can check now, which is faster.)
3409 */
3410
3411 if (in_smallbin_range (nb))
3412 {
3413 idx = smallbin_index (nb);
3414 bin = bin_at (av, idx);
3415
3416 if ((victim = last (bin)) != bin)
3417 {
3418 if (victim == 0) /* initialization check */
3419 malloc_consolidate (av);
3420 else
3421 {
3422 bck = victim->bk;
3423 if (__glibc_unlikely (bck->fd != victim))
3424 {
3425 errstr = "malloc(): smallbin double linked list corrupted";
3426 goto errout;
3427 }
3428 set_inuse_bit_at_offset (victim, nb);
3429 bin->bk = bck;
3430 bck->fd = bin;
3431
3432 if (av != &main_arena)
3433 set_non_main_arena (victim);
3434 check_malloced_chunk (av, victim, nb);
3435 void *p = chunk2mem (victim);
3436 alloc_perturb (p, bytes);
3437 return p;
3438 }
3439 }
3440 }
3441
3442 /*
3443 If this is a large request, consolidate fastbins before continuing.
3444 While it might look excessive to kill all fastbins before
3445 even seeing if there is space available, this avoids
3446 fragmentation problems normally associated with fastbins.
3447 Also, in practice, programs tend to have runs of either small or
3448 large requests, but less often mixtures, so consolidation is not
3449     invoked all that often in most programs.  And the programs in which
3450     it is called frequently would otherwise tend to fragment.
3451 */
3452
3453 else
3454 {
3455 idx = largebin_index (nb);
3456 if (have_fastchunks (av))
3457 malloc_consolidate (av);
3458 }
3459
3460 /*
3461 Process recently freed or remaindered chunks, taking one only if
3462     it is an exact fit, or, if this is a small request, the chunk is the
3463     remainder from the most recent non-exact fit.  Place other traversed
3464     chunks in bins.  Note that this step is the only place in any routine where
3465 chunks are placed in bins.
3466
3467 The outer loop here is needed because we might not realize until
3468 near the end of malloc that we should have consolidated, so must
3469 do so and retry. This happens at most once, and only when we would
3470 otherwise need to expand memory to service a "small" request.
3471 */
3472
3473 for (;; )
3474 {
3475 int iters = 0;
3476 while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
3477 {
3478 bck = victim->bk;
3479 if (__builtin_expect (chunksize_nomask (victim) <= 2 * SIZE_SZ, 0)
3480 || __builtin_expect (chunksize_nomask (victim)
3481 > av->system_mem, 0))
3482 malloc_printerr (check_action, "malloc(): memory corruption",
3483 chunk2mem (victim), av);
3484 size = chunksize (victim);
3485
3486 /*
3487 If a small request, try to use last remainder if it is the
3488 only chunk in unsorted bin. This helps promote locality for
3489 runs of consecutive small requests. This is the only
3490 exception to best-fit, and applies only when there is
3491 no exact fit for a small chunk.
3492 */
3493
3494 if (in_smallbin_range (nb) &&
3495 bck == unsorted_chunks (av) &&
3496 victim == av->last_remainder &&
3497 (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
3498 {
3499 /* split and reattach remainder */
3500 remainder_size = size - nb;
3501 remainder = chunk_at_offset (victim, nb);
3502 unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
3503 av->last_remainder = remainder;
3504 remainder->bk = remainder->fd = unsorted_chunks (av);
3505 if (!in_smallbin_range (remainder_size))
3506 {
3507 remainder->fd_nextsize = NULL;
3508 remainder->bk_nextsize = NULL;
3509 }
3510
3511 set_head (victim, nb | PREV_INUSE |
3512 (av != &main_arena ? NON_MAIN_ARENA : 0));
3513 set_head (remainder, remainder_size | PREV_INUSE);
3514 set_foot (remainder, remainder_size);
3515
3516 check_malloced_chunk (av, victim, nb);
3517 void *p = chunk2mem (victim);
3518 alloc_perturb (p, bytes);
3519 return p;
3520 }
3521
3522 /* remove from unsorted list */
3523 unsorted_chunks (av)->bk = bck;
3524 bck->fd = unsorted_chunks (av);
3525
3526 /* Take now instead of binning if exact fit */
3527
3528 if (size == nb)
3529 {
3530 set_inuse_bit_at_offset (victim, size);
3531 if (av != &main_arena)
3532 set_non_main_arena (victim);
3533 check_malloced_chunk (av, victim, nb);
3534 void *p = chunk2mem (victim);
3535 alloc_perturb (p, bytes);
3536 return p;
3537 }
3538
3539 /* place chunk in bin */
3540
3541 if (in_smallbin_range (size))
3542 {
3543 victim_index = smallbin_index (size);
3544 bck = bin_at (av, victim_index);
3545 fwd = bck->fd;
3546 }
3547 else
3548 {
3549 victim_index = largebin_index (size);
3550 bck = bin_at (av, victim_index);
3551 fwd = bck->fd;
3552
3553 /* maintain large bins in sorted order */
3554 if (fwd != bck)
3555 {
3556 /* Or with inuse bit to speed comparisons */
3557 size |= PREV_INUSE;
3558 /* if smaller than smallest, bypass loop below */
3559 assert (chunk_main_arena (bck->bk));
3560 if ((unsigned long) (size)
3561 < (unsigned long) chunksize_nomask (bck->bk))
3562 {
3563 fwd = bck;
3564 bck = bck->bk;
3565
3566 victim->fd_nextsize = fwd->fd;
3567 victim->bk_nextsize = fwd->fd->bk_nextsize;
3568 fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
3569 }
3570 else
3571 {
3572 assert (chunk_main_arena (fwd));
3573 while ((unsigned long) size < chunksize_nomask (fwd))
3574 {
3575 fwd = fwd->fd_nextsize;
3576 assert (chunk_main_arena (fwd));
3577 }
3578
3579 if ((unsigned long) size
3580 == (unsigned long) chunksize_nomask (fwd))
3581 /* Always insert in the second position. */
3582 fwd = fwd->fd;
3583 else
3584 {
3585 victim->fd_nextsize = fwd;
3586 victim->bk_nextsize = fwd->bk_nextsize;
3587 fwd->bk_nextsize = victim;
3588 victim->bk_nextsize->fd_nextsize = victim;
3589 }
3590 bck = fwd->bk;
3591 }
3592 }
3593 else
3594 victim->fd_nextsize = victim->bk_nextsize = victim;
3595 }
3596
3597 mark_bin (av, victim_index);
3598 victim->bk = bck;
3599 victim->fd = fwd;
3600 fwd->bk = victim;
3601 bck->fd = victim;
3602
3603#define MAX_ITERS 10000
3604 if (++iters >= MAX_ITERS)
3605 break;
3606 }
3607
3608 /*
3609 If a large request, scan through the chunks of current bin in
3610 sorted order to find smallest that fits. Use the skip list for this.
3611 */
3612
3613 if (!in_smallbin_range (nb))
3614 {
3615 bin = bin_at (av, idx);
3616
3617 /* skip scan if empty or largest chunk is too small */
3618 if ((victim = first (bin)) != bin
3619 && (unsigned long) chunksize_nomask (victim)
3620 >= (unsigned long) (nb))
3621 {
3622 victim = victim->bk_nextsize;
3623 while (((unsigned long) (size = chunksize (victim)) <
3624 (unsigned long) (nb)))
3625 victim = victim->bk_nextsize;
3626
3627 /* Avoid removing the first entry for a size so that the skip
3628 list does not have to be rerouted. */
3629 if (victim != last (bin)
3630 && chunksize_nomask (victim)
3631 == chunksize_nomask (victim->fd))
3632 victim = victim->fd;
3633
3634 remainder_size = size - nb;
3635 unlink (av, victim, bck, fwd);
3636
3637 /* Exhaust */
3638 if (remainder_size < MINSIZE)
3639 {
3640 set_inuse_bit_at_offset (victim, size);
3641 if (av != &main_arena)
3642 set_non_main_arena (victim);
3643 }
3644 /* Split */
3645 else
3646 {
3647 remainder = chunk_at_offset (victim, nb);
3648 /* We cannot assume the unsorted list is empty and therefore
3649 have to perform a complete insert here. */
3650 bck = unsorted_chunks (av);
3651 fwd = bck->fd;
3652 if (__glibc_unlikely (fwd->bk != bck))
3653 {
3654 errstr = "malloc(): corrupted unsorted chunks";
3655 goto errout;
3656 }
3657 remainder->bk = bck;
3658 remainder->fd = fwd;
3659 bck->fd = remainder;
3660 fwd->bk = remainder;
3661 if (!in_smallbin_range (remainder_size))
3662 {
3663 remainder->fd_nextsize = NULL;
3664 remainder->bk_nextsize = NULL;
3665 }
3666 set_head (victim, nb | PREV_INUSE |
3667 (av != &main_arena ? NON_MAIN_ARENA : 0));
3668 set_head (remainder, remainder_size | PREV_INUSE);
3669 set_foot (remainder, remainder_size);
3670 }
3671 check_malloced_chunk (av, victim, nb);
3672 void *p = chunk2mem (victim);
3673 alloc_perturb (p, bytes);
3674 return p;
3675 }
3676 }
3677
3678 /*
3679 Search for a chunk by scanning bins, starting with next largest
3680 bin. This search is strictly by best-fit; i.e., the smallest
3681 (with ties going to approximately the least recently used) chunk
3682 that fits is selected.
3683
3684 The bitmap avoids needing to check that most blocks are nonempty.
3685 The particular case of skipping all bins during warm-up phases
3686 when no chunks have been returned yet is faster than it might look.
3687 */
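
      /*
         For orientation: the binmap packs one "this bin may be non-empty"
         bit per bin into 32-bit words, so finding a candidate bin is a word
         scan plus a bit scan.  A hypothetical spelled-out version of the two
         mappings used below (assuming the conventional BINMAPSHIFT of 5):

           #define SKETCH_IDX2BLOCK(i)  ((i) >> 5)          // which map word
           #define SKETCH_IDX2BIT(i)    (1U << ((i) & 31))  // which bit in it

         e.g. bin index 70 lives in word 70 >> 5 == 2 and is tested with the
         mask 1U << (70 & 31) == 1U << 6.
       */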
3688
3689 ++idx;
3690 bin = bin_at (av, idx);
3691 block = idx2block (idx);
3692 map = av->binmap[block];
3693 bit = idx2bit (idx);
3694
3695 for (;; )
3696 {
3697 /* Skip rest of block if there are no more set bits in this block. */
3698 if (bit > map || bit == 0)
3699 {
3700 do
3701 {
3702 if (++block >= BINMAPSIZE) /* out of bins */
3703 goto use_top;
3704 }
3705 while ((map = av->binmap[block]) == 0);
3706
3707 bin = bin_at (av, (block << BINMAPSHIFT));
3708 bit = 1;
3709 }
3710
3711 /* Advance to bin with set bit. There must be one. */
3712 while ((bit & map) == 0)
3713 {
3714 bin = next_bin (bin);
3715 bit <<= 1;
3716 assert (bit != 0);
3717 }
3718
3719 /* Inspect the bin. It is likely to be non-empty */
3720 victim = last (bin);
3721
3722 /* If a false alarm (empty bin), clear the bit. */
3723 if (victim == bin)
3724 {
3725 av->binmap[block] = map &= ~bit; /* Write through */
3726 bin = next_bin (bin);
3727 bit <<= 1;
3728 }
3729
3730 else
3731 {
3732 size = chunksize (victim);
3733
3734 /* We know the first chunk in this bin is big enough to use. */
3735 assert ((unsigned long) (size) >= (unsigned long) (nb));
3736
3737 remainder_size = size - nb;
3738
3739 /* unlink */
3740 unlink (av, victim, bck, fwd);
3741
3742 /* Exhaust */
3743 if (remainder_size < MINSIZE)
3744 {
3745 set_inuse_bit_at_offset (victim, size);
3746 if (av != &main_arena)
3747 set_non_main_arena (victim);
3748 }
3749
3750 /* Split */
3751 else
3752 {
3753 remainder = chunk_at_offset (victim, nb);
3754
3755 /* We cannot assume the unsorted list is empty and therefore
3756 have to perform a complete insert here. */
3757 bck = unsorted_chunks (av);
3758 fwd = bck->fd;
3759 if (__glibc_unlikely (fwd->bk != bck))
3760 {
3761 errstr = "malloc(): corrupted unsorted chunks 2";
3762 goto errout;
3763 }
3764 remainder->bk = bck;
3765 remainder->fd = fwd;
3766 bck->fd = remainder;
3767 fwd->bk = remainder;
3768
3769 /* advertise as last remainder */
3770 if (in_smallbin_range (nb))
3771 av->last_remainder = remainder;
3772 if (!in_smallbin_range (remainder_size))
3773 {
3774 remainder->fd_nextsize = NULL;
3775 remainder->bk_nextsize = NULL;
3776 }
3777 set_head (victim, nb | PREV_INUSE |
3778 (av != &main_arena ? NON_MAIN_ARENA : 0));
3779 set_head (remainder, remainder_size | PREV_INUSE);
3780 set_foot (remainder, remainder_size);
3781 }
3782 check_malloced_chunk (av, victim, nb);
3783 void *p = chunk2mem (victim);
3784 alloc_perturb (p, bytes);
3785 return p;
3786 }
3787 }
3788
3789 use_top:
3790 /*
3791 If large enough, split off the chunk bordering the end of memory
3792 (held in av->top). Note that this is in accord with the best-fit
3793 search rule. In effect, av->top is treated as larger (and thus
3794 less well fitting) than any other available chunk since it can
3795 be extended to be as large as necessary (up to system
3796 limitations).
3797
3798 We require that av->top always exists (i.e., has size >=
3799 MINSIZE) after initialization, so if it would otherwise be
3800 exhausted by current request, it is replenished. (The main
3801 reason for ensuring it exists is that we may need MINSIZE space
3802 to put in fenceposts in sysmalloc.)
3803 */
3804
3805 victim = av->top;
3806 size = chunksize (victim);
3807
3808 if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
3809 {
3810 remainder_size = size - nb;
3811 remainder = chunk_at_offset (victim, nb);
3812 av->top = remainder;
3813 set_head (victim, nb | PREV_INUSE |
3814 (av != &main_arena ? NON_MAIN_ARENA : 0));
3815 set_head (remainder, remainder_size | PREV_INUSE);
3816
3817 check_malloced_chunk (av, victim, nb);
3818 void *p = chunk2mem (victim);
3819 alloc_perturb (p, bytes);
3820 return p;
3821 }
3822
3823 /* When we are using atomic ops to free fast chunks we can get
3824 here for all block sizes. */
3825 else if (have_fastchunks (av))
3826 {
3827 malloc_consolidate (av);
3828 /* restore original bin index */
3829 if (in_smallbin_range (nb))
3830 idx = smallbin_index (nb);
3831 else
3832 idx = largebin_index (nb);
3833 }
3834
3835 /*
3836 Otherwise, relay to handle system-dependent cases
3837 */
3838 else
3839 {
3840 void *p = sysmalloc (nb, av);
3841 if (p != NULL)
3842 alloc_perturb (p, bytes);
3843 return p;
3844 }
3845 }
3846}
3847
3848/*
3849 ------------------------------ free ------------------------------
3850 */
3851
3852static void
3853_int_free (mstate av, mchunkptr p, int have_lock)
3854{
3855 INTERNAL_SIZE_T size; /* its size */
3856 mfastbinptr *fb; /* associated fastbin */
3857 mchunkptr nextchunk; /* next contiguous chunk */
3858 INTERNAL_SIZE_T nextsize; /* its size */
3859 int nextinuse; /* true if nextchunk is used */
3860 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
3861 mchunkptr bck; /* misc temp for linking */
3862 mchunkptr fwd; /* misc temp for linking */
3863
3864 const char *errstr = NULL;
3865 int locked = 0;
3866
3867 size = chunksize (p);
3868
3869 /* Little security check which won't hurt performance: the
3870     allocator never wraps around at the end of the address space.
3871 Therefore we can exclude some size values which might appear
3872 here by accident or by "design" from some intruder. */
3873 if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
3874 || __builtin_expect (misaligned_chunk (p), 0))
3875 {
3876 errstr = "free(): invalid pointer";
3877 errout:
3878 if (!have_lock && locked)
3879 __libc_lock_unlock (av->mutex);
3880 malloc_printerr (check_action, errstr, chunk2mem (p), av);
3881 return;
3882 }
3883  /* We know that each chunk is at least MINSIZE bytes in size and a
3884     multiple of MALLOC_ALIGNMENT.  */
3885 if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
3886 {
3887 errstr = "free(): invalid size";
3888 goto errout;
3889 }
3890
3891 check_inuse_chunk(av, p);
3892
3893 /*
3894 If eligible, place chunk on a fastbin so it can be found
3895 and used quickly in malloc.
3896 */
3897
3898 if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())
3899
3900#if TRIM_FASTBINS
3901 /*
3902 If TRIM_FASTBINS set, don't place chunks
3903 bordering top into fastbins
3904 */
3905 && (chunk_at_offset(p, size) != av->top)
3906#endif
3907 ) {
3908
3909 if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
3910 <= 2 * SIZE_SZ, 0)
3911 || __builtin_expect (chunksize (chunk_at_offset (p, size))
3912 >= av->system_mem, 0))
3913 {
3914 /* We might not have a lock at this point and concurrent modifications
3915           of system_mem might have led to a false positive.  Redo the test
3916 after getting the lock. */
3917 if (have_lock
3918 || ({ assert (locked == 0);
3919 __libc_lock_lock (av->mutex);
3920 locked = 1;
3921 chunksize_nomask (chunk_at_offset (p, size)) <= 2 * SIZE_SZ
3922 || chunksize (chunk_at_offset (p, size)) >= av->system_mem;
3923 }))
3924 {
3925 errstr = "free(): invalid next size (fast)";
3926 goto errout;
3927 }
3928 if (! have_lock)
3929 {
3930 __libc_lock_unlock (av->mutex);
3931 locked = 0;
3932 }
3933 }
3934
3935 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
3936
3937 set_fastchunks(av);
3938 unsigned int idx = fastbin_index(size);
3939 fb = &fastbin (av, idx);
3940
3941 /* Atomically link P to its fastbin: P->FD = *FB; *FB = P; */
3942 mchunkptr old = *fb, old2;
3943 unsigned int old_idx = ~0u;
3944 do
3945 {
3946 /* Check that the top of the bin is not the record we are going to add
3947 (i.e., double free). */
3948 if (__builtin_expect (old == p, 0))
3949 {
3950 errstr = "double free or corruption (fasttop)";
3951 goto errout;
3952 }
3953 /* Check that size of fastbin chunk at the top is the same as
3954 size of the chunk that we are adding. We can dereference OLD
3955 only if we have the lock, otherwise it might have already been
3956 deallocated. See use of OLD_IDX below for the actual check. */
3957 if (have_lock && old != NULL)
3958 old_idx = fastbin_index(chunksize(old));
3959 p->fd = old2 = old;
3960 }
3961 while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2)) != old2);
3962
3963 if (have_lock && old != NULL && __builtin_expect (old_idx != idx, 0))
3964 {
3965 errstr = "invalid fastbin entry (free)";
3966 goto errout;
3967 }
3968 }
3969
3970 /*
3971 Consolidate other non-mmapped chunks as they arrive.
3972 */
3973
3974 else if (!chunk_is_mmapped(p)) {
3975 if (! have_lock) {
3976 __libc_lock_lock (av->mutex);
3977 locked = 1;
3978 }
3979
3980 nextchunk = chunk_at_offset(p, size);
3981
3982 /* Lightweight tests: check whether the block is already the
3983 top block. */
3984 if (__glibc_unlikely (p == av->top))
3985 {
3986 errstr = "double free or corruption (top)";
3987 goto errout;
3988 }
3989 /* Or whether the next chunk is beyond the boundaries of the arena. */
3990 if (__builtin_expect (contiguous (av)
3991 && (char *) nextchunk
3992 >= ((char *) av->top + chunksize(av->top)), 0))
3993 {
3994 errstr = "double free or corruption (out)";
3995 goto errout;
3996 }
3997 /* Or whether the block is actually not marked used. */
3998 if (__glibc_unlikely (!prev_inuse(nextchunk)))
3999 {
4000 errstr = "double free or corruption (!prev)";
4001 goto errout;
4002 }
4003
4004 nextsize = chunksize(nextchunk);
4005 if (__builtin_expect (chunksize_nomask (nextchunk) <= 2 * SIZE_SZ, 0)
4006 || __builtin_expect (nextsize >= av->system_mem, 0))
4007 {
4008 errstr = "free(): invalid next size (normal)";
4009 goto errout;
4010 }
4011
4012 free_perturb (chunk2mem(p), size - 2 * SIZE_SZ);
4013
4014 /* consolidate backward */
4015 if (!prev_inuse(p)) {
4016 prevsize = prev_size (p);
4017 size += prevsize;
4018 p = chunk_at_offset(p, -((long) prevsize));
4019 unlink(av, p, bck, fwd);
4020 }
4021
4022 if (nextchunk != av->top) {
4023 /* get and clear inuse bit */
4024 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4025
4026 /* consolidate forward */
4027 if (!nextinuse) {
4028 unlink(av, nextchunk, bck, fwd);
4029 size += nextsize;
4030 } else
4031 clear_inuse_bit_at_offset(nextchunk, 0);
4032
4033 /*
4034 Place the chunk in unsorted chunk list. Chunks are
4035 not placed into regular bins until after they have
4036 been given one chance to be used in malloc.
4037 */
4038
4039 bck = unsorted_chunks(av);
4040 fwd = bck->fd;
4041 if (__glibc_unlikely (fwd->bk != bck))
4042 {
4043 errstr = "free(): corrupted unsorted chunks";
4044 goto errout;
4045 }
4046 p->fd = fwd;
4047 p->bk = bck;
4048 if (!in_smallbin_range(size))
4049 {
4050 p->fd_nextsize = NULL;
4051 p->bk_nextsize = NULL;
4052 }
4053 bck->fd = p;
4054 fwd->bk = p;
4055
4056 set_head(p, size | PREV_INUSE);
4057 set_foot(p, size);
4058
4059 check_free_chunk(av, p);
4060 }
4061
4062 /*
4063 If the chunk borders the current high end of memory,
4064 consolidate into top
4065 */
4066
4067 else {
4068 size += nextsize;
4069 set_head(p, size | PREV_INUSE);
4070 av->top = p;
4071 check_chunk(av, p);
4072 }
4073
4074 /*
4075 If freeing a large space, consolidate possibly-surrounding
4076 chunks. Then, if the total unused topmost memory exceeds trim
4077 threshold, ask malloc_trim to reduce top.
4078
4079 Unless max_fast is 0, we don't know if there are fastbins
4080 bordering top, so we cannot tell for sure whether threshold
4081 has been reached unless fastbins are consolidated. But we
4082 don't want to consolidate on each free. As a compromise,
4083 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
4084 is reached.
4085 */
4086
4087 if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
4088 if (have_fastchunks(av))
4089 malloc_consolidate(av);
4090
4091 if (av == &main_arena) {
4092#ifndef MORECORE_CANNOT_TRIM
4093 if ((unsigned long)(chunksize(av->top)) >=
4094 (unsigned long)(mp_.trim_threshold))
4095 systrim(mp_.top_pad, av);
4096#endif
4097 } else {
4098 /* Always try heap_trim(), even if the top chunk is not
4099 large, because the corresponding heap might go away. */
4100 heap_info *heap = heap_for_ptr(top(av));
4101
4102 assert(heap->ar_ptr == av);
4103 heap_trim(heap, mp_.top_pad);
4104 }
4105 }
4106
4107 if (! have_lock) {
4108 assert (locked);
4109 __libc_lock_unlock (av->mutex);
4110 }
4111 }
4112 /*
4113 If the chunk was allocated via mmap, release via munmap().
4114 */
4115
4116 else {
4117 munmap_chunk (p);
4118 }
4119}
4120
4121/*
4122 ------------------------- malloc_consolidate -------------------------
4123
4124 malloc_consolidate is a specialized version of free() that tears
4125 down chunks held in fastbins. Free itself cannot be used for this
4126 purpose since, among other things, it might place chunks back onto
4127 fastbins. So, instead, we need to use a minor variant of the same
4128 code.
4129
4130 Also, because this routine needs to be called the first time through
4131 malloc anyway, it turns out to be the perfect place to trigger
4132 initialization code.
4133*/
4134
4135static void malloc_consolidate(mstate av)
4136{
4137 mfastbinptr* fb; /* current fastbin being consolidated */
4138 mfastbinptr* maxfb; /* last fastbin (for loop control) */
4139 mchunkptr p; /* current chunk being consolidated */
4140 mchunkptr nextp; /* next chunk to consolidate */
4141 mchunkptr unsorted_bin; /* bin header */
4142 mchunkptr first_unsorted; /* chunk to link to */
4143
4144 /* These have same use as in free() */
4145 mchunkptr nextchunk;
4146 INTERNAL_SIZE_T size;
4147 INTERNAL_SIZE_T nextsize;
4148 INTERNAL_SIZE_T prevsize;
4149 int nextinuse;
4150 mchunkptr bck;
4151 mchunkptr fwd;
4152
4153 /*
4154 If max_fast is 0, we know that av hasn't
4155 yet been initialized, in which case do so below
4156 */
4157
4158 if (get_max_fast () != 0) {
4159 clear_fastchunks(av);
4160
4161 unsorted_bin = unsorted_chunks(av);
4162
4163 /*
4164 Remove each chunk from fast bin and consolidate it, placing it
4165 then in unsorted bin. Among other reasons for doing this,
4166 placing in unsorted bin avoids needing to calculate actual bins
4167 until malloc is sure that chunks aren't immediately going to be
4168 reused anyway.
4169 */
4170
4171 maxfb = &fastbin (av, NFASTBINS - 1);
4172 fb = &fastbin (av, 0);
4173 do {
4174 p = atomic_exchange_acq (fb, NULL);
4175 if (p != 0) {
4176 do {
4177 check_inuse_chunk(av, p);
4178 nextp = p->fd;
4179
4180 /* Slightly streamlined version of consolidation code in free() */
4181 size = chunksize (p);
4182 nextchunk = chunk_at_offset(p, size);
4183 nextsize = chunksize(nextchunk);
4184
4185 if (!prev_inuse(p)) {
4186 prevsize = prev_size (p);
4187 size += prevsize;
4188 p = chunk_at_offset(p, -((long) prevsize));
4189 unlink(av, p, bck, fwd);
4190 }
4191
4192 if (nextchunk != av->top) {
4193 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
4194
4195 if (!nextinuse) {
4196 size += nextsize;
4197 unlink(av, nextchunk, bck, fwd);
4198 } else
4199 clear_inuse_bit_at_offset(nextchunk, 0);
4200
4201 first_unsorted = unsorted_bin->fd;
4202 unsorted_bin->fd = p;
4203 first_unsorted->bk = p;
4204
4205 if (!in_smallbin_range (size)) {
4206 p->fd_nextsize = NULL;
4207 p->bk_nextsize = NULL;
4208 }
4209
4210 set_head(p, size | PREV_INUSE);
4211 p->bk = unsorted_bin;
4212 p->fd = first_unsorted;
4213 set_foot(p, size);
4214 }
4215
4216 else {
4217 size += nextsize;
4218 set_head(p, size | PREV_INUSE);
4219 av->top = p;
4220 }
4221
4222 } while ( (p = nextp) != 0);
4223
4224 }
4225 } while (fb++ != maxfb);
4226 }
4227 else {
4228 malloc_init_state(av);
4229 check_malloc_state(av);
4230 }
4231}
4232
4233/*
4234 ------------------------------ realloc ------------------------------
4235*/
4236
4237void*
4238_int_realloc(mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
4239 INTERNAL_SIZE_T nb)
4240{
4241 mchunkptr newp; /* chunk to return */
4242 INTERNAL_SIZE_T newsize; /* its size */
4243 void* newmem; /* corresponding user mem */
4244
4245 mchunkptr next; /* next contiguous chunk after oldp */
4246
4247 mchunkptr remainder; /* extra space at end of newp */
4248 unsigned long remainder_size; /* its size */
4249
4250 mchunkptr bck; /* misc temp for linking */
4251 mchunkptr fwd; /* misc temp for linking */
4252
4253 const char *errstr = NULL;
4254
4255 /* oldmem size */
4256 if (__builtin_expect (chunksize_nomask (oldp) <= 2 * SIZE_SZ, 0)
4257 || __builtin_expect (oldsize >= av->system_mem, 0))
4258 {
4259 errstr = "realloc(): invalid old size";
4260 errout:
4261 malloc_printerr (check_action, errstr, chunk2mem (oldp), av);
4262 return NULL;
4263 }
4264
4265 check_inuse_chunk (av, oldp);
4266
4267 /* All callers already filter out mmap'ed chunks. */
4268 assert (!chunk_is_mmapped (oldp));
4269
4270 next = chunk_at_offset (oldp, oldsize);
4271 INTERNAL_SIZE_T nextsize = chunksize (next);
4272 if (__builtin_expect (chunksize_nomask (next) <= 2 * SIZE_SZ, 0)
4273 || __builtin_expect (nextsize >= av->system_mem, 0))
4274 {
4275 errstr = "realloc(): invalid next size";
4276 goto errout;
4277 }
4278
4279 if ((unsigned long) (oldsize) >= (unsigned long) (nb))
4280 {
4281 /* already big enough; split below */
4282 newp = oldp;
4283 newsize = oldsize;
4284 }
4285
4286 else
4287 {
4288 /* Try to expand forward into top */
4289 if (next == av->top &&
4290 (unsigned long) (newsize = oldsize + nextsize) >=
4291 (unsigned long) (nb + MINSIZE))
4292 {
4293 set_head_size (oldp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4294 av->top = chunk_at_offset (oldp, nb);
4295 set_head (av->top, (newsize - nb) | PREV_INUSE);
4296 check_inuse_chunk (av, oldp);
4297 return chunk2mem (oldp);
4298 }
4299
4300 /* Try to expand forward into next chunk; split off remainder below */
4301 else if (next != av->top &&
4302 !inuse (next) &&
4303 (unsigned long) (newsize = oldsize + nextsize) >=
4304 (unsigned long) (nb))
4305 {
4306 newp = oldp;
4307 unlink (av, next, bck, fwd);
4308 }
4309
4310 /* allocate, copy, free */
4311 else
4312 {
4313 newmem = _int_malloc (av, nb - MALLOC_ALIGN_MASK);
4314 if (newmem == 0)
4315 return 0; /* propagate failure */
4316
4317 newp = mem2chunk (newmem);
4318 newsize = chunksize (newp);
4319
4320 /*
4321 Avoid copy if newp is next chunk after oldp.
4322 */
4323 if (newp == next)
4324 {
4325 newsize += oldsize;
4326 newp = oldp;
4327 }
4328 else
4329 {
4330 memcpy (newmem, chunk2mem (oldp), oldsize - SIZE_SZ);
4331 _int_free (av, oldp, 1);
4332 check_inuse_chunk (av, newp);
4333 return chunk2mem (newp);
4334 }
4335 }
4336 }
4337
4338 /* If possible, free extra space in old or extended chunk */
4339
4340 assert ((unsigned long) (newsize) >= (unsigned long) (nb));
4341
4342 remainder_size = newsize - nb;
4343
4344 if (remainder_size < MINSIZE) /* not enough extra to split off */
4345 {
4346 set_head_size (newp, newsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4347 set_inuse_bit_at_offset (newp, newsize);
4348 }
4349 else /* split remainder */
4350 {
4351 remainder = chunk_at_offset (newp, nb);
4352 set_head_size (newp, nb | (av != &main_arena ? NON_MAIN_ARENA : 0));
4353 set_head (remainder, remainder_size | PREV_INUSE |
4354 (av != &main_arena ? NON_MAIN_ARENA : 0));
4355 /* Mark remainder as inuse so free() won't complain */
4356 set_inuse_bit_at_offset (remainder, remainder_size);
4357 _int_free (av, remainder, 1);
4358 }
4359
4360 check_inuse_chunk (av, newp);
4361 return chunk2mem (newp);
4362}
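
/*
   Seen from the public API, the strategy above yields the usual realloc
   contract: the first min(oldsize, newsize) bytes are preserved, and on
   failure the old block is left untouched.  An illustrative calling pattern
   that therefore avoids leaking the old block:

     void *tmp = realloc (buf, new_size);     // buf/new_size: caller's data
     if (tmp == NULL)
       {
         // buf is still valid and unchanged here; handle the failure
       }
     else
       buf = tmp;
*/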
4363
4364/*
4365 ------------------------------ memalign ------------------------------
4366 */
4367
4368static void *
4369_int_memalign (mstate av, size_t alignment, size_t bytes)
4370{
4371 INTERNAL_SIZE_T nb; /* padded request size */
4372 char *m; /* memory returned by malloc call */
4373 mchunkptr p; /* corresponding chunk */
4374 char *brk; /* alignment point within p */
4375 mchunkptr newp; /* chunk to return */
4376 INTERNAL_SIZE_T newsize; /* its size */
4377 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
4378 mchunkptr remainder; /* spare room at end to split off */
4379 unsigned long remainder_size; /* its size */
4380 INTERNAL_SIZE_T size;
4381
4382
4383
4384 checked_request2size (bytes, nb);
4385
4386 /*
4387 Strategy: find a spot within that chunk that meets the alignment
4388 request, and then possibly free the leading and trailing space.
4389 */
4390
4391
4392 /* Check for overflow. */
4393 if (nb > SIZE_MAX - alignment - MINSIZE)
4394 {
4395 __set_errno (ENOMEM);
4396 return 0;
4397 }
4398
4399 /* Call malloc with worst case padding to hit alignment. */
4400
4401 m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
4402
4403 if (m == 0)
4404 return 0; /* propagate failure */
4405
4406 p = mem2chunk (m);
4407
4408 if ((((unsigned long) (m)) % alignment) != 0) /* misaligned */
4409
4410 { /*
4411 Find an aligned spot inside chunk. Since we need to give back
4412 leading space in a chunk of at least MINSIZE, if the first
4413 calculation places us at a spot with less than MINSIZE leader,
4414 we can move to the next aligned spot -- we've allocated enough
4415 total room so that this is always possible.
4416 */
4417 brk = (char *) mem2chunk (((unsigned long) (m + alignment - 1)) &
4418 - ((signed long) alignment));
4419 if ((unsigned long) (brk - (char *) (p)) < MINSIZE)
4420 brk += alignment;
4421
4422 newp = (mchunkptr) brk;
4423 leadsize = brk - (char *) (p);
4424 newsize = chunksize (p) - leadsize;
4425
4426 /* For mmapped chunks, just adjust offset */
4427 if (chunk_is_mmapped (p))
4428 {
4429 set_prev_size (newp, prev_size (p) + leadsize);
4430 set_head (newp, newsize | IS_MMAPPED);
4431 return chunk2mem (newp);
4432 }
4433
4434 /* Otherwise, give back leader, use the rest */
4435 set_head (newp, newsize | PREV_INUSE |
4436 (av != &main_arena ? NON_MAIN_ARENA : 0));
4437 set_inuse_bit_at_offset (newp, newsize);
4438 set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
4439 _int_free (av, p, 1);
4440 p = newp;
4441
4442 assert (newsize >= nb &&
4443 (((unsigned long) (chunk2mem (p))) % alignment) == 0);
4444 }
4445
4446 /* Also give back spare room at the end */
4447 if (!chunk_is_mmapped (p))
4448 {
4449 size = chunksize (p);
4450 if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
4451 {
4452 remainder_size = size - nb;
4453 remainder = chunk_at_offset (p, nb);
4454 set_head (remainder, remainder_size | PREV_INUSE |
4455 (av != &main_arena ? NON_MAIN_ARENA : 0));
4456 set_head_size (p, nb);
4457 _int_free (av, remainder, 1);
4458 }
4459 }
4460
4461 check_inuse_chunk (av, p);
4462 return chunk2mem (p);
4463}
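
/*
   The "worst case padding" request above (nb + alignment + MINSIZE) works
   because rounding an address up to a power-of-two alignment consumes fewer
   than `alignment' bytes, and the extra MINSIZE guarantees that a too-small
   leader can be pushed forward by one more alignment step and still be
   freeable.  A sketch of the align-up computation with a worked value
   (illustrative; the real code operates on chunk pointers via mem2chunk):

     #include <stdint.h>

     static inline uintptr_t
     align_up (uintptr_t addr, uintptr_t alignment)  // alignment: power of 2
     {
       return (addr + alignment - 1) & ~(alignment - 1);
     }

     // align_up (0x1234, 64) == 0x1240, i.e. 12 bytes of leading waste
*/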
4464
4465
4466/*
4467 ------------------------------ malloc_trim ------------------------------
4468 */
4469
4470static int
4471mtrim (mstate av, size_t pad)
4472{
4473 /* Don't touch corrupt arenas. */
4474 if (arena_is_corrupt (av))
4475 return 0;
4476
4477 /* Ensure initialization/consolidation */
4478 malloc_consolidate (av);
4479
4480 const size_t ps = GLRO (dl_pagesize);
4481 int psindex = bin_index (ps);
4482 const size_t psm1 = ps - 1;
4483
4484 int result = 0;
4485 for (int i = 1; i < NBINS; ++i)
4486 if (i == 1 || i >= psindex)
4487 {
4488 mbinptr bin = bin_at (av, i);
4489
4490 for (mchunkptr p = last (bin); p != bin; p = p->bk)
4491 {
4492 INTERNAL_SIZE_T size = chunksize (p);
4493
4494 if (size > psm1 + sizeof (struct malloc_chunk))
4495 {
4496 /* See whether the chunk contains at least one unused page. */
4497 char *paligned_mem = (char *) (((uintptr_t) p
4498 + sizeof (struct malloc_chunk)
4499 + psm1) & ~psm1);
4500
4501 assert ((char *) chunk2mem (p) + 4 * SIZE_SZ <= paligned_mem);
4502 assert ((char *) p + size > paligned_mem);
4503
4504 /* This is the size we could potentially free. */
4505 size -= paligned_mem - (char *) p;
4506
4507 if (size > psm1)
4508 {
4509#if MALLOC_DEBUG
4510 /* When debugging we simulate destroying the memory
4511 content. */
4512 memset (paligned_mem, 0x89, size & ~psm1);
4513#endif
4514 __madvise (paligned_mem, size & ~psm1, MADV_DONTNEED);
4515
4516 result = 1;
4517 }
4518 }
4519 }
4520 }
4521
4522#ifndef MORECORE_CANNOT_TRIM
4523 return result | (av == &main_arena ? systrim (pad, av) : 0);
4524
4525#else
4526 return result;
4527#endif
4528}
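
/*
   The per-chunk work in mtrim boils down to: find the first page boundary
   inside the chunk's usable area and hand every whole page before the end of
   the chunk back to the kernel with MADV_DONTNEED.  A standalone sketch of
   that computation (illustrative; the real code above keeps the malloc_chunk
   header intact and calls the internal __madvise):

     #include <stdint.h>
     #include <sys/mman.h>
     #include <unistd.h>

     static void
     release_interior_pages (char *start, size_t len)
     {
       size_t ps = (size_t) sysconf (_SC_PAGESIZE);
       char *first = (char *) (((uintptr_t) start + ps - 1)
                               & ~(uintptr_t) (ps - 1));
       char *end = start + len;
       if (first < end && (size_t) (end - first) >= ps)
         madvise (first, (size_t) (end - first) & ~(ps - 1), MADV_DONTNEED);
     }
*/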
4529
4530
4531int
4532__malloc_trim (size_t s)
4533{
4534 int result = 0;
4535
4536 if (__malloc_initialized < 0)
4537 ptmalloc_init ();
4538
4539 mstate ar_ptr = &main_arena;
4540 do
4541 {
4542 __libc_lock_lock (ar_ptr->mutex);
4543 result |= mtrim (ar_ptr, s);
4544 __libc_lock_unlock (ar_ptr->mutex);
4545
4546 ar_ptr = ar_ptr->next;
4547 }
4548 while (ar_ptr != &main_arena);
4549
4550 return result;
4551}
4552
4553
4554/*
4555 ------------------------- malloc_usable_size -------------------------
4556 */
4557
4558static size_t
4559musable (void *mem)
4560{
4561 mchunkptr p;
4562 if (mem != 0)
4563 {
4564 p = mem2chunk (mem);
4565
4566 if (__builtin_expect (using_malloc_checking == 1, 0))
4567 return malloc_check_get_size (p);
4568
4569 if (chunk_is_mmapped (p))
4570 {
4571 if (DUMPED_MAIN_ARENA_CHUNK (p))
4572 return chunksize (p) - SIZE_SZ;
4573 else
4574 return chunksize (p) - 2 * SIZE_SZ;
4575 }
4576 else if (inuse (p))
4577 return chunksize (p) - SIZE_SZ;
4578 }
4579 return 0;
4580}
4581
4582
4583size_t
4584__malloc_usable_size (void *m)
4585{
4586 size_t result;
4587
4588 result = musable (m);
4589 return result;
4590}
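
/*
   Illustrative use of the wrapper above: the reported value may exceed the
   requested size because of alignment and minimum-chunk rounding, and every
   reported byte is usable by the application.

     #include <malloc.h>

     char *p = malloc (5);
     size_t usable = malloc_usable_size (p);   // usable >= 5
     free (p);
*/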
4591
4592/*
4593 ------------------------------ mallinfo ------------------------------
4594 Accumulate malloc statistics for arena AV into M.
4595 */
4596
4597static void
4598int_mallinfo (mstate av, struct mallinfo *m)
4599{
4600 size_t i;
4601 mbinptr b;
4602 mchunkptr p;
4603 INTERNAL_SIZE_T avail;
4604 INTERNAL_SIZE_T fastavail;
4605 int nblocks;
4606 int nfastblocks;
4607
4608 /* Ensure initialization */
4609 if (av->top == 0)
4610 malloc_consolidate (av);
4611
4612 check_malloc_state (av);
4613
4614 /* Account for top */
4615 avail = chunksize (av->top);
4616 nblocks = 1; /* top always exists */
4617
4618 /* traverse fastbins */
4619 nfastblocks = 0;
4620 fastavail = 0;
4621
4622 for (i = 0; i < NFASTBINS; ++i)
4623 {
4624 for (p = fastbin (av, i); p != 0; p = p->fd)
4625 {
4626 ++nfastblocks;
4627 fastavail += chunksize (p);
4628 }
4629 }
4630
4631 avail += fastavail;
4632
4633 /* traverse regular bins */
4634 for (i = 1; i < NBINS; ++i)
4635 {
4636 b = bin_at (av, i);
4637 for (p = last (b); p != b; p = p->bk)
4638 {
4639 ++nblocks;
4640 avail += chunksize (p);
4641 }
4642 }
4643
4644 m->smblks += nfastblocks;
4645 m->ordblks += nblocks;
4646 m->fordblks += avail;
4647 m->uordblks += av->system_mem - avail;
4648 m->arena += av->system_mem;
4649 m->fsmblks += fastavail;
4650 if (av == &main_arena)
4651 {
4652 m->hblks = mp_.n_mmaps;
4653 m->hblkhd = mp_.mmapped_mem;
4654 m->usmblks = 0;
4655 m->keepcost = chunksize (av->top);
4656 }
4657}
4658
4659
4660struct mallinfo
4661__libc_mallinfo (void)
4662{
4663 struct mallinfo m;
4664 mstate ar_ptr;
4665
4666 if (__malloc_initialized < 0)
4667 ptmalloc_init ();
4668
4669 memset (&m, 0, sizeof (m));
4670 ar_ptr = &main_arena;
4671 do
4672 {
4673 __libc_lock_lock (ar_ptr->mutex);
4674 int_mallinfo (ar_ptr, &m);
4675 __libc_lock_unlock (ar_ptr->mutex);
4676
4677 ar_ptr = ar_ptr->next;
4678 }
4679 while (ar_ptr != &main_arena);
4680
4681 return m;
4682}
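
/*
   Illustrative use of the accumulated statistics (field names as declared in
   struct mallinfo; note the counters are plain ints and may wrap on very
   large heaps):

     #include <malloc.h>
     #include <stdio.h>

     struct mallinfo mi = mallinfo ();
     printf ("total arena bytes (arena)    = %d\n", mi.arena);
     printf ("in-use bytes      (uordblks) = %d\n", mi.uordblks);
     printf ("free bytes        (fordblks) = %d\n", mi.fordblks);
     printf ("mmapped bytes     (hblkhd)   = %d\n", mi.hblkhd);
*/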
4683
4684/*
4685 ------------------------------ malloc_stats ------------------------------
4686 */
4687
4688void
4689__malloc_stats (void)
4690{
4691 int i;
4692 mstate ar_ptr;
4693 unsigned int in_use_b = mp_.mmapped_mem, system_b = in_use_b;
4694
4695 if (__malloc_initialized < 0)
4696 ptmalloc_init ();
4697 _IO_flockfile (stderr);
4698 int old_flags2 = ((_IO_FILE *) stderr)->_flags2;
4699 ((_IO_FILE *) stderr)->_flags2 |= _IO_FLAGS2_NOTCANCEL;
4700 for (i = 0, ar_ptr = &main_arena;; i++)
4701 {
4702 struct mallinfo mi;
4703
4704 memset (&mi, 0, sizeof (mi));
4705 __libc_lock_lock (ar_ptr->mutex);
4706 int_mallinfo (ar_ptr, &mi);
4707 fprintf (stderr, "Arena %d:\n", i);
4708 fprintf (stderr, "system bytes = %10u\n", (unsigned int) mi.arena);
4709 fprintf (stderr, "in use bytes = %10u\n", (unsigned int) mi.uordblks);
4710#if MALLOC_DEBUG > 1
4711 if (i > 0)
4712 dump_heap (heap_for_ptr (top (ar_ptr)));
4713#endif
4714 system_b += mi.arena;
4715 in_use_b += mi.uordblks;
4716 __libc_lock_unlock (ar_ptr->mutex);
4717 ar_ptr = ar_ptr->next;
4718 if (ar_ptr == &main_arena)
4719 break;
4720 }
4721 fprintf (stderr, "Total (incl. mmap):\n");
4722 fprintf (stderr, "system bytes = %10u\n", system_b);
4723 fprintf (stderr, "in use bytes = %10u\n", in_use_b);
4724 fprintf (stderr, "max mmap regions = %10u\n", (unsigned int) mp_.max_n_mmaps);
4725 fprintf (stderr, "max mmap bytes = %10lu\n",
4726 (unsigned long) mp_.max_mmapped_mem);
4727 ((_IO_FILE *) stderr)->_flags2 |= old_flags2;
4728 _IO_funlockfile (stderr);
4729}
4730
4731
4732/*
4733 ------------------------------ mallopt ------------------------------
4734 */
4735static inline int
4736__always_inline
4737do_set_trim_threshold (size_t value)
4738{
4739 LIBC_PROBE (memory_mallopt_trim_threshold, 3, value, mp_.trim_threshold,
4740 mp_.no_dyn_threshold);
4741 mp_.trim_threshold = value;
4742 mp_.no_dyn_threshold = 1;
4743 return 1;
4744}
4745
4746static inline int
4747__always_inline
4748do_set_top_pad (size_t value)
4749{
4750 LIBC_PROBE (memory_mallopt_top_pad, 3, value, mp_.top_pad,
4751 mp_.no_dyn_threshold);
4752 mp_.top_pad = value;
4753 mp_.no_dyn_threshold = 1;
4754 return 1;
4755}
4756
4757static inline int
4758__always_inline
4759do_set_mmap_threshold (size_t value)
4760{
4761 /* Forbid setting the threshold too high. */
4762 if (value <= HEAP_MAX_SIZE / 2)
4763 {
4764 LIBC_PROBE (memory_mallopt_mmap_threshold, 3, value, mp_.mmap_threshold,
4765 mp_.no_dyn_threshold);
4766 mp_.mmap_threshold = value;
4767 mp_.no_dyn_threshold = 1;
4768 return 1;
4769 }
4770 return 0;
4771}
4772
4773static inline int
4774__always_inline
4775do_set_mmaps_max (int32_t value)
4776{
4777 LIBC_PROBE (memory_mallopt_mmap_max, 3, value, mp_.n_mmaps_max,
4778 mp_.no_dyn_threshold);
4779 mp_.n_mmaps_max = value;
4780 mp_.no_dyn_threshold = 1;
4781 return 1;
4782}
4783
4784static inline int
4785__always_inline
4786do_set_mallopt_check (int32_t value)
4787{
4788 LIBC_PROBE (memory_mallopt_check_action, 2, value, check_action);
4789 check_action = value;
4790 return 1;
4791}
4792
4793static inline int
4794__always_inline
4795do_set_perturb_byte (int32_t value)
4796{
4797 LIBC_PROBE (memory_mallopt_perturb, 2, value, perturb_byte);
4798 perturb_byte = value;
4799 return 1;
4800}
4801
4802static inline int
4803__always_inline
4804do_set_arena_test (size_t value)
4805{
4806 LIBC_PROBE (memory_mallopt_arena_test, 2, value, mp_.arena_test);
4807 mp_.arena_test = value;
4808 return 1;
4809}
4810
4811static inline int
4812__always_inline
4813do_set_arena_max (size_t value)
4814{
4815 LIBC_PROBE (memory_mallopt_arena_max, 2, value, mp_.arena_max);
4816 mp_.arena_max = value;
4817 return 1;
4818}
4819
4820
4821int
4822__libc_mallopt (int param_number, int value)
4823{
4824 mstate av = &main_arena;
4825 int res = 1;
4826
4827 if (__malloc_initialized < 0)
4828 ptmalloc_init ();
4829 __libc_lock_lock (av->mutex);
4830 /* Ensure initialization/consolidation */
4831 malloc_consolidate (av);
4832
4833 LIBC_PROBE (memory_mallopt, 2, param_number, value);
4834
4835 switch (param_number)
4836 {
4837 case M_MXFAST:
4838 if (value >= 0 && value <= MAX_FAST_SIZE)
4839 {
4840 LIBC_PROBE (memory_mallopt_mxfast, 2, value, get_max_fast ());
4841 set_max_fast (value);
4842 }
4843 else
4844 res = 0;
4845 break;
4846
4847 case M_TRIM_THRESHOLD:
4848 do_set_trim_threshold (value);
4849 break;
4850
4851 case M_TOP_PAD:
4852 do_set_top_pad (value);
4853 break;
4854
4855 case M_MMAP_THRESHOLD:
4856 res = do_set_mmap_threshold (value);
4857 break;
4858
4859 case M_MMAP_MAX:
4860 do_set_mmaps_max (value);
4861 break;
4862
4863 case M_CHECK_ACTION:
4864 do_set_mallopt_check (value);
4865 break;
4866
4867 case M_PERTURB:
4868 do_set_perturb_byte (value);
4869 break;
4870
4871 case M_ARENA_TEST:
4872 if (value > 0)
4873 do_set_arena_test (value);
4874 break;
4875
4876 case M_ARENA_MAX:
4877 if (value > 0)
4878 do_set_arena_max (value);
4879 break;
4880 }
4881 __libc_lock_unlock (av->mutex);
4882 return res;
4883}
4884libc_hidden_def (__libc_mallopt)
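
/*
   Illustrative calls corresponding to the parameters handled above; mallopt
   returns 1 on success and 0 if a value is rejected:

     #include <malloc.h>

     mallopt (M_MMAP_THRESHOLD, 256 * 1024);   // serve requests >= 256 KiB
                                               // directly via mmap
     mallopt (M_TRIM_THRESHOLD, 1024 * 1024);  // trim top when >= 1 MiB free
     mallopt (M_PERTURB, 0xAA);                // poison allocated/freed blocks
*/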
4885
4886
4887/*
4888 -------------------- Alternative MORECORE functions --------------------
4889 */
4890
4891
4892/*
4893 General Requirements for MORECORE.
4894
4895 The MORECORE function must have the following properties:
4896
4897 If MORECORE_CONTIGUOUS is false:
4898
4899 * MORECORE must allocate in multiples of pagesize. It will
4900 only be called with arguments that are multiples of pagesize.
4901
4902 * MORECORE(0) must return an address that is at least
4903 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
4904
4905 else (i.e. If MORECORE_CONTIGUOUS is true):
4906
4907 * Consecutive calls to MORECORE with positive arguments
4908 return increasing addresses, indicating that space has been
4909 contiguously extended.
4910
4911 * MORECORE need not allocate in multiples of pagesize.
4912 Calls to MORECORE need not have args of multiples of pagesize.
4913
4914 * MORECORE need not page-align.
4915
4916 In either case:
4917
4918 * MORECORE may allocate more memory than requested. (Or even less,
4919 but this will generally result in a malloc failure.)
4920
4921 * MORECORE must not allocate memory when given argument zero, but
4922 instead return one past the end address of memory from previous
4923 nonzero call. This malloc does NOT call MORECORE(0)
4924 until at least one call with positive arguments is made, so
4925 the initial value returned is not important.
4926
4927 * Even though consecutive calls to MORECORE need not return contiguous
4928 addresses, it must be OK for malloc'ed chunks to span multiple
4929 regions in those cases where they do happen to be contiguous.
4930
4931 * MORECORE need not handle negative arguments -- it may instead
4932 just return MORECORE_FAILURE when given negative arguments.
4933 Negative arguments are always multiples of pagesize. MORECORE
4934 must not misinterpret negative args as large positive unsigned
4935    args.  You can suppress all such calls from ever occurring by defining
4936    MORECORE_CANNOT_TRIM.
4937
4938 There is some variation across systems about the type of the
4939 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
4940 actually be size_t, because sbrk supports negative args, so it is
4941 normally the signed type of the same width as size_t (sometimes
4942 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
4943 matter though. Internally, we use "long" as arguments, which should
4944 work across all reasonable possibilities.
4945
4946 Additionally, if MORECORE ever returns failure for a positive
4947 request, then mmap is used as a noncontiguous system allocator. This
4948 is a useful backup strategy for systems with holes in address spaces
4949 -- in this case sbrk cannot contiguously expand the heap, but mmap
4950 may be able to map noncontiguous space.
4951
4952 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
4953 a function that always returns MORECORE_FAILURE.
4954
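  For instance (an illustrative stand-in only, following the convention of
  the commented-out example further below):

 *#define MORECORE failing_morecore

  static void *failing_morecore (long increment)
  {
    return (void *) MORECORE_FAILURE;
  }
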
4955 If you are using this malloc with something other than sbrk (or its
4956 emulation) to supply memory regions, you probably want to set
4957 MORECORE_CONTIGUOUS as false. As an example, here is a custom
4958 allocator kindly contributed for pre-OSX macOS. It uses virtually
4959 but not necessarily physically contiguous non-paged memory (locked
4960 in, present and won't get swapped out). You can use it by
4961 uncommenting this section, adding some #includes, and setting up the
4962 appropriate defines above:
4963
4964 *#define MORECORE osMoreCore
4965 *#define MORECORE_CONTIGUOUS 0
4966
4967 There is also a shutdown routine that should somehow be called for
4968 cleanup upon program exit.
4969
4970 *#define MAX_POOL_ENTRIES 100
4971 *#define MINIMUM_MORECORE_SIZE (64 * 1024)
4972 static int next_os_pool;
4973 void *our_os_pools[MAX_POOL_ENTRIES];
4974
4975 void *osMoreCore(int size)
4976 {
4977 void *ptr = 0;
4978 static void *sbrk_top = 0;
4979
4980 if (size > 0)
4981 {
4982 if (size < MINIMUM_MORECORE_SIZE)
4983 size = MINIMUM_MORECORE_SIZE;
4984 if (CurrentExecutionLevel() == kTaskLevel)
4985 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
4986 if (ptr == 0)
4987 {
4988 return (void *) MORECORE_FAILURE;
4989 }
4990 // save ptrs so they can be freed during cleanup
4991 our_os_pools[next_os_pool] = ptr;
4992 next_os_pool++;
4993 ptr = (void *) ((((unsigned long) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
4994 sbrk_top = (char *) ptr + size;
4995 return ptr;
4996 }
4997 else if (size < 0)
4998 {
4999 // we don't currently support shrink behavior
5000 return (void *) MORECORE_FAILURE;
5001 }
5002 else
5003 {
5004 return sbrk_top;
5005 }
5006 }
5007
5008 // cleanup any allocated memory pools
5009 // called as last thing before shutting down driver
5010
5011 void osCleanupMem(void)
5012 {
5013 void **ptr;
5014
5015 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5016 if (*ptr)
5017 {
5018 PoolDeallocate(*ptr);
5019 * ptr = 0;
5020 }
5021 }
5022
5023 */
5024
5025
5026/* Helper code. */
5027
5028extern char **__libc_argv attribute_hidden;
5029
5030static void
5031malloc_printerr (int action, const char *str, void *ptr, mstate ar_ptr)
5032{
5033 /* Avoid using this arena in future. We do not attempt to synchronize this
5034 with anything else because we minimally want to ensure that __libc_message
5035 gets its resources safely without stumbling on the current corruption. */
5036 if (ar_ptr)
5037 set_arena_corrupt (ar_ptr);
5038
5039 if ((action & 5) == 5)
5040 __libc_message (action & 2, "%s\n", str);
5041 else if (action & 1)
5042 {
5043 char buf[2 * sizeof (uintptr_t) + 1];
5044
5045 buf[sizeof (buf) - 1] = '\0';
5046 char *cp = _itoa_word ((uintptr_t) ptr, &buf[sizeof (buf) - 1], 16, 0);
5047 while (cp > buf)
5048 *--cp = '0';
5049
5050 __libc_message (action & 2, "*** Error in `%s': %s: 0x%s ***\n",
5051 __libc_argv[0] ? : "<unknown>", str, cp);
5052 }
5053 else if (action & 2)
5054 abort ();
5055}
5056
5057/* We need a wrapper function for one of the additions of POSIX. */
5058int
5059__posix_memalign (void **memptr, size_t alignment, size_t size)
5060{
5061 void *mem;
5062
5063  /* Test whether the ALIGNMENT argument is valid.  It must be a power of
5064     two multiple of sizeof (void *).  */
5065 if (alignment % sizeof (void *) != 0
5066 || !powerof2 (alignment / sizeof (void *))
5067 || alignment == 0)
5068 return EINVAL;
5069
5070
5071 void *address = RETURN_ADDRESS (0);
5072 mem = _mid_memalign (alignment, size, address);
5073
5074 if (mem != NULL)
5075 {
5076 *memptr = mem;
5077 return 0;
5078 }
5079
5080 return ENOMEM;
5081}
5082weak_alias (__posix_memalign, posix_memalign)
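
/*
   Illustrative use of the POSIX wrapper above; note that errors come back as
   the return value (EINVAL or ENOMEM) rather than through errno:

     #include <stdlib.h>

     void *p;
     if (posix_memalign (&p, 64, 1000) == 0)   // 64 is a power-of-two
       free (p);                               // multiple of sizeof (void *)
*/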
5083
5084
5085int
5086__malloc_info (int options, FILE *fp)
5087{
5088 /* For now, at least. */
5089 if (options != 0)
5090 return EINVAL;
5091
5092 int n = 0;
5093 size_t total_nblocks = 0;
5094 size_t total_nfastblocks = 0;
5095 size_t total_avail = 0;
5096 size_t total_fastavail = 0;
5097 size_t total_system = 0;
5098 size_t total_max_system = 0;
5099 size_t total_aspace = 0;
5100 size_t total_aspace_mprotect = 0;
5101
5102
5103
5104 if (__malloc_initialized < 0)
5105 ptmalloc_init ();
5106
5107 fputs ("<malloc version=\"1\">\n", fp);
5108
5109 /* Iterate over all arenas currently in use. */
5110 mstate ar_ptr = &main_arena;
5111 do
5112 {
5113 fprintf (fp, "<heap nr=\"%d\">\n<sizes>\n", n++);
5114
5115 size_t nblocks = 0;
5116 size_t nfastblocks = 0;
5117 size_t avail = 0;
5118 size_t fastavail = 0;
5119 struct
5120 {
5121 size_t from;
5122 size_t to;
5123 size_t total;
5124 size_t count;
5125 } sizes[NFASTBINS + NBINS - 1];
5126#define nsizes (sizeof (sizes) / sizeof (sizes[0]))
5127
5128 __libc_lock_lock (ar_ptr->mutex);
5129
5130 for (size_t i = 0; i < NFASTBINS; ++i)
5131 {
5132 mchunkptr p = fastbin (ar_ptr, i);
5133 if (p != NULL)
5134 {
5135 size_t nthissize = 0;
5136 size_t thissize = chunksize (p);
5137
5138 while (p != NULL)
5139 {
5140 ++nthissize;
5141 p = p->fd;
5142 }
5143
5144 fastavail += nthissize * thissize;
5145 nfastblocks += nthissize;
5146 sizes[i].from = thissize - (MALLOC_ALIGNMENT - 1);
5147 sizes[i].to = thissize;
5148 sizes[i].count = nthissize;
5149 }
5150 else
5151 sizes[i].from = sizes[i].to = sizes[i].count = 0;
5152
5153 sizes[i].total = sizes[i].count * sizes[i].to;
5154 }
5155
5156
5157 mbinptr bin;
5158 struct malloc_chunk *r;
5159
5160 for (size_t i = 1; i < NBINS; ++i)
5161 {
5162 bin = bin_at (ar_ptr, i);
5163 r = bin->fd;
5164 sizes[NFASTBINS - 1 + i].from = ~((size_t) 0);
5165 sizes[NFASTBINS - 1 + i].to = sizes[NFASTBINS - 1 + i].total
5166 = sizes[NFASTBINS - 1 + i].count = 0;
5167
5168 if (r != NULL)
5169 while (r != bin)
5170 {
5171 size_t r_size = chunksize_nomask (r);
5172 ++sizes[NFASTBINS - 1 + i].count;
5173 sizes[NFASTBINS - 1 + i].total += r_size;
5174 sizes[NFASTBINS - 1 + i].from
5175 = MIN (sizes[NFASTBINS - 1 + i].from, r_size);
5176 sizes[NFASTBINS - 1 + i].to = MAX (sizes[NFASTBINS - 1 + i].to,
5177 r_size);
5178
5179 r = r->fd;
5180 }
5181
5182 if (sizes[NFASTBINS - 1 + i].count == 0)
5183 sizes[NFASTBINS - 1 + i].from = 0;
5184 nblocks += sizes[NFASTBINS - 1 + i].count;
5185 avail += sizes[NFASTBINS - 1 + i].total;
5186 }
5187
5188 __libc_lock_unlock (ar_ptr->mutex);
5189
5190 total_nfastblocks += nfastblocks;
5191 total_fastavail += fastavail;
5192
5193 total_nblocks += nblocks;
5194 total_avail += avail;
5195
5196 for (size_t i = 0; i < nsizes; ++i)
5197 if (sizes[i].count != 0 && i != NFASTBINS)
5198 fprintf (fp, " \
5199 <size from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5200 sizes[i].from, sizes[i].to, sizes[i].total, sizes[i].count);
5201
5202 if (sizes[NFASTBINS].count != 0)
5203 fprintf (fp, "\
5204 <unsorted from=\"%zu\" to=\"%zu\" total=\"%zu\" count=\"%zu\"/>\n",
5205 sizes[NFASTBINS].from, sizes[NFASTBINS].to,
5206 sizes[NFASTBINS].total, sizes[NFASTBINS].count);
5207
5208 total_system += ar_ptr->system_mem;
5209 total_max_system += ar_ptr->max_system_mem;
5210
5211 fprintf (fp,
5212 "</sizes>\n<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5213 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5214 "<system type=\"current\" size=\"%zu\"/>\n"
5215 "<system type=\"max\" size=\"%zu\"/>\n",
5216 nfastblocks, fastavail, nblocks, avail,
5217 ar_ptr->system_mem, ar_ptr->max_system_mem);
5218
5219 if (ar_ptr != &main_arena)
5220 {
5221 heap_info *heap = heap_for_ptr (top (ar_ptr));
5222 fprintf (fp,
5223 "<aspace type=\"total\" size=\"%zu\"/>\n"
5224 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
5225 heap->size, heap->mprotect_size);
5226 total_aspace += heap->size;
5227 total_aspace_mprotect += heap->mprotect_size;
5228 }
5229 else
5230 {
5231 fprintf (fp,
5232 "<aspace type=\"total\" size=\"%zu\"/>\n"
5233 "<aspace type=\"mprotect\" size=\"%zu\"/>\n",
5234 ar_ptr->system_mem, ar_ptr->system_mem);
5235 total_aspace += ar_ptr->system_mem;
5236 total_aspace_mprotect += ar_ptr->system_mem;
5237 }
5238
5239 fputs ("</heap>\n", fp);
5240 ar_ptr = ar_ptr->next;
5241 }
5242 while (ar_ptr != &main_arena);
5243
5244 fprintf (fp,
5245 "<total type=\"fast\" count=\"%zu\" size=\"%zu\"/>\n"
5246 "<total type=\"rest\" count=\"%zu\" size=\"%zu\"/>\n"
5247 "<total type=\"mmap\" count=\"%d\" size=\"%zu\"/>\n"
5248 "<system type=\"current\" size=\"%zu\"/>\n"
5249 "<system type=\"max\" size=\"%zu\"/>\n"
5250 "<aspace type=\"total\" size=\"%zu\"/>\n"
5251 "<aspace type=\"mprotect\" size=\"%zu\"/>\n"
5252 "</malloc>\n",
5253 total_nfastblocks, total_fastavail, total_nblocks, total_avail,
5254 mp_.n_mmaps, mp_.mmapped_mem,
5255 total_system, total_max_system,
5256 total_aspace, total_aspace_mprotect);
5257
5258 return 0;
5259}
5260weak_alias (__malloc_info, malloc_info)
5261
5262
5263strong_alias (__libc_calloc, __calloc) weak_alias (__libc_calloc, calloc)
5264strong_alias (__libc_free, __cfree) weak_alias (__libc_free, cfree)
5265strong_alias (__libc_free, __free) strong_alias (__libc_free, free)
5266strong_alias (__libc_malloc, __malloc) strong_alias (__libc_malloc, malloc)
5267strong_alias (__libc_memalign, __memalign)
5268weak_alias (__libc_memalign, memalign)
5269strong_alias (__libc_realloc, __realloc) strong_alias (__libc_realloc, realloc)
5270strong_alias (__libc_valloc, __valloc) weak_alias (__libc_valloc, valloc)
5271strong_alias (__libc_pvalloc, __pvalloc) weak_alias (__libc_pvalloc, pvalloc)
5272strong_alias (__libc_mallinfo, __mallinfo)
5273weak_alias (__libc_mallinfo, mallinfo)
5274strong_alias (__libc_mallopt, __mallopt) weak_alias (__libc_mallopt, mallopt)
5275
5276weak_alias (__malloc_stats, malloc_stats)
5277weak_alias (__malloc_usable_size, malloc_usable_size)
5278weak_alias (__malloc_trim, malloc_trim)
5279
5280
5281/* ------------------------------------------------------------
5282 History:
5283
5284 [see ftp://g.oswego.edu/pub/misc/malloc.c for the history of dlmalloc]
5285
5286 */
5287/*
5288 * Local variables:
5289 * c-basic-offset: 2
5290 * End:
5291 */
5292